diff --git a/README.md b/README.md
index 8b82c6e4b..8a1812f19 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ With a user logged in to existing Kubernetes or Openshift environment with Forkl
 # Start a local Openshift console server on the background.
 # - The console will be available in http://localhost:9000
 # - The inventory URL can be set using an enviorment variable,
-# ( default value for INVENTORY_SERVER_HOST is http://localhost:30088 )
+# ( default value for INVENTORY_SERVER_HOST is https://localhost:30444 )
 # for example:
 # export INVENTORY_SERVER_HOST=https://virt-konveyor-forklift.apps.example.com
 # - To close the console server run:
diff --git a/ci/deploy-all.sh b/ci/deploy-all.sh
index 074a42d22..ebe93eb70 100755
--- a/ci/deploy-all.sh
+++ b/ci/deploy-all.sh
@@ -19,6 +19,8 @@ you may not be able to start a virtual machine or use some network interfaces.
     --with-ovirt-provider     will install fake ovirt provider
     --with-vmware-provider    will install vmware simulator.
     --with-openstack-provider will install packstack simulator.
+    --no-kubevirt             don't install kubevirt.
+    --no-console              don't install OKD console.
 "
     exit 0
 fi
@@ -47,15 +49,29 @@ echo "Found: ${CONTAINER_CMD}"
 
 # Create kind cluster
 bash ${script_dir}/deploy-cluster.sh
 
-# Install okd console
-bash ${script_dir}/deploy-console.sh
+# Install volume populator
+bash ${script_dir}/deploy-volume-populator.sh
 
-# Install kubevirt
-bash ${script_dir}/deploy-kubevirt.sh
+# Install cert manager
+bash ${script_dir}/deploy-cert-manager.sh
 
 # Install forklift
 bash ${script_dir}/deploy-forklift.sh
 
+# Install kubevirt
+if [[ $@ != *'--no-kubevirt'* ]]; then
+    bash ${script_dir}/deploy-kubevirt.sh
+fi
+
+# Install okd console
+if [[ $@ != *'--no-console'* ]]; then
+    # Get the console service account and TLS certificates
+    kubectl apply -f ${script_dir}/yaml/okd-console-tls-cert.yaml
+    kubectl wait certificate -n konveyor-forklift console-certificate --for condition=Ready=True --timeout=${K8S_TIMEOUT}
+
+    bash ${script_dir}/deploy-console.sh
+fi
+
 # Install mock providers
 if [[ $@ == *'--with-all-providers'* ]]; then
     # make the submodule the current working direcotry for running the script
@@ -94,7 +110,7 @@ echo "Cluster information:"
 echo "  kubectl cluster-info --context kind-kind"
 echo ""
 echo "  API Server: https://127.0.0.1:6443/"
-echo "  Web console: http://localhost:30080/"
+echo "  Web console: https://localhost:30443/"
 echo ""
 echo "  configuration file - '${config_path}' ( example: cp ${config_path} ~/.kube/config )"
 echo "  admin token - 'abcdef.0123456789abcdef'"
diff --git a/ci/deploy-cert-manager.sh b/ci/deploy-cert-manager.sh
new file mode 100755
index 000000000..f212c0e38
--- /dev/null
+++ b/ci/deploy-cert-manager.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -euo pipefail
+script_dir=$(dirname "$0")
+
+K8S_TIMEOUT=${K8S_TIMEOUT:="360s"}
+
+echo ""
+echo "Installing Cert Manager"
+echo "======================="
+
+# Install cert-manager (we only use basic cert-manager functionality, so we don't need its latest version)
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.1/cert-manager.yaml
+
+kubectl wait deployment -n cert-manager cert-manager-cainjector --for condition=Available=True --timeout=${K8S_TIMEOUT}
+kubectl wait deployment -n cert-manager cert-manager --for condition=Available=True --timeout=${K8S_TIMEOUT}
+kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=${K8S_TIMEOUT}
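The two new opt-out flags in `deploy-all.sh` are plain substring checks against `$@`, so they combine freely with the existing provider switches in any order. A minimal usage sketch, assuming the repository root as the working directory:

```bash
# Full CI deployment, but without KubeVirt and without the OKD console:
bash ci/deploy-all.sh --no-kubevirt --no-console

# Opt-out flags compose with the provider flags:
bash ci/deploy-all.sh --with-vmware-provider --no-kubevirt
```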
diff --git a/ci/deploy-cluster.sh b/ci/deploy-cluster.sh
index 29aed89d5..616f7d38b 100644
--- a/ci/deploy-cluster.sh
+++ b/ci/deploy-cluster.sh
@@ -48,8 +48,10 @@ nodes:
   extraPortMappings:
   - containerPort: 30080
     hostPort: 30080
-  - containerPort: 30088
-    hostPort: 30088
+  - containerPort: 30443
+    hostPort: 30443
+  - containerPort: 30444
+    hostPort: 30444
 containerdConfigPatches:
 - |-
   [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
diff --git a/ci/deploy-console.sh b/ci/deploy-console.sh
index 24c516944..706a9c2a7 100755
--- a/ci/deploy-console.sh
+++ b/ci/deploy-console.sh
@@ -4,11 +4,13 @@ set -euo pipefail
 script_dir=$(dirname "$0")
 
 K8S_TIMEOUT=${K8S_TIMEOUT:="360s"}
-OKD_CONSOLE_YAML=${script_dir}/yaml/okd-console.yaml
+OKD_CONSOLE_YAML=${OKD_CONSOLE_YAML:="${script_dir}/yaml/okd-console-tls.yaml"}
 
 FORKLIFT_PLUGIN_UPSTREAM_IMG=quay.io/kubev2v/forklift-console-plugin:latest
 FORKLIFT_PLUGIN_IMAGE=${FORKLIFT_PLUGIN_IMAGE:="quay.io/kubev2v/forklift-console-plugin:latest"}
 
+#--------------------
+
 # Install OKD console
 # -------------------
 echo ""
@@ -22,7 +24,7 @@ kubectl apply -f ${script_dir}/yaml/crds/console
 kubectl apply -f ${script_dir}/yaml/crds/forklift
 
 echo ""
-echo "deploy OKD console (port: 30080)"
+echo "deploy OKD console"
 
 cat ${OKD_CONSOLE_YAML} | \
     sed "s/${FORKLIFT_PLUGIN_UPSTREAM_IMG//\//\\/}/${FORKLIFT_PLUGIN_IMAGE//\//\\/}/g" | \
@@ -32,7 +34,7 @@ echo ""
 echo "waiting for OKD console service..."
 echo "=================================="
 
-kubectl wait deployment -n okd-console console --for condition=Available=True --timeout=${K8S_TIMEOUT}
+kubectl wait deployment -n konveyor-forklift console --for condition=Available=True --timeout=${K8S_TIMEOUT}
 
 echo ""
 echo "waiting for forklift console plugin service..."
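Because `OKD_CONSOLE_YAML` is now an overridable default rather than a hard-coded path, the console manifest can be swapped per invocation. A minimal sketch; the second form is the same override the `e2e:console` npm script (see the package.json change below) uses:

```bash
# Default: deploy the TLS-enabled console manifest
bash ci/deploy-console.sh

# Override back to the plain-HTTP manifest used by the e2e tests
OKD_CONSOLE_YAML=./ci/yaml/okd-console.yaml bash ci/deploy-console.sh
```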
diff --git a/ci/deploy-forklift.sh b/ci/deploy-forklift.sh
index d619c8ad2..7c44d1e64 100755
--- a/ci/deploy-forklift.sh
+++ b/ci/deploy-forklift.sh
@@ -62,14 +62,6 @@ EOF
 
 # --------------------
 
-echo ""
-echo "Installing VolumePopulator CRD"
-echo "=============================="
-
-kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/volume-data-source-validator/v1.0.1/client/config/crd/populator.storage.k8s.io_volumepopulators.yaml
-
-# --------------------
-
 # Wait for forklift operator to start, and create a controller instance
 echo ""
 echo "Waiting for forklift-operator (may take a few minutes)"
@@ -103,4 +95,4 @@
-# Wait for forklift inventory service, then expose it on port 30088
+# Wait for forklift inventory service, then expose it on port 30444
 while ! kubectl get service -n ${FORKLIFT_NAMESPACE} forklift-inventory; do sleep 30; done
 kubectl patch service -n ${FORKLIFT_NAMESPACE} forklift-inventory --type='merge' \
-    -p '{"spec":{"type":"NodePort","ports":[{"name":"api-http","protocol":"TCP","targetPort":8080,"port":8080,"nodePort":30088}]}}'
+    -p '{"spec":{"type":"NodePort","ports":[{"name":"api-https","protocol":"TCP","targetPort":8443,"port":8443,"nodePort":30444}]}}'
diff --git a/ci/deploy-volume-populator.sh b/ci/deploy-volume-populator.sh
new file mode 100755
index 000000000..aba4a575c
--- /dev/null
+++ b/ci/deploy-volume-populator.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -euo pipefail
+
+echo ""
+echo "Installing VolumePopulator CRD"
+echo "=============================="
+
+kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/volume-data-source-validator/v1.0.1/client/config/crd/populator.storage.k8s.io_volumepopulators.yaml
diff --git a/ci/start-console.sh b/ci/start-console.sh
index 8adc46e16..6b042589a 100755
--- a/ci/start-console.sh
+++ b/ci/start-console.sh
@@ -12,8 +12,8 @@ PLUGIN_URL=${PLUGIN_URL:-"http://localhost:9001"}
 CONTAINER_NETWORK_TYPE=${CONTAINER_NETWORK_TYPE:-"host"}
 CONSOLE_IMAGE=${CONSOLE_IMAGE:-"quay.io/openshift/origin-console:latest"}
 CONSOLE_PORT=${CONSOLE_PORT:-9000}
-INVENTORY_SERVER_HOST=${INVENTORY_SERVER_HOST:-"http://localhost:30088"}
-MUST_GATHER_API_SERVER_HOST=${MUST_GATHER_API_SERVER_HOST:-"http://localhost:30089"}
+INVENTORY_SERVER_HOST=${INVENTORY_SERVER_HOST:-"https://localhost:30444"}
+MUST_GATHER_API_SERVER_HOST=${MUST_GATHER_API_SERVER_HOST:-"https://localhost:30445"}
 
 if [[ ${CONSOLE_IMAGE} =~ ^localhost/ ]]; then
     PULL_POLICY="never"
@@ -27,8 +27,6 @@ if podman container exists ${CONSOLE_CONTAINER_NAME}; then
     exit 1
 fi
 
-kubectl port-forward -n ${FORKLIFT_NAMESPACE} service/forklift-inventory 65300:8443 &
-
 # Base setup for the bridge
 if [[ $@ == *'--auth'* ]]; then
     setup_bridge_for_openshift_oauth
diff --git a/ci/stop-console.sh b/ci/stop-console.sh
index c86a91e87..cd91dd236 100755
--- a/ci/stop-console.sh
+++ b/ci/stop-console.sh
@@ -4,8 +4,6 @@ set -euo pipefail
 
 CONSOLE_CONTAINER_NAME=okd-console
 
-pkill kubectl
-
 # Test is console already running
 if podman container exists ${CONSOLE_CONTAINER_NAME}; then
     podman container stop ${CONSOLE_CONTAINER_NAME}
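With the inventory service now published directly over HTTPS on NodePort 30444, the background `kubectl port-forward` in `start-console.sh` (and the matching `pkill kubectl` in `stop-console.sh`) is no longer needed. A usage sketch for pointing a local console at either target; the route hostname is the example value from the README, not something these scripts create:

```bash
# Local kind cluster (these are now the defaults, shown here explicitly):
export INVENTORY_SERVER_HOST=https://localhost:30444
export MUST_GATHER_API_SERVER_HOST=https://localhost:30445
bash ci/start-console.sh

# Remote Openshift cluster:
export INVENTORY_SERVER_HOST=https://virt-konveyor-forklift.apps.example.com
bash ci/start-console.sh
```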
diff --git a/ci/yaml/okd-console-tls-cert.yaml b/ci/yaml/okd-console-tls-cert.yaml
new file mode 100644
index 000000000..2a95c5fa4
--- /dev/null
+++ b/ci/yaml/okd-console-tls-cert.yaml
@@ -0,0 +1,50 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: console
+  namespace: konveyor-forklift
+automountServiceAccountToken: true
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: console-console-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+  - kind: ServiceAccount
+    name: console
+    namespace: konveyor-forklift
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: console
+  namespace: konveyor-forklift
+  annotations:
+    kubernetes.io/service-account.name: console
+type: kubernetes.io/service-account-token
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: console-certificate
+  namespace: konveyor-forklift
+spec:
+  commonName: console-certificate
+  dnsNames:
+    - console.konveyor-forklift.svc
+    - console.konveyor-forklift.svc.cluster.local
+  isCA: true
+  issuerRef:
+    group: cert-manager.io
+    kind: Issuer
+    name: forklift-issuer
+  privateKey:
+    algorithm: ECDSA
+    size: 256
+  secretName: console-serving-cert
+
\ No newline at end of file
diff --git a/ci/yaml/okd-console-tls.yaml b/ci/yaml/okd-console-tls.yaml
new file mode 100644
index 000000000..658933890
--- /dev/null
+++ b/ci/yaml/okd-console-tls.yaml
@@ -0,0 +1,119 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: konveyor-forklift
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: forklift-console-plugin
+  namespace: konveyor-forklift
+  labels:
+    app: forklift-console-plugin
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: forklift-console-plugin
+  template:
+    metadata:
+      labels:
+        app: forklift-console-plugin
+    spec:
+      containers:
+        - name: forklift-console-plugin
+          image: quay.io/kubev2v/forklift-console-plugin:latest
+          ports:
+            - containerPort: 8080
+              protocol: TCP
+          imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: forklift-console-plugin
+  namespace: konveyor-forklift
+  labels:
+    app: forklift-console-plugin
+spec:
+  ports:
+    - name: 8080-tcp
+      protocol: TCP
+      port: 8080
+      targetPort: 8080
+  selector:
+    app: forklift-console-plugin
+  type: ClusterIP
+  sessionAffinity: None
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: console
+  namespace: konveyor-forklift
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: console
+  template:
+    metadata:
+      labels:
+        name: console
+    spec:
+      serviceAccountName: console
+      volumes:
+        - name: forklift-cert
+          secret:
+            secretName: forklift-cert
+        - name: console-serving-cert
+          secret:
+            secretName: console-serving-cert
+      containers:
+        - name: console
+          image: quay.io/openshift/origin-console:latest
+          volumeMounts:
+            - name: console-serving-cert
+              mountPath: /var/run/secrets/forklift
+            - name: forklift-cert
+              mountPath: /etc/ssl/certs/forklift-ca.crt
+              subPath: ca.crt
+          env:
+            - name: BRIDGE_LISTEN
+              value: "https://0.0.0.0:9000"
+            - name: BRIDGE_TLS_CERT_FILE
+              value: /var/run/secrets/forklift/tls.crt
+            - name: BRIDGE_TLS_KEY_FILE
+              value: /var/run/secrets/forklift/tls.key
+            - name: BRIDGE_PLUGINS
+              value: forklift-console-plugin=http://forklift-console-plugin.konveyor-forklift.svc.cluster.local:8080
+            - name: BRIDGE_PLUGIN_PROXY
+              value: |
+                {"services":[
+                    {
+                        "consoleAPIPath":"/api/proxy/plugin/forklift-console-plugin/forklift-inventory/",
+                        "endpoint":"https://forklift-inventory.konveyor-forklift.svc.cluster.local:8443",
+                        "authorize":true
+                    },
+                    {
+                        "consoleAPIPath":"/api/proxy/plugin/forklift-console-plugin/must-gather-api/",
+                        "endpoint":"https://must-gather-api.konveyor-forklift.svc.cluster.local:8443",
+                        "authorize":true
+                    }]}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: console
+  namespace: konveyor-forklift
+spec:
+  ports:
+    - name: api-https
+      nodePort: 30443
+      port: 9000
+      protocol: TCP
+      targetPort: 9000
+  selector:
+    name: console
+  sessionAffinity: None
+  type: NodePort
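The Certificate above assumes a `forklift-issuer` Issuer already exists in the `konveyor-forklift` namespace; once it signs the request, cert-manager writes the key pair into the `console-serving-cert` secret that the console Deployment mounts at `/var/run/secrets/forklift`. A quick check, mirroring the wait that `deploy-all.sh` performs:

```bash
# The Certificate becomes Ready only after the issuer signs it
kubectl wait certificate -n konveyor-forklift console-certificate \
    --for condition=Ready=True --timeout=360s

# The signed serving cert should now exist as a secret
kubectl get secret -n konveyor-forklift console-serving-cert
```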
diff --git a/ci/yaml/okd-console.yaml b/ci/yaml/okd-console.yaml
index 51d235f54..5780e5579 100644
--- a/ci/yaml/okd-console.yaml
+++ b/ci/yaml/okd-console.yaml
@@ -29,6 +29,25 @@ spec:
       imagePullPolicy: Always
 ---
 apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: console
+  namespace: konveyor-forklift
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: console-console-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+  - kind: ServiceAccount
+    name: console
+    namespace: konveyor-forklift
+---
+apiVersion: v1
 kind: Service
 metadata:
   name: forklift-console-plugin
@@ -46,35 +65,11 @@ spec:
   type: ClusterIP
   sessionAffinity: None
 ---
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: okd-console
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: console
-  namespace: okd-console
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: console-console-admin
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cluster-admin
-subjects:
-  - kind: ServiceAccount
-    name: console
-    namespace: okd-console
----
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: console
-  namespace: okd-console
+  namespace: konveyor-forklift
 spec:
   replicas: 1
   selector:
@@ -110,7 +105,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: console
-  namespace: okd-console
+  namespace: konveyor-forklift
 spec:
   ports:
   - name: api-http
diff --git a/docs/start-dev-server.md b/docs/start-dev-server.md
index 58e00d434..f39bc04d3 100644
--- a/docs/start-dev-server.md
+++ b/docs/start-dev-server.md
@@ -64,8 +64,8 @@ npm run cluster:delete
 | -------|--------------|
 | CONSOLE_IMAGE | The console image to run ( default `quay.io/openshift/origin-console:latest` )|
 | CONSOLE_PORT | Expose the console web application on port ( default `9000` )|
-| INVENTORY_SERVER_HOST | URL of Forklift inventory server ( default `http://localhost:8080` )|
-| MUST_GATHER_API_SERVER_HOST | URL of Forklift must gather server ( default `http://localhost:8090` )|
+| INVENTORY_SERVER_HOST | URL of Forklift inventory server ( default `https://localhost:30444` )|
+| MUST_GATHER_API_SERVER_HOST | URL of Forklift must gather server ( default `https://localhost:30445` )|
 | BRIDGE_K8S_AUTH_BEARER_TOKEN | Bearer token of user account ( on openshift token default to `$(oc whoami -t)` )|
 | BRIDGE_K8S_MODE_OFF_CLUSTER_ENDPOINT | Kubernetes API servere URL (default, guess useing kubeconfig file) |
 
@@ -85,8 +85,8 @@ In this scenario the inventory and must-gather URLs will be on the local machine
 
 ``` bash
 # When running locally the
-export INVENTORY_SERVER_HOST=http://localhost:30088
-export MUST_GATHER_API_SERVER_HOST=http://localhost:< the port assigned for must gather role >
+export INVENTORY_SERVER_HOST=https://localhost:30444
+export MUST_GATHER_API_SERVER_HOST=https://localhost:< the port assigned for must gather role >
 ```
 
 ### Running forklift operator on CRC or Openshift
@@ -104,4 +104,4 @@ export INVENTORY_SERVER_HOST=https://
 
 ### KinD
 
-The development cluster using kind will expose the inventory server on port 30088 `http://loclhost:30088`.
+The development cluster using kind will expose the inventory server on port 30444 `https://localhost:30444`.
diff --git a/package.json b/package.json
index 6d4e7f885..2cd26c7c8 100644
--- a/package.json
+++ b/package.json
@@ -35,7 +35,7 @@
     "cluster:delete": "bash ./ci/clean-cluster.sh",
     "e2e:cluster": "bash ./ci/deploy-cluster.sh",
     "e2e:build": "bash ./ci/build-and-push-images.sh",
-    "e2e:console": "bash ./ci/deploy-console.sh",
+    "e2e:console": "OKD_CONSOLE_YAML=./ci/yaml/okd-console.yaml bash ./ci/deploy-console.sh",
     "e2e:pre-test": "npm run e2e:cluster && npm run e2e:build && npm run e2e:console"
   },
   "devDependencies": {
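After a full deployment, both remapped NodePorts serve self-signed TLS, so a smoke test needs `curl -k` to skip certificate verification. A minimal sketch:

```bash
# Web console, previously http://localhost:30080
curl -k https://localhost:30443/

# Inventory API, previously http://localhost:30088
curl -k https://localhost:30444/
```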