Use tls server for in cluster console
Signed-off-by: yaacov <yzamir@redhat.com>
yaacov committed Jun 19, 2023
1 parent 3979328 commit 446fef1
Showing 14 changed files with 256 additions and 58 deletions.
README.md (2 changes: 1 addition & 1 deletion)
@@ -34,7 +34,7 @@ With a user logged in to existing Kubernetes or Openshift environment with Forklift
 # Start a local Openshift console server in the background.
 # - The console will be available at http://localhost:9000
 # - The inventory URL can be set using an environment variable,
-# ( default value for INVENTORY_SERVER_HOST is http://localhost:30088 )
+# ( default value for INVENTORY_SERVER_HOST is https://localhost:30444 )
 # for example:
 # export INVENTORY_SERVER_HOST=https://virt-konveyor-forklift.apps.example.com
 # - To close the console server run:
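With this change the default inventory endpoint is served over TLS on NodePort 30444. A quick smoke test of the new default, as a sketch: it assumes the kind cluster from ci/deploy-all.sh is running, uses -k because the serving certificate is self-signed, and the /providers path is assumed to be the inventory listing.

curl -k https://localhost:30444/providers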
ci/deploy-all.sh (26 changes: 21 additions & 5 deletions)
@@ -19,6 +19,8 @@ you may not be able to start a virtual machine or use some network interfaces.
 --with-ovirt-provider will install fake ovirt provider
 --with-vmware-provider will install vmware simulator.
 --with-openstack-provider will install packstack simulator.
+--no-kubevirt don't install kubevirt.
+--no-console don't install OKD console.
 "
 exit 0
 fi
@@ -47,15 +49,29 @@ echo "Found: ${CONTAINER_CMD}"
 # Create kind cluster
 bash ${script_dir}/deploy-cluster.sh
 
-# Install okd console
-bash ${script_dir}/deploy-console.sh
+# Install volume populator
+bash ${script_dir}/deploy-volume-populator.sh
 
-# Install kubevirt
-bash ${script_dir}/deploy-kubevirt.sh
+# Install cert manager
+bash ${script_dir}/deploy-cert-manager.sh
 
 # Install forklift
 bash ${script_dir}/deploy-forklift.sh
 
+# Install kubevirt
+if [[ $@ != *'--no-kubevirt'* ]]; then
+    bash ${script_dir}/deploy-kubevirt.sh
+fi
+
+# Install okd console
+if [[ $@ != *'--no-console'* ]]; then
+    # Get console service account and tls certificates
+    kubectl apply -f ${script_dir}/yaml/okd-console-tls-cert.yaml
+    kubectl wait certificate -n konveyor-forklift console-certificate --for condition=Ready=True --timeout=${K8S_TIMEOUT}
+
+    bash ${script_dir}/deploy-console.sh
+fi
+
 # Install mock providers
 if [[ $@ == *'--with-all-providers'* ]]; then
     # make the submodule the current working directory for running the script
@@ -94,7 +110,7 @@ echo "Cluster information:"
 echo " kubectl cluster-info --context kind-kind"
 echo ""
 echo " API Server: https://127.0.0.1:6443/"
-echo " Web console: http://localhost:30080/"
+echo " Web console: https://localhost:30443/"
 echo ""
 echo " configuration file - '${config_path}' ( example: cp ${config_path} ~/.kube/config )"
 echo " admin token - 'abcdef.0123456789abcdef'"
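The two new flags allow a lighter deployment when KubeVirt or the console is not needed; a usage sketch:

# deploy the cluster and forklift only, skipping KubeVirt and the OKD console
bash ci/deploy-all.sh --no-kubevirt --no-console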
ci/deploy-cert-manager.sh (17 changes: 17 additions & 0 deletions)
@@ -0,0 +1,17 @@
#!/bin/bash

set -euo pipefail
script_dir=$(dirname "$0")

K8S_TIMEOUT=${K8S_TIMEOUT:="360s"}

echo ""
echo "Installing Cert Manager"
echo "======================="

# Install cert-manager (only basic cert-manager functionality is used, so its latest version is not required)
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.1/cert-manager.yaml

kubectl wait deployment -n cert-manager cert-manager-cainjector --for condition=Available=True --timeout=${K8S_TIMEOUT}
kubectl wait deployment -n cert-manager cert-manager --for condition=Available=True --timeout=${K8S_TIMEOUT}
kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=${K8S_TIMEOUT}
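A quick way to confirm all three cert-manager deployments settled before the console certificate is requested (plain kubectl, not part of the script):

kubectl get deployment -n cert-manager
# expect cert-manager, cert-manager-cainjector and cert-manager-webhook at READY 1/1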
ci/deploy-cluster.sh (6 changes: 4 additions & 2 deletions)
@@ -48,8 +48,10 @@ nodes:
   extraPortMappings:
   - containerPort: 30080
     hostPort: 30080
-  - containerPort: 30088
-    hostPort: 30088
+  - containerPort: 30443
+    hostPort: 30443
+  - containerPort: 30444
+    hostPort: 30444
 containerdConfigPatches:
 - |-
   [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
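kind forwards each extraPortMapping from the host into the node container, so the two new NodePorts answer on localhost once the matching services exist. A sketch of checking both (self-signed certificates, hence -k):

curl -k https://localhost:30443/   # OKD console
curl -k https://localhost:30444/   # forklift inventory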
ci/deploy-console.sh (8 changes: 5 additions & 3 deletions)
@@ -4,11 +4,13 @@ set -euo pipefail
 script_dir=$(dirname "$0")
 
 K8S_TIMEOUT=${K8S_TIMEOUT:="360s"}
-OKD_CONSOLE_YAML=${script_dir}/yaml/okd-console.yaml
+OKD_CONSOLE_YAML=${OKD_CONSOLE_YAML:="${script_dir}/yaml/okd-console-tls.yaml"}
 
 FORKLIFT_PLUGIN_UPSTREAM_IMG=quay.io/kubev2v/forklift-console-plugin:latest
 FORKLIFT_PLUGIN_IMAGE=${FORKLIFT_PLUGIN_IMAGE:="quay.io/kubev2v/forklift-console-plugin:latest"}
 
+#--------------------
+
 # Install OKD console
 # -------------------
 echo ""
@@ -22,7 +24,7 @@ kubectl apply -f ${script_dir}/yaml/crds/console
 kubectl apply -f ${script_dir}/yaml/crds/forklift
 
 echo ""
-echo "deploy OKD console (port: 30080)"
+echo "deploy OKD console"
 
 cat ${OKD_CONSOLE_YAML} | \
     sed "s/${FORKLIFT_PLUGIN_UPSTREAM_IMG//\//\\/}/${FORKLIFT_PLUGIN_IMAGE//\//\\/}/g" | \
@@ -32,7 +34,7 @@ echo ""
 echo "waiting for OKD console service..."
 echo "=================================="
 
-kubectl wait deployment -n okd-console console --for condition=Available=True --timeout=${K8S_TIMEOUT}
+kubectl wait deployment -n konveyor-forklift console --for condition=Available=True --timeout=${K8S_TIMEOUT}
 
 echo ""
 echo "waiting for forklift console plugin service..."
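The sed line works because bash pattern substitution pre-escapes the slashes in both image references. A minimal standalone sketch of the ${var//\//\\/} idiom used above:

img='quay.io/kubev2v/forklift-console-plugin:latest'
# replace every / with \/ so the value is safe inside a sed s/.../.../g expression
echo "${img//\//\\/}"
# prints: quay.io\/kubev2v\/forklift-console-plugin:latest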
ci/deploy-forklift.sh (10 changes: 1 addition & 9 deletions)
@@ -62,14 +62,6 @@ EOF
 
 # --------------------
 
-echo ""
-echo "Installing VolumePopulator CRD"
-echo "=============================="
-
-kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/volume-data-source-validator/v1.0.1/client/config/crd/populator.storage.k8s.io_volumepopulators.yaml
-
-# --------------------
-
 # Wait for forklift operator to start, and create a controller instance
 echo ""
 echo "Waiting for forklift-operator (may take a few minutes)"
@@ -103,4 +95,4 @@ EOF
 # Wait for forklift inventory service, then expose it on port 30088
 while ! kubectl get service -n ${FORKLIFT_NAMESPACE} forklift-inventory; do sleep 30; done
 kubectl patch service -n ${FORKLIFT_NAMESPACE} forklift-inventory --type='merge' \
-    -p '{"spec":{"type":"NodePort","ports":[{"name":"api-http","protocol":"TCP","targetPort":8080,"port":8080,"nodePort":30088}]}}'
+    -p '{"spec":{"type":"NodePort","ports":[{"name":"api-https","protocol":"TCP","targetPort":8443,"port":8443,"nodePort":30444}]}}'
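After the patch the inventory service should report type NodePort with the HTTPS port. A hedged verification, assuming FORKLIFT_NAMESPACE is konveyor-forklift as in the rest of this change:

kubectl get service -n konveyor-forklift forklift-inventory
curl -k https://localhost:30444/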
ci/deploy-volume-populator.sh (9 changes: 9 additions & 0 deletions)
@@ -0,0 +1,9 @@
#!/bin/bash

set -euo pipefail

echo ""
echo "Installing VolumePopulator CRD"
echo "=============================="

kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/volume-data-source-validator/v1.0.1/client/config/crd/populator.storage.k8s.io_volumepopulators.yaml
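To confirm the CRD registered, query it by the name implied by the manifest file (plain kubectl, not part of the script):

kubectl get crd volumepopulators.populator.storage.k8s.io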
ci/start-console.sh (6 changes: 2 additions & 4 deletions)
@@ -12,8 +12,8 @@ PLUGIN_URL=${PLUGIN_URL:-"http://localhost:9001"}
 CONTAINER_NETWORK_TYPE=${CONTAINER_NETWORK_TYPE:-"host"}
 CONSOLE_IMAGE=${CONSOLE_IMAGE:-"quay.io/openshift/origin-console:latest"}
 CONSOLE_PORT=${CONSOLE_PORT:-9000}
-INVENTORY_SERVER_HOST=${INVENTORY_SERVER_HOST:-"http://localhost:30088"}
-MUST_GATHER_API_SERVER_HOST=${MUST_GATHER_API_SERVER_HOST:-"http://localhost:30089"}
+INVENTORY_SERVER_HOST=${INVENTORY_SERVER_HOST:-"https://localhost:30444"}
+MUST_GATHER_API_SERVER_HOST=${MUST_GATHER_API_SERVER_HOST:-"https://localhost:30445"}
 
 if [[ ${CONSOLE_IMAGE} =~ ^localhost/ ]]; then
     PULL_POLICY="never"
@@ -27,8 +27,6 @@ if podman container exists ${CONSOLE_CONTAINER_NAME}; then
     exit 1
 fi
 
-kubectl port-forward -n ${FORKLIFT_NAMESPACE} service/forklift-inventory 65300:8443 &
-
 # Base setup for the bridge
 if [[ $@ == *'--auth'* ]]; then
     setup_bridge_for_openshift_oauth
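The new HTTPS defaults can still be overridden per environment exactly as before; for example, pointing the local console at a remote Forklift deployment (a usage sketch reusing the README's example host):

export INVENTORY_SERVER_HOST=https://virt-konveyor-forklift.apps.example.com
bash ci/start-console.sh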
ci/stop-console.sh (2 changes: 0 additions & 2 deletions)
@@ -4,8 +4,6 @@ set -euo pipefail
 
 CONSOLE_CONTAINER_NAME=okd-console
 
-pkill kubectl
-
 # Test if the console is already running
 if podman container exists ${CONSOLE_CONTAINER_NAME}; then
     podman container stop ${CONSOLE_CONTAINER_NAME}
ci/yaml/okd-console-tls-cert.yaml (50 changes: 50 additions & 0 deletions)
@@ -0,0 +1,50 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: console
  namespace: konveyor-forklift
automountServiceAccountToken: true
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: console-console-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: console
    namespace: konveyor-forklift
---
apiVersion: v1
kind: Secret
metadata:
  name: console
  namespace: konveyor-forklift
  annotations:
    kubernetes.io/service-account.name: console
type: kubernetes.io/service-account-token
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: console-certificate
  namespace: konveyor-forklift
spec:
  commonName: console-certificate
  dnsNames:
    - console.konveyor-forklift.svc
    - console.konveyor-forklift.svc.cluster.local
  isCA: true
  issuerRef:
    group: cert-manager.io
    kind: Issuer
    name: forklift-issuer
  privateKey:
    algorithm: ECDSA
    size: 256
  secretName: console-serving-cert
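Once cert-manager reconciles this Certificate, the keypair lands in the console-serving-cert secret mounted by the console deployment. A sketch of inspecting the issued certificate with standard tooling:

kubectl get certificate -n konveyor-forklift console-certificate
kubectl get secret -n konveyor-forklift console-serving-cert -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -text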

ci/yaml/okd-console-tls.yaml (119 changes: 119 additions & 0 deletions)
@@ -0,0 +1,119 @@
apiVersion: v1
kind: Namespace
metadata:
  name: konveyor-forklift
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: forklift-console-plugin
  namespace: konveyor-forklift
  labels:
    app: forklift-console-plugin
spec:
  replicas: 1
  selector:
    matchLabels:
      app: forklift-console-plugin
  template:
    metadata:
      labels:
        app: forklift-console-plugin
    spec:
      containers:
        - name: forklift-console-plugin
          image: quay.io/kubev2v/forklift-console-plugin:latest
          ports:
            - containerPort: 8080
              protocol: TCP
          imagePullPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: forklift-console-plugin
  namespace: konveyor-forklift
  labels:
    app: forklift-console-plugin
spec:
  ports:
    - name: 8080-tcp
      protocol: TCP
      port: 8080
      targetPort: 8080
  selector:
    app: forklift-console-plugin
  type: ClusterIP
  sessionAffinity: None
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: console
  namespace: konveyor-forklift
spec:
  replicas: 1
  selector:
    matchLabels:
      name: console
  template:
    metadata:
      labels:
        name: console
    spec:
      serviceAccountName: console
      volumes:
        - name: forklift-cert
          secret:
            secretName: forklift-cert
        - name: console-serving-cert
          secret:
            secretName: console-serving-cert
      containers:
        - name: console
          image: quay.io/openshift/origin-console:latest
          volumeMounts:
            - name: console-serving-cert
              mountPath: /var/run/secrets/forklift
            - name: forklift-cert
              mountPath: /etc/ssl/certs/forklift-ca.crt
              subPath: ca.crt
          env:
            - name: BRIDGE_LISTEN
              value: "https://0.0.0.0:9000"
            - name: BRIDGE_TLS_CERT_FILE
              value: /var/run/secrets/forklift/tls.crt
            - name: BRIDGE_TLS_KEY_FILE
              value: /var/run/secrets/forklift/tls.key
            - name: BRIDGE_PLUGINS
              value: forklift-console-plugin=http://forklift-console-plugin.konveyor-forklift.svc.cluster.local:8080
            - name: BRIDGE_PLUGIN_PROXY
              value: |
                {"services":[
                  {
                    "consoleAPIPath":"/api/proxy/plugin/forklift-console-plugin/forklift-inventory/",
                    "endpoint":"https://forklift-inventory.konveyor-forklift.svc.cluster.local:8443",
                    "authorize":true
                  },
                  {
                    "consoleAPIPath":"/api/proxy/plugin/forklift-console-plugin/must-gather-api/",
                    "endpoint":"https://must-gather-api.konveyor-forklift.svc.cluster.local:8443",
                    "authorize":true
                  }]}
---
apiVersion: v1
kind: Service
metadata:
  name: console
  namespace: konveyor-forklift
spec:
  ports:
    - name: api-https
      nodePort: 30443
      port: 9000
      protocol: TCP
      targetPort: 9000
  selector:
    name: console
  sessionAffinity: None
  type: NodePort
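With BRIDGE_LISTEN, BRIDGE_TLS_CERT_FILE and BRIDGE_TLS_KEY_FILE set, the bridge terminates TLS itself using the cert-manager keypair, and BRIDGE_PLUGIN_PROXY forwards console API paths to the in-cluster HTTPS services. A sketch of reaching both through the NodePort; the proxied path sets "authorize":true, so an unauthenticated curl will normally get an auth error rather than inventory data:

curl -k https://localhost:30443/                                                               # console over TLS
curl -k https://localhost:30443/api/proxy/plugin/forklift-console-plugin/forklift-inventory/   # proxied inventory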