diff --git a/.github/workflows/check-sidecar-tasks.sh b/.github/workflows/check-sidecar-tasks.sh deleted file mode 100755 index 040b4785..00000000 --- a/.github/workflows/check-sidecar-tasks.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -eu - -make sidecar-tasks -if ! git diff --quiet deploy/central/tasks-chart/templates; then - echo "Sidecar Tasks are not up-to-date! Run 'make sidecar-tasks' to update." - exit 1 -else - echo "Sidecar Tasks are up-to-date." -fi diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 69bb8c8a..3ea14672 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -110,17 +110,11 @@ jobs: - name: Show disk space run: df -h - - - name: Install cluster tasks - run: make install-ods-tasks-kind - name: Setup Go 1.16 uses: actions/setup-go@v2 with: go-version: 1.16 - - - name: Check if sidecar tasks are up-to-date - run: ./.github/workflows/check-sidecar-tasks.sh - name: Check if docs are up-to-date run: ./.github/workflows/check-docs.sh diff --git a/CHANGELOG.md b/CHANGELOG.md index fd7b6671..1f693fb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ listed in the changelog. - Log artifact URL after upload ([#384](https://github.com/opendevstack/ods-pipeline/issues/384)) - Remove Tekton Triggers, moving the required functionality it provided into the new ODS pipeline manager ([#438](https://github.com/opendevstack/ods-pipeline/issues/438)) - Use UBI8 provided Python 3.9 toolset image ([#457](https://github.com/opendevstack/ods-pipeline/issues/457)) +- Change installation mode from centralized to local/namespaced ([#404](https://github.com/opendevstack/ods-pipeline/pull/404)) ### Fixed - Cannot enable debug mode in some tasks ([#377](https://github.com/opendevstack/ods-pipeline/issues/377)) diff --git a/Makefile b/Makefile index 71bb1bcb..abc3a5f3 100644 --- a/Makefile +++ b/Makefile @@ -33,16 +33,12 @@ lint-go: ## Run golangci-lint. .PHONY: lint-go lint-shell: ## Run shellcheck. - shellcheck scripts/*.sh build/package/scripts/* deploy/*/*.sh + shellcheck scripts/*.sh build/package/scripts/* deploy/*.sh .PHONY: lint-shell ##@ Building -sidecar-tasks: ## Render sidecar task variants. - go run cmd/sidecar-tasks/main.go -.PHONY: sidecar-tasks - -docs: sidecar-tasks ## Render documentation for tasks. +docs: ## Render documentation for tasks. go run cmd/docs/main.go .PHONY: docs @@ -92,7 +88,7 @@ clear-tmp-workspaces: ## Clear temporary workspaces created in testruns. ##@ KinD (local development environment) -prepare-local-env: create-kind-with-registry build-and-push-images install-tekton-pipelines run-bitbucket run-nexus run-sonarqube install-ods-tasks-kind ## Prepare local environment from scratch. +prepare-local-env: create-kind-with-registry build-and-push-images install-tekton-pipelines run-bitbucket run-nexus run-sonarqube ## Prepare local environment from scratch. .PHONY: prepare-local-env create-kind-with-registry: ## Create KinD cluster with local registry. @@ -107,10 +103,6 @@ build-and-push-images: ## Build and push images to local registry. cd scripts && ./build-and-push-images.sh .PHONY: build-and-push-images -install-ods-tasks-kind: ## KinD only! Apply ODS ClusterTask manifests in KinD - cd scripts && ./install-ods-tasks-kind.sh -.PHONY: install-ods-tasks-kind - run-bitbucket: ## Run Bitbucket server (using timebomb license, in "kind" network). cd scripts && ./run-bitbucket.sh .PHONY: run-bitbucket @@ -130,7 +122,6 @@ run-sonarqube: ## Run SonarQube server (in "kind" network). 
recreate-kind-cluster: ## Recreate KinD cluster including Tekton tasks. cd scripts && ./kind-with-registry.sh --recreate cd scripts && ./install-tekton-pipelines.sh - cd scripts && ./install-ods-tasks-kind.sh .PHONY: recreate-kind-cluster stop-local-env: ## Stop local environment. @@ -143,15 +134,7 @@ start-local-env: ## Restart stopped local environment. ##@ OpenShift -install-ods-central: ## OpenShift only! Apply ODS BuildConfig, ImageStream and ClusterTask manifests -ifeq ($(strip $(namespace)),) - @echo "Argument 'namespace' is required, e.g. make install-ods-central namespace=ods" - @exit 1 -endif - cd scripts && ./install-ods-central-resources.sh -n $(namespace) -.PHONY: install-ods-central - -start-ods-central-builds: ## OpenShift only! Start builds for each ODS BuildConfig +start-ods-builds: ## OpenShift only! Start builds for each ODS BuildConfig oc start-build ods-buildah oc start-build ods-finish oc start-build ods-go-toolset @@ -162,9 +145,9 @@ start-ods-central-builds: ## OpenShift only! Start builds for each ODS BuildConf oc start-build ods-start oc start-build ods-node16-typescript-toolset oc start-build ods-pipeline-manager -.PHONY: start-ods-central-builds +.PHONY: start-ods-builds -##@ User Installation +##@ Installation install-cd-namespace: ## Install resources in CD namespace via Helm. ifeq ($(strip $(namespace)),) diff --git a/README.md b/README.md index fd87afb4..c8dc3502 100644 --- a/README.md +++ b/README.md @@ -14,19 +14,7 @@ ODS Pipeline is well suited for regulated development (e.g. medical device softw ## Documentation -The documentation provided by ODS pipeline has three audiences: - -* **Admins** install and maintain a central ODS pipeline installation in an OpenShift cluster that can be used by many users. - -* **Users** consume an existing central ODS pipeline installation. Users install ODS pipeline resources into a namespace they own to run CI/CD pipelines for their repositories. - -* **Contributors** work on the ODS pipeline project itself, for example by improving existing tasks, adding new ones, updating documentation, etc. - -**Admin Guide** -* [Installation & Updating](/docs/admin-installation.adoc) - -**User Guide** -* [Installation & Updating](/docs/user-installation.adoc) +* [Installation & Updating](/docs/installation.adoc) * [Getting Started](/docs/getting-started.adoc) * [ODS.YAML Reference](/docs/ods-configuration.adoc) * [Task Reference](/docs/tasks) @@ -37,13 +25,6 @@ The documentation provided by ODS pipeline has three audiences: * [Example Project](/docs/example-project.adoc) * [FAQ](https://github.com/opendevstack/ods-pipeline/wiki/FAQ) -**Contributor Guide** -* [Repository Layout](/docs/repository-layout.adoc) -* [Development & Running Tests](/docs/development.adoc) -* [Artifacts](/docs/artifacts.adoc) -* [Creating an ODS task](/docs/creating-an-ods-task.adoc) -* [Releasing a new version](/docs/releasing.adoc) - This repository also hosts the design documents that describe ODS pipeline more formally. Those design documents provide more detail and background on goals, requirements and architecture decisions and may be useful for all audiences. 
* [Stakeholder Requirements](/docs/design/stakeholder-requirements.adoc) @@ -62,3 +43,11 @@ For OpenShift Pipelines releases and its relationship to Tekton and OpenShift ve |---|---|---| | [0.2](https://github.com/opendevstack/ods-pipeline/milestone/2) | 1.5 | 4.0.0 | | [0.1](https://github.com/opendevstack/ods-pipeline/milestone/1) | 1.5 | 4.0.0 | + +## Contributing + +* [Repository Layout](/docs/repository-layout.adoc) +* [Development & Running Tests](/docs/development.adoc) +* [Artifacts](/docs/artifacts.adoc) +* [Creating an ODS task](/docs/creating-an-ods-task.adoc) +* [Releasing a new version](/docs/releasing.adoc) diff --git a/build/package/Dockerfile.buildah b/build/package/Dockerfile.buildah index 93090f21..a9963303 100644 --- a/build/package/Dockerfile.buildah +++ b/build/package/Dockerfile.buildah @@ -14,8 +14,6 @@ RUN cd cmd/build-push-image && CGO_ENABLED=0 go build -o /usr/local/bin/ods-buil # Copied from https://catalog.redhat.com/software/containers/detail/5dca3d76dd19c71643b226d5?container-tabs=dockerfile&tag=8.4&push_date=1621383358000. FROM registry.access.redhat.com/ubi8:8.4 -ARG aquasecScannerUrl - ENV BUILDAH_VERSION=1.23 \ SKOPEO_VERSION=1.5 @@ -28,16 +26,6 @@ RUN useradd build; dnf -y module enable container-tools:rhel8; dnf -y update; dn RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' /etc/containers/storage.conf RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock -# Optionally install Aqua scanner. -RUN if [ -z $aquasecScannerUrl ] ; then echo 'Skipping Aqua scanner installation!' ; else echo 'Installing Aqua scanner... getting binary from' $aquasecScannerUrl \ - && curl -v -L $aquasecScannerUrl -o aquasec \ - && mv aquasec /usr/local/bin/ \ - && chmod +x /usr/local/bin/aquasec \ - && echo 'Aqua scanner version:' \ - && aquasec version \ - && echo 'Aqua scanner installation completed!'; \ - fi - # Set up environment variables to note that this is not starting with usernamespace and default to # isolate the filesystem with chroot. 
ENV _BUILDAH_STARTED_IN_USERNS="" BUILDAH_ISOLATION=chroot diff --git a/cmd/docs/main.go b/cmd/docs/main.go index 1e021293..4dfe5f20 100644 --- a/cmd/docs/main.go +++ b/cmd/docs/main.go @@ -10,7 +10,7 @@ import ( func main() { err := docs.RenderTasks( - filepath.Join(projectpath.Root, "deploy/central/tasks-chart"), + filepath.Join(projectpath.Root, "deploy/ods-pipeline/charts/tasks"), filepath.Join(projectpath.Root, "docs/tasks"), ) if err != nil { diff --git a/cmd/pipeline-manager/main.go b/cmd/pipeline-manager/main.go index 7151e1c4..77889429 100644 --- a/cmd/pipeline-manager/main.go +++ b/cmd/pipeline-manager/main.go @@ -25,7 +25,7 @@ const ( tokenEnvVar = "ACCESS_TOKEN" webhookSecretEnvVar = "WEBHOOK_SECRET" taskKindEnvVar = "ODS_TASK_KIND" - taskKindDefault = "ClusterTask" + taskKindDefault = "Task" taskSuffixEnvVar = "ODS_TASK_SUFFIX" storageProvisionerEnvVar = "ODS_STORAGE_PROVISIONER" storageClassNameEnvVar = "ODS_STORAGE_CLASS_NAME" diff --git a/cmd/sidecar-tasks/main.go b/cmd/sidecar-tasks/main.go deleted file mode 100644 index 463c98c7..00000000 --- a/cmd/sidecar-tasks/main.go +++ /dev/null @@ -1,94 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "log" - "strings" - - tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/yaml" -) - -func main() { - tasksWithSidecars := []string{ - "ods-build-go", - "ods-build-gradle", - "ods-build-python", - "ods-build-typescript", - } - t, err := parseTasks(tasksWithSidecars) - if err != nil { - log.Fatal(err) - } - adjustTasks(t) - err = writeTasks(t) - if err != nil { - log.Fatal(err) - } -} - -func parseTasks(taskNames []string) (map[string]*tekton.ClusterTask, error) { - tasks := map[string]*tekton.ClusterTask{} - for _, task := range taskNames { - fmt.Printf("Parsing task %s ...\n", task) - b, err := ioutil.ReadFile(fmt.Sprintf("deploy/central/tasks-chart/templates/task-%s.yaml", task)) - if err != nil { - return nil, err - } - var t tekton.ClusterTask - err = yaml.Unmarshal(b, &t) - if err != nil { - return nil, err - } - tasks[task] = &t - } - return tasks, nil -} - -func adjustTasks(tasks map[string]*tekton.ClusterTask) { - for name, t := range tasks { - fmt.Printf("Adding sidecar to task %s ...\n", name) - cleanName := strings.Replace(t.Name, "{{default \"ods\" .Values.taskPrefix}}", "ods", 1) - cleanName = strings.Replace(cleanName, "{{.Values.taskSuffix}}", "", 1) - t.Name = strings.Replace(t.Name, "{{.Values.taskSuffix}}", "-with-sidecar{{.Values.taskSuffix}}", 1) - t.Spec.Description = t.Spec.Description + ` -**Sidecar variant!** Use this task if you need to run a container next to the build task. -For example, this could be used to run a database to allow for integration tests. -The sidecar image to must be supplied via ` + "`sidecar-image`" + `. -Apart from the sidecar, the task is an exact copy of ` + "`" + cleanName + "`" + `.` - t.Spec.Params = append(t.Spec.Params, tekton.ParamSpec{ - Name: "sidecar-image", - Description: "Image to use for sidecar", - Type: tekton.ParamTypeString, - }) - t.Spec.Sidecars = []tekton.Sidecar{ - { - Container: corev1.Container{ - Name: "sidecar", - Image: "$(params.sidecar-image)", - }, - }, - } - } -} - -func writeTasks(tasks map[string]*tekton.ClusterTask) error { - for name, t := range tasks { - fmt.Printf("Writing sidecar task %s ...\n", name) - out, err := yaml.Marshal(t) - if err != nil { - return err - } - out = append([]byte("# Generated by cmd/sidecar-tasks/main.go; DO NOT EDIT.\n"), out...) 
- err = ioutil.WriteFile( - fmt.Sprintf("deploy/central/tasks-chart/templates/task-%s-with-sidecar.yaml", name), - out, 0644, - ) - if err != nil { - return err - } - } - return nil -} diff --git a/deploy/cd-namespace/.gitignore b/deploy/.gitignore similarity index 100% rename from deploy/cd-namespace/.gitignore rename to deploy/.gitignore diff --git a/deploy/README.md b/deploy/README.md index 45150405..30e9b66c 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -1,9 +1,27 @@ -# deploy +# Deployment This directory contains container orchestration deployment configurations and templates. -Manifests in `central` are applied once per cluster by an ODS administrator. +Manifests in `ods-pipeline` are applied once per project by a project administrator. -Manifests in `cd-namespace` are applied once per cd-namespace by an ODS user. -The resulting resources in the `cd-namespace` use the resources (e.g. the images) -installed centrally by an ODS administrator. +## Subcharts + +The `tasks`, `images` and `setup` subcharts are maintained in https://github.com/opendevstack/ods-pipeline, and may be used by project admins to control the deployment of ODS pipeline resources in the respective project namespace in OpenShift. + +### Subcharts Contents + +The resources are defined using Helm: +* `BuildConfig` and `ImageStream` resources (in the `images` subchart) +* `Task` resources (in `tasks` subchart) +* `ConfigMap` and `Secret` resources used by ODS tasks (in `setup` subchart) +* ODS pipeline manager (`Service`/`Deployment`) (in `setup` subchart) + +The resources of the `images` subchart are only applicable for OpenShift clusters. The subcharts may individually be enabled or disabled via the umbrella chart's `values.yaml`. + +### Versioning + +In a KinD cluster there are no versions. Images use the implicit `latest` tag. That makes testing and local development easy. + +In OpenShift, however, images and tasks are versioned. That provides the greatest stability. + +Remember to adjust the `values.yaml` files every time there is a new version. diff --git a/deploy/cd-namespace/README.md b/deploy/cd-namespace/README.md deleted file mode 100644 index 73018c27..00000000 --- a/deploy/cd-namespace/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Deployment of "cd-namespace" - -This directory is maintained in https://github.com/opendevstack/ods-pipeline, and may be used by ODS pipeline users to control the deployment of ODS pipeline resources in their "cd namespace" in OpenShift. For this purpose, this directory may be added to another Git repository via `git subtree` as explained in the [User Installation Guide](/docs/user-installation.adoc). - -## Directory Contents - -The resources are defined using Helm: -* `ConfigMap` and `Secret` resources used by ODS tasks -* ODS pipeline manager (`Service`/`Deployment`) diff --git a/deploy/cd-namespace/chart/.helmignore b/deploy/cd-namespace/chart/.helmignore deleted file mode 100644 index 0e8a0eb3..00000000 --- a/deploy/cd-namespace/chart/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
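Following on from the subchart description in the new deploy/README.md above: a minimal sketch of how the umbrella chart's values.yaml might toggle the individual subcharts, assuming the common Helm `<subchart>.enabled` condition convention. The subchart names (`setup`, `tasks`, `images`) come from the README text; the `enabled` keys are an assumption and the actual key names in the chart may differ.

```yaml
# Hypothetical excerpt of the umbrella chart's values.yaml (key names assumed,
# based on standard Helm subchart conditions; check the chart for the real keys).
setup:
  enabled: true
tasks:
  enabled: true
images:
  enabled: false   # BuildConfig/ImageStream resources only apply to OpenShift clusters
```

Disabling the `images` subchart would presumably be the typical choice on a KinD cluster, where images are built and pushed to the local registry rather than built via OpenShift BuildConfigs.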
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/deploy/cd-namespace/chart/secrets.yaml b/deploy/cd-namespace/chart/secrets.yaml deleted file mode 100644 index ff171206..00000000 --- a/deploy/cd-namespace/chart/secrets.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Bitbucket -# Bitbucket personal access token (PAT) in clear text. This token needs to be -# created in Bitbucket for the user identified by "bitbucketUsername". -# The user requires write permissions in order to set build status or add code -# insights on commits. -bitbucketAccessToken: '' -# Shared secret between webhooks in Bitbucket and the event listener. Create -# a random secret and enter it here, then use it later on as the secret in any -# webhook you setup. The value needs to be in clear text. -bitbucketWebhookSecret: '' - -# Nexus -# Nexus password for the user identified by "nexusUsername" in clear text. -nexusPassword: '' - -# Sonar -# SonarQube password for the user identified by "sonarUsername" in clear text. -sonarAuthToken: '' - -# Aqua -# Aqua password for the user identified by "aquaUsername" in clear text. -# Leave empty when not using Aqua. -aquaPassword: '' diff --git a/deploy/cd-namespace/chart/values.kind.yaml b/deploy/cd-namespace/chart/values.kind.yaml deleted file mode 100644 index 5cdd7419..00000000 --- a/deploy/cd-namespace/chart/values.kind.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# General -serviceAccountName: 'pipeline' - -# Cluster -consoleUrl: 'http://example.com' - -# Pipeline Manager -pipelineManager: - storageProvisioner: '' - storageClassName: 'standard' - storageSize: '2Gi' - replicaCount: 1 - image: - registry: localhost:5000 - namespace: ods - repository: ods-pipeline-manager - pullPolicy: Always - tag: "latest" - resources: - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi - -# Notification Webhook -notification: - # notifications are disabled by default, i.e. the ConfigMap won't be installed - enabled: false - # URL of the configured webhook - url: 'http://example.com' - # The HTTP method to be used - method: 'POST' - # The HTTP content type header - contentType: 'application/json' - # Specify the outcomes you want to be notified of (allowed values: c.f. 
- # https://tekton.dev/docs/pipelines/pipelines/#using-aggregate-execution-status-of-all-tasks) - notifyOnStatus: - - 'Failed' - # Template to be processed and accepted by the configured webhook in use - # Below example might work for Microsoft Teams - requestTemplate: |- - { - "@type": "MessageCard", - "@context": "http://schema.org/extensions", - "themeColor": {{if eq .OverallStatus "Succeeded"}}"237b4b"{{else}}"c4314b"{{ end }}, - "summary": "{{.ODSContext.Project}} - ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", - "sections": [ - { - "activityTitle": "ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", - "activitySubtitle": "On Project {{.ODSContext.Project}}", - "activityImage": "https://avatars.githubusercontent.com/u/38974438?s=200&v=4", - "facts": [ - { - "name": "GitRef", - "value": "{{.ODSContext.GitRef}}" - }, - { - "name": "Environment", - "value": "{{.ODSContext.Environment}}" - } - ], - "markdown": true - } - ], - "potentialAction": [ - { - "@type": "OpenUri", - "name": "Go to PipelineRun", - "targets": [ - { - "os": "default", - "uri": "{{.PipelineRunURL}}" - } - ] - } - ] - } diff --git a/deploy/cd-namespace/chart/values.yaml b/deploy/cd-namespace/chart/values.yaml deleted file mode 100644 index 01292d48..00000000 --- a/deploy/cd-namespace/chart/values.yaml +++ /dev/null @@ -1,141 +0,0 @@ -# General -# Serviceaccount name to use for pipeline resources. -serviceAccountName: 'pipeline' -# Whether to enable debug mode -debug: 'false' - -# Bitbucket -# Bitbucket URL (including scheme). Example: https://bitbucket.example.com. -bitbucketUrl: '' -# Bitbucket username. Example: cd_user. -bitbucketUsername: '' - -# Nexus -# Nexus URL (including scheme). Example: https://nexus.example.com. -nexusUrl: '' -# Nexus username. Example: developer. -nexusUsername: '' -# Nexus repository for temporary artifacts (stage = dev) -nexusTemporaryRepository: 'ods-temporary-artifacts' -# Nexus repository for permanent artifacts (stage = qa|prod) -nexusPermanentRepository: 'ods-permanent-artifacts' - -# Sonar -# SonarQube URL (including scheme). Example: https://sonarqube.example.com. -sonarUrl: '' -# SonarQube username. Example: developer. -sonarUsername: '' -# SonarQube edition. Valid options: 'community', 'developer', 'enterprise' or 'datacenter' -sonarEdition: 'community' - -# Aqua -# Aqua URL (including scheme). Example: https://aqua.example.com. -# Leave empty when not using Aqua. -aquaUrl: '' -# Aqua registry name. -# Leave empty when not using Aqua. -aquaRegistry: '' -# Aqua username. Example: developer. -# Leave empty when not using Aqua. -aquaUsername: '' - -# Cluster -# URL (including scheme) of the OpenShift Web Console. -consoleUrl: 'http://example.com' - -# Notification Webhook -notification: - # notifications are disabled by default, i.e. the ConfigMap won't be installed - enabled: false - # URL of the configured webhook - url: 'http://example.com' - # The HTTP method to be used - method: 'POST' - # The HTTP content type header - contentType: 'application/json' - # Specify the outcomes you want to be notified of (allowed values: c.f. 
- # https://tekton.dev/docs/pipelines/pipelines/#using-aggregate-execution-status-of-all-tasks) - notifyOnStatus: - - 'Failed' - # Template to be processed and accepted by the configured webhook in use - # Below example might work for Microsoft Teams - requestTemplate: |- - { - "@type": "MessageCard", - "@context": "http://schema.org/extensions", - "themeColor": {{if eq .OverallStatus "Succeeded"}}"237b4b"{{else}}"c4314b"{{ end }}, - "summary": "{{.ODSContext.Project}} - ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", - "sections": [ - { - "activityTitle": "ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", - "activitySubtitle": "On Project {{.ODSContext.Project}}", - "activityImage": "https://avatars.githubusercontent.com/u/38974438?s=200&v=4", - "facts": [ - { - "name": "GitRef", - "value": "{{.ODSContext.GitRef}}" - }, - { - "name": "Environment", - "value": "{{.ODSContext.Environment}}" - } - ], - "markdown": true - } - ], - "potentialAction": [ - { - "@type": "OpenUri", - "name": "Go to PipelineRun", - "targets": [ - { - "os": "default", - "uri": "{{.PipelineRunURL}}" - } - ] - } - ] - } - -# Pipeline(Run) Pruning -# Minimum hours to keep a pipeline run. Has precendence over pipelineRunMaxKeepRuns. -# Must be at least 1. -pipelineRunMinKeepHours: '48' -# Maximum number of pipeline runs to keep per stage (stages: DEV, QA, PROD). -# Must be at least 1. -pipelineRunMaxKeepRuns: '20' - -# Pipeline Manager -pipelineManager: - # PVC (used for the pipeline workspace) - # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. - storageProvisioner: 'kubernetes.io/aws-ebs' - # Storage class. On AWS backed clusters, use 'gp2'. - storageClassName: 'gp2' - # Storage size. Defaults to 2Gi unless set explicitly here. - storageSize: '5Gi' - # Number of replicas to run for the pipeline manager. - replicaCount: 1 - image: - # Image registry from which to pull the pipeline manager container image. - registry: image-registry.openshift-image-registry.svc:5000 - # Namespace from which to pull the pipeline manager container image. - # If not given, the image is pulled from the release namespace. - namespace: ods - # Repository (ImageStream) from which to pull the pipeline manager - # container image. - # If not given, the image name equals the chart name. - repository: ods-pipeline-manager - # Pull policy. - pullPolicy: Always - # Image tag to pull. - # If not given, defaults to the chart appVersion. - # tag: "0.2.0" - # Deployment pod resources. Typically these settings should not need to change. - resources: - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi diff --git a/deploy/central/.gitignore b/deploy/central/.gitignore deleted file mode 100644 index 231da07d..00000000 --- a/deploy/central/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.dec diff --git a/deploy/central/README.md b/deploy/central/README.md deleted file mode 100644 index 88a978c7..00000000 --- a/deploy/central/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Deployment of central "ODS namespace" - -This directory is maintained in https://github.com/opendevstack/ods-pipeline, and may be used by ODS pipeline admins to control the deployment of ODS pipeline resources in the centrals "ODS namespace" in OpenShift. For this purpose, this directory may be added to another Git repository via `git subtree` as explained in the [Admin Installation Guide](/docs/admin-installation.adoc). 
- -## Directory Contents - -The resources are defined using Helm: -* `BuildConfig` and `ImageStream` resources (in folder `images-chart`) -* `ClusterTask` resources (in folder `tasks-chart`) - -The resources under `images-chart` are only applicable for OpenShift clusters. - -## Versioning - -In a KinD cluster there are no versions. Images use the implicit `latest` tag. That makes testing and local development easy. - -In OpenShift, however, images and tasks are versioned. That provides the greatest stability. - -Remember to adjust the `values.yaml` files every time there is a new version. diff --git a/deploy/central/images-chart/.gitignore b/deploy/central/images-chart/.gitignore deleted file mode 100644 index bd1a3d34..00000000 --- a/deploy/central/images-chart/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -values.custom.yaml -values.generated.yaml diff --git a/deploy/central/images-chart/secrets.yaml b/deploy/central/images-chart/secrets.yaml deleted file mode 100644 index 1adc5881..00000000 --- a/deploy/central/images-chart/secrets.yaml +++ /dev/null @@ -1,3 +0,0 @@ -# Aqua -# URL to download aqua-scanner binary. The URL must contain basic authentication. -aquasecScannerUrl: '' diff --git a/deploy/central/images-chart/templates/bc-ods-finish.yaml b/deploy/central/images-chart/templates/bc-ods-finish.yaml deleted file mode 100644 index de987371..00000000 --- a/deploy/central/images-chart/templates/bc-ods-finish.yaml +++ /dev/null @@ -1,29 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-finish -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-finish:{{.Values.imageTag}}' - resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.finish - from: - kind: DockerImage - name: 'registry.redhat.io/ubi8/ubi-minimal:8.4' - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git a/deploy/central/images-chart/templates/bc-ods-go-toolset.yaml b/deploy/central/images-chart/templates/bc-ods-go-toolset.yaml deleted file mode 100644 index 1f3c32c8..00000000 --- a/deploy/central/images-chart/templates/bc-ods-go-toolset.yaml +++ /dev/null @@ -1,29 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-go-toolset -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-go-toolset:{{.Values.imageTag}}' - resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.go-toolset - from: - kind: DockerImage - name: 'registry.redhat.io/ubi8/go-toolset:1.16.12' - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git a/deploy/central/images-chart/templates/bc-ods-gradle-toolset.yaml b/deploy/central/images-chart/templates/bc-ods-gradle-toolset.yaml deleted file mode 100644 index 1d80e2cc..00000000 --- a/deploy/central/images-chart/templates/bc-ods-gradle-toolset.yaml +++ /dev/null @@ -1,29 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-gradle-toolset -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-gradle-toolset:{{.Values.imageTag}}' - 
resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.gradle-toolset - from: - kind: DockerImage - name: 'registry.redhat.io/ubi8/openjdk-17:1.10' - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git a/deploy/central/images-chart/templates/bc-ods-helm.yaml b/deploy/central/images-chart/templates/bc-ods-helm.yaml deleted file mode 100644 index 00e61230..00000000 --- a/deploy/central/images-chart/templates/bc-ods-helm.yaml +++ /dev/null @@ -1,29 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-helm -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-helm:{{.Values.imageTag}}' - resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.helm - from: - kind: DockerImage - name: 'registry.redhat.io/ubi8/ubi-minimal:8.4' - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git a/deploy/central/images-chart/templates/bc-ods-node16-typescript-toolset.yaml b/deploy/central/images-chart/templates/bc-ods-node16-typescript-toolset.yaml deleted file mode 100644 index de162158..00000000 --- a/deploy/central/images-chart/templates/bc-ods-node16-typescript-toolset.yaml +++ /dev/null @@ -1,29 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-node16-typescript-toolset -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-node16-typescript-toolset:{{.Values.imageTag}}' - resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.node16-typescript-toolset - from: - kind: DockerImage - name: "registry.redhat.io/ubi8/nodejs-16:1" - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git a/deploy/central/images-chart/templates/bc-ods-pipeline-manager.yaml b/deploy/central/images-chart/templates/bc-ods-pipeline-manager.yaml deleted file mode 100644 index db768cf1..00000000 --- a/deploy/central/images-chart/templates/bc-ods-pipeline-manager.yaml +++ /dev/null @@ -1,34 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-pipeline-manager -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-pipeline-manager:{{.Values.imageTag}}' - resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.pipeline-manager - from: - kind: DockerImage - name: 'registry.redhat.io/ubi8/ubi-minimal:8.4' - buildArgs: - - name: taskKind - value: '{{default "ClusterTask" .Values.taskKind}}' - - name: taskSuffix - value: '{{.Values.taskSuffix}}' - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git 
a/deploy/central/images-chart/templates/bc-ods-python-toolset.yaml b/deploy/central/images-chart/templates/bc-ods-python-toolset.yaml deleted file mode 100644 index 7b711205..00000000 --- a/deploy/central/images-chart/templates/bc-ods-python-toolset.yaml +++ /dev/null @@ -1,29 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-python-toolset -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-python-toolset:{{.Values.imageTag}}' - resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.python-toolset - from: - kind: DockerImage - name: "registry.redhat.io/ubi8/ubi-minimal:8.4" - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git a/deploy/central/images-chart/templates/bc-ods-sonar.yaml b/deploy/central/images-chart/templates/bc-ods-sonar.yaml deleted file mode 100644 index 56022eb4..00000000 --- a/deploy/central/images-chart/templates/bc-ods-sonar.yaml +++ /dev/null @@ -1,29 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-sonar -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-sonar:{{.Values.imageTag}}' - resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.sonar - from: - kind: DockerImage - name: 'registry.redhat.io/ubi8/ubi-minimal:8.4' - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git a/deploy/central/images-chart/templates/bc-ods-start.yaml b/deploy/central/images-chart/templates/bc-ods-start.yaml deleted file mode 100644 index 699d9147..00000000 --- a/deploy/central/images-chart/templates/bc-ods-start.yaml +++ /dev/null @@ -1,29 +0,0 @@ -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: ods-start -spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 'ods-start:{{.Values.imageTag}}' - resources: {} - successfulBuildsHistoryLimit: 5 - failedBuildsHistoryLimit: 5 - strategy: - type: Docker - dockerStrategy: - dockerfilePath: build/package/Dockerfile.start - from: - kind: DockerImage - name: 'registry.redhat.io/ubi8/ubi-minimal:8.4' - pullSecret: - name: registry.redhat.io - postCommit: {} - source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' - runPolicy: Serial diff --git a/deploy/central/images-chart/values.yaml b/deploy/central/images-chart/values.yaml deleted file mode 100644 index 59c525d1..00000000 --- a/deploy/central/images-chart/values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# Tasks -# Image tag to use for images referenced by tasks. -imageTag: 0.2.0 -# Suffix to append to the task name. 
-taskSuffix: -v0-2-0 - -# Git -odsPipelineGitRepoUri: https://github.com/opendevstack/ods-pipeline -odsPipelineGitRepoRef: v0.2.0 diff --git a/deploy/central/install.sh b/deploy/central/install.sh deleted file mode 100755 index 19aff155..00000000 --- a/deploy/central/install.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env bash -set -ue - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -VERBOSE="false" -DRY_RUN="false" -DIFF="true" -NAMESPACE="" -RELEASE_NAME="" -VALUES_FILE="" -CHART_DIR="" -CHART="" - -while [[ "$#" -gt 0 ]]; do - case $1 in - - -v|--verbose) VERBOSE="true";; - - -n|--namespace) NAMESPACE="$2"; shift;; - -n=*|--namespace=*) NAMESPACE="${1#*=}";; - - -f|--values) VALUES_FILE="$2"; shift;; - -f=*|--values=*) VALUES_FILE="${1#*=}";; - - -c|--chart) CHART="$2"; shift;; - -c=*|--chart=*) CHART="${1#*=}";; - - --no-diff) DIFF="false";; - - --dry-run) DRY_RUN="true";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -cd "${SCRIPT_DIR}" - -VALUES_FILES=$(echo "$VALUES_FILE" | tr "," "\n") -VALUES_ARGS=() -for valueFile in ${VALUES_FILES}; do - VALUES_ARGS+=("--values=${valueFile}") -done - -if [ -z "${CHART}" ]; then - echo "--chart is required" - exit 1 -elif [ -z "${NAMESPACE}" ]; then - echo "--namespace is required" - exit 1 -elif [ "${CHART}" == "tasks" ]; then - CHART_DIR="./tasks-chart" - # Add the taskSuffix into the Helm release name so that we get one Helm - # release per version, which avoids deleting old tasks when new ones are - # installed. - greppedFile="${CHART_DIR}/values.yaml" - if [ -f values.tasks.yaml ]; then - greppedFile="values.tasks.yaml" - fi - RELEASE_SUFFIX=$(grep "taskSuffix:" "${greppedFile}" | awk '{print $NF}' | tr -d "'\"") - RELEASE_NAME="ods-pipeline-tasks${RELEASE_SUFFIX}" -elif [ "${CHART}" == "images" ]; then - CHART_DIR="./images-chart" - RELEASE_NAME="ods-pipeline-images" -else - echo "--chart is not valid. Use 'tasks' or 'images'." - exit 1 -fi - - - -if [ "${VERBOSE}" == "true" ]; then - set -x -fi - -DIFF_UPGRADE_ARGS=(diff upgrade) -UPGRADE_ARGS=(upgrade) -if helm plugin list | grep secrets &> /dev/null; then - DIFF_UPGRADE_ARGS=(secrets diff upgrade) - UPGRADE_ARGS=(secrets upgrade) -fi - -echo "Installing Helm release ${RELEASE_NAME} ..." -if [ "${DIFF}" == "true" ]; then - if helm -n "${NAMESPACE}" \ - "${DIFF_UPGRADE_ARGS[@]}" --install --detailed-exitcode \ - "${VALUES_ARGS[@]}" \ - ${RELEASE_NAME} ${CHART_DIR}; then - echo "Helm release already up-to-date." 
- else - if [ "${DRY_RUN}" == "true" ]; then - echo "(skipping in dry-run)" - else - helm -n "${NAMESPACE}" \ - "${UPGRADE_ARGS[@]}" --install \ - "${VALUES_ARGS[@]}" \ - ${RELEASE_NAME} ${CHART_DIR} - fi - fi -else - if [ "${DRY_RUN}" == "true" ]; then - echo "(skipping in dry-run)" - else - NAMESPACE_FLAG="" - if [ -n "${NAMESPACE}" ]; then - NAMESPACE_FLAG="-n ${NAMESPACE}" - fi - # shellcheck disable=SC2086 - helm ${NAMESPACE_FLAG} \ - "${UPGRADE_ARGS[@]}" --install \ - "${VALUES_ARGS[@]}" \ - ${RELEASE_NAME} ${CHART_DIR} - fi -fi diff --git a/deploy/central/tasks-chart/.gitignore b/deploy/central/tasks-chart/.gitignore deleted file mode 100644 index bd1a3d34..00000000 --- a/deploy/central/tasks-chart/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -values.custom.yaml -values.generated.yaml diff --git a/deploy/central/tasks-chart/templates/task-ods-build-go-with-sidecar.yaml b/deploy/central/tasks-chart/templates/task-ods-build-go-with-sidecar.yaml deleted file mode 100644 index 9d607c9e..00000000 --- a/deploy/central/tasks-chart/templates/task-ods-build-go-with-sidecar.yaml +++ /dev/null @@ -1,165 +0,0 @@ -# Generated by cmd/sidecar-tasks/main.go; DO NOT EDIT. -apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' -metadata: - creationTimestamp: null - name: '{{default "ods" .Values.taskPrefix}}-build-go-with-sidecar{{.Values.taskSuffix}}' -spec: - description: |- - Builds Go (module) applications. - - The exact build recipe can be found at - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[build/package/scripts/build-go.sh]. - - The following provides an overview of the performed steps: - - - Source files are checked to be formatted with `gofmt`. - - The go module cache is configured to be on the cache location of the PVC by setting environment variable `GOMODCACHE` to `.ods-cache/deps/gomod` (see link:https://go.dev/ref/mod#module-cache[go module cache]). - - `golanci-lint` is run. The linter can be configured via a - config file as described in the - link:https://golangci-lint.run/usage/configuration/[configuration documentation]. - - Tests are executed. A potential `vendor` directory is excluded. Test - results are converted into xUnit format. If test artifacts are already present for - the current Git commit SHA, testing is skipped. - - Application binary (named `app`) is built and placed into the directory - specified by `output-dir`. - - Finally, the application source code is scanned by SonarQube. - Default SonarQube project properties are provided unless `sonar-project.properties` - is present. - When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate - is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. - The SonarQube scan will include parameters to perform a pull request analysis if - there is an open pull request for the branch being built. If the - link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] - is setup properly, pull request decoration in Bitbucket is done automatically. - - The following artifacts are generated by the build task and placed into `.ods/artifacts/` - - * `code-coverage/` - ** `coverage.out` - * `lint-reports/` - ** `report.txt` - * `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` - * `xunit-reports/` - ** `report.xml` - - **Sidecar variant!** Use this task if you need to run a container next to the build task. 
- For example, this could be used to run a database to allow for integration tests. - The sidecar image to must be supplied via `sidecar-image`. - Apart from the sidecar, the task is an exact copy of `ods-build-go`. - params: - - default: . - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - name: working-dir - type: string - - default: "false" - description: Whether to enable CGO. When not enabled the build will set `CGO_ENABLED=0`. - name: enable-cgo - type: string - - default: linux - description: '`GOOS` variable (the execution operating system such as `linux`, - `windows`).' - name: go-os - type: string - - default: amd64 - description: '`GOARCH` variable (the execution architecture such as `arm`, `amd64`).' - name: go-arch - type: string - - default: docker - description: Path to the directory into which the resulting Go binary should be - copied, relative to `working-dir`. This directory may then later be used as - Docker context for example. - name: output-dir - type: string - - default: "" - description: Script to execute before running tests, relative to the working directory. - name: pre-test-script - type: string - - default: "false" - description: Whether the SonarQube quality gate needs to pass for the task to - succeed. - name: sonar-quality-gate - type: string - - default: "false" - description: Whether to skip SonarQube analysis or not. - name: sonar-skip - type: string - - description: Image to use for sidecar - name: sidecar-image - type: string - sidecars: - - Workspaces: null - image: $(params.sidecar-image) - name: sidecar - resources: {} - steps: - - env: - - name: HOME - value: /tekton/home - - name: CI - value: "true" - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-go-toolset:{{.Values.imageTag}}' - name: build-go-binary - resources: {} - script: |2 - - # build-go is build/package/scripts/build-go.sh. - build-go \ - --working-dir=$(params.working-dir) \ - --enable-cgo=$(params.enable-cgo) \ - --go-os=$(params.go-os) \ - --go-arch=$(params.go-arch) \ - --pre-test-script=$(params.pre-test-script) \ - --output-dir=$(params.output-dir) \ - --debug=${DEBUG} - workingDir: $(workspaces.source.path) - - env: - - name: HOME - value: /tekton/home - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.imageTag}}' - name: scan-with-sonar - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - # sonar is built from cmd/sonar/main.go. 
- sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) - fi - workingDir: $(workspaces.source.path) - workspaces: - - name: source diff --git a/deploy/central/tasks-chart/templates/task-ods-build-gradle-with-sidecar.yaml b/deploy/central/tasks-chart/templates/task-ods-build-gradle-with-sidecar.yaml deleted file mode 100644 index 64247b57..00000000 --- a/deploy/central/tasks-chart/templates/task-ods-build-gradle-with-sidecar.yaml +++ /dev/null @@ -1,231 +0,0 @@ -# Generated by cmd/sidecar-tasks/main.go; DO NOT EDIT. -apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' -metadata: - creationTimestamp: null - name: '{{default "ods" .Values.taskPrefix}}-build-gradle-with-sidecar{{.Values.taskSuffix}}' -spec: - description: |- - Builds Gradle applications. - - The following steps are executed: - - - build gradle application, using `gradlew clean build`, which includes tests execution and coverage report generation - - SonarQube quality scan - - Notes: - - - tests exclude the vendor directory. - - test results are converted into xUnit format. - - Available environment variables: - - - `ODS_OUTPUT_DIR`: this environment variable points to the folder - that this build expects generated application artifacts to be copied to. - The gradle script should read it and copy there the generated artifacts. - - `NEXUS_*` env vars: `NEXUS_URL`, `NEXUS_USERNAME` and `NEXUS_PASSWORD` - are available and should be read by the gradle script. - - To enable the gradle script to copy the generated application artifacts script follow these steps: - - - read the environment variable `ODS_OUTPUT_DIR` in the buildscript section of the gradle script: - ``` - buildscript { - ext { - outputDir = System.getenv('ODS_OUTPUT_DIR') - } - } - ``` - - customize the jar tasks to set the destination directory - ``` - jar { - println("Set application jar name to 'app'") - archiveBaseName = 'app' - if (outputDir != null) { - println("Set destinationDirectory to '${projectDir}/${outputDir}'") - destinationDirectory = file("${projectDir}/${outputDir}") - } - } - ``` - - To create a coverage report be sure that you add to `gradle.properties` the required - configuration. For example to enable Jacoco coverage repot you will need to: - - - add `jacoco` plugin: - ``` - plugins { - id 'application' - id 'jacoco' - } - ``` - - add task `jacocoTestReport`: - ``` - jacocoTestReport { - reports { - xml.required = true - } - } - ``` - - add `finalizedBy jacocoTestReport` to the task `test`: - ``` - tasks.named('test') { - useJUnitPlatform() - finalizedBy jacocoTestReport - } - ``` - - The exact build recipe can be found at - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[build/package/scripts/build-gradle.sh]. - - After tests ran successfully, the application source code is scanned by SonarQube. - Default SonarQube project properties are provided unless `sonar-project.properties` - is present. - When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate - is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. - The SonarQube scan will include parameters to perform a pull request analysis if - there is an open pull request for the branch being built. If the - link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] - is setup properly, pull request decoration in Bitbucket is done automatically. 
- - The following artifacts are generated by the build task and placed into `.ods/artifacts/` - - * `code-coverage/` - ** `coverage.xml` - * `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` - * `xunit-reports/` - ** `report.xml` - - **Sidecar variant!** Use this task if you need to run a container next to the build task. - For example, this could be used to run a database to allow for integration tests. - The sidecar image to must be supplied via `sidecar-image`. - Apart from the sidecar, the task is an exact copy of `ods-build-gradle`. - params: - - default: . - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - name: working-dir - type: string - - default: "" - description: Additional gradle tasks to be passed to the gradle build. (default - tasks called are `clean` and `build`). - name: gradle-additional-tasks - type: string - - default: --no-daemon --stacktrace - description: 'Options to be passed to the gradle build. (See ref: https://docs.gradle.org/7.3.3/userguide/command_line_interface.html#sec:command_line_debugging)' - name: gradle-options - type: string - - default: -Dorg.gradle.jvmargs=-Xmx512M - description: 'Will be exposed to the build via `GRADLE_OPTS` environment variable. - Specifies JVM arguments to use when starting the Gradle client VM. The client - VM only handles command line input/output, so it is rare that one would need - to change its VM options. You can still use this to change the settings for - the Gradle daemon which runs the actual build by setting the according Gradle - properties by `-D`. If you want to set the JVM arguments for the actual build - you would do this via `-Dorg.gradle.jvmargs=-Xmx1024M` (See ref: https://docs.gradle.org/7.3.3/userguide/build_environment.html#sec:gradle_configuration_properties).' - name: gradle-opts-env - type: string - - default: docker - description: Path to the directory into which the resulting Java application jar - should be copied, relative to `working-dir`. This directory may then later be - used as Docker context for example. - name: output-dir - type: string - - default: "false" - description: Whether the SonarQube quality gate needs to pass for the task to - succeed. - name: sonar-quality-gate - type: string - - default: "false" - description: Whether to skip SonarQube analysis or not. - name: sonar-skip - type: string - - description: Image to use for sidecar - name: sidecar-image - type: string - sidecars: - - Workspaces: null - image: $(params.sidecar-image) - name: sidecar - resources: {} - steps: - - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: /tekton/home - - name: CI - value: "true" - - name: GRADLE_OPTS - value: $(params.gradle-opts-env) - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-gradle-toolset:{{.Values.imageTag}}' - name: build-gradle-binary - resources: {} - script: | - # build-gradle is build/package/scripts/build-gradle.sh. 
- build-gradle \ - --working-dir=$(params.working-dir) \ - --output-dir=$(params.output-dir) \ - --gradle-additional-tasks="$(params.gradle-additional-tasks)" \ - --gradle-options="$(params.gradle-options)" - workingDir: $(workspaces.source.path) - - env: - - name: HOME - value: /tekton/home - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.imageTag}}' - name: scan-with-sonar - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) - fi - workingDir: $(workspaces.source.path) - workspaces: - - name: source diff --git a/deploy/central/tasks-chart/templates/task-ods-build-python-with-sidecar.yaml b/deploy/central/tasks-chart/templates/task-ods-build-python-with-sidecar.yaml deleted file mode 100644 index e77590d7..00000000 --- a/deploy/central/tasks-chart/templates/task-ods-build-python-with-sidecar.yaml +++ /dev/null @@ -1,154 +0,0 @@ -# Generated by cmd/sidecar-tasks/main.go; DO NOT EDIT. -apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' -metadata: - creationTimestamp: null - name: '{{default "ods" .Values.taskPrefix}}-build-python-with-sidecar{{.Values.taskSuffix}}' -spec: - description: |- - Builds Python applications. - - The exact build recipe can be found at - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[build/package/scripts/build-python.sh]. - In particular, the Python source files are expected to be located in `src`. - - After tests ran successfully, the application source code is scanned by SonarQube. - Default SonarQube project properties are provided unless `sonar-project.properties` - is present. - When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate - is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. - The SonarQube scan will include parameters to perform a pull request analysis if - there is an open pull request for the branch being built. If the - link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] - is setup properly, pull request decoration in Bitbucket is done automatically. - - The following artifacts are generated by the build task and placed into `.ods/artifacts/` - - * `code-coverage/` - ** `coverage.xml` - * `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` - * `xunit-reports/` - ** `report.xml` - - **Sidecar variant!** Use this task if you need to run a container next to the build task. - For example, this could be used to run a database to allow for integration tests. - The sidecar image to must be supplied via `sidecar-image`. - Apart from the sidecar, the task is an exact copy of `ods-build-python`. - params: - - default: . - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. 
- name: working-dir - type: string - - default: docker - description: Path to the directory into which outputs should be placed, relative - to `working-dir`. This directory may then later be used as Docker context for - example. - name: output-dir - type: string - - default: "120" - description: Maximum line length. - name: max-line-length - type: string - - default: "" - description: Script to execute before running tests, relative to the working directory. - name: pre-test-script - type: string - - default: "false" - description: Whether quality gate needs to pass. - name: sonar-quality-gate - type: string - - default: "false" - description: Whether to skip the SonarQube analysis or not. - name: sonar-skip - type: string - - description: Image to use for sidecar - name: sidecar-image - type: string - sidecars: - - Workspaces: null - image: $(params.sidecar-image) - name: sidecar - resources: {} - steps: - - env: - - name: HOME - value: /tekton/home - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-python-toolset:{{.Values.imageTag}}' - name: build-python - resources: {} - script: |2 - - # build-python is build/package/scripts/build-python.sh. - build-python \ - --working-dir=$(params.working-dir) \ - --max-line-length=$(params.max-line-length) \ - --pre-test-script=$(params.pre-test-script) \ - --output-dir=$(params.output-dir) \ - --debug=${DEBUG} - workingDir: $(workspaces.source.path) - - env: - - name: HOME - value: /tekton/home - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.imageTag}}' - name: scan-with-sonar - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) - fi - workingDir: $(workspaces.source.path) - workspaces: - - name: source diff --git a/deploy/central/tasks-chart/templates/task-ods-build-typescript-with-sidecar.yaml b/deploy/central/tasks-chart/templates/task-ods-build-typescript-with-sidecar.yaml deleted file mode 100644 index ade6cc1d..00000000 --- a/deploy/central/tasks-chart/templates/task-ods-build-typescript-with-sidecar.yaml +++ /dev/null @@ -1,190 +0,0 @@ -# Generated by cmd/sidecar-tasks/main.go; DO NOT EDIT. -apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' -metadata: - creationTimestamp: null - name: '{{default "ods" .Values.taskPrefix}}-build-typescript-with-sidecar{{.Values.taskSuffix}}' -spec: - description: |- - Builds Typescript applications. - - The following steps are executed: - - - checks that package.json and package-lock.json exists to require best practice of using lock files. 
See also link:https://github.com/opendevstack/ods-pipeline/discussions/411[discussion 411] - - linting using `eslint` - - build typescript application, using `npm run build` - - test execution - - SonarQube quality scan - - For `eslint` to work there needs to be a config file (`eslintrc.json` or similar) at the root of the working directory. - This can be done by running `eslint --init` or by following the link:https://eslint.org/docs/user-guide/getting-started[official documentation] - - The exact build recipe can be found at - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-typescript.sh[build/package/scripts/build-typescript.sh]. - In particular, `npm run build` is expected to place outputs into `dist`. - - After tests ran successfully, the application source code is scanned by SonarQube. - Default SonarQube project properties are provided unless `sonar-project.properties` - is present. - When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate - is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. - The SonarQube scan will include parameters to perform a pull request analysis if - there is an open pull request for the branch being built. If the - link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] - is setup properly, pull request decoration in Bitbucket is done automatically. - - The following artifacts are generated by the build task and placed into `.ods/artifacts/` - - * `code-coverage/` - ** `clover.xml` - ** `coverage-final.json` - ** `lcov.info` - * `lint-reports` - ** `report.txt` - * `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` - * `xunit-reports/` - ** `report.xml` - - **Sidecar variant!** Use this task if you need to run a container next to the build task. - For example, this could be used to run a database to allow for integration tests. - The sidecar image to must be supplied via `sidecar-image`. - Apart from the sidecar, the task is an exact copy of `ods-build-typescript`. - params: - - default: . - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - name: working-dir - type: string - - default: docker - description: Path to the directory into which outputs should be placed, relative - to `working-dir`. This directory may then later be used as Docker context for - example. - name: output-dir - type: string - - default: "0" - description: Maximum of allowed linting warnings after which eslint will exit - with an error. Set to "-1" to never exit with an error due to warnings. - name: max-lint-warnings - type: string - - default: .js,.ts,.jsx,.tsx,.svelte - description: File extensions to lint separated by a comma. - name: lint-file-ext - type: string - - default: "false" - description: Whether quality gate needs to pass. - name: sonar-quality-gate - type: string - - default: "false" - description: Whether to skip the SonarQube analysis or not. - name: sonar-skip - type: string - - default: "16" - description: 'Node.js version to use - supported versions: 16' - name: node-version - type: string - - default: dist - description: Must match the directory into which `npm run build` places files. - The files inside `build-dir` will be copied to the `dist` folder in `output-dir` - As a result the files will be in `$output-dir/dist` Other common build directories - are `build` and `public`. 
- name: build-dir - type: string - - default: "false" - description: Whether `node-modules` is copied to the `output-dir` or not. If copied - the node modules are in `$output-dir/dist/node_modules`. For frontend components - this should be set to "false", while for backend components this should be set - to "true". - name: copy-node-modules - type: string - - description: Image to use for sidecar - name: sidecar-image - type: string - sidecars: - - Workspaces: null - image: $(params.sidecar-image) - name: sidecar - resources: {} - steps: - - env: - - name: HOME - value: /tekton/home - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-node$(params.node-version)-typescript-toolset:{{.Values.imageTag}}' - name: build-typescript - resources: {} - script: |2 - - # build-typescript is build/package/scripts/build-typescript.sh. - build-typescript \ - --working-dir=$(params.working-dir) \ - --output-dir=$(params.output-dir) \ - --debug=${DEBUG} \ - --max-lint-warnings=$(params.max-lint-warnings) \ - --lint-file-ext=$(params.lint-file-ext) \ - --build-dir=$(params.build-dir) \ - --copy-node-modules=$(params.copy-node-modules) - workingDir: $(workspaces.source.path) - - env: - - name: HOME - value: /tekton/home - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.imageTag}}' - name: scan-with-sonar - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) - fi - workingDir: $(workspaces.source.path) - workspaces: - - name: source diff --git a/deploy/central/tasks-chart/values.kind.yaml b/deploy/central/tasks-chart/values.kind.yaml deleted file mode 100644 index 610c125b..00000000 --- a/deploy/central/tasks-chart/values.kind.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Default values for chart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -registry: localhost:5000 -imageTag: latest -taskSuffix: '' -pushRegistry: kind-registry.kind:5000 diff --git a/deploy/central/tasks-chart/values.yaml b/deploy/central/tasks-chart/values.yaml deleted file mode 100644 index d3936cba..00000000 --- a/deploy/central/tasks-chart/values.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Default values for chart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -registry: image-registry.openshift-image-registry.svc:5000 -namespace: ods -imageTag: 0.2.0 -taskSuffix: -v0-2-0 -pushRegistry: image-registry.openshift-image-registry.svc:5000 - - -# Optional Values - -# Custom task kind (defaults to "ClusterTask") -# taskKind: "Task" - -# Custom task prefix (defaults to "ods") -# taskPrefix: "foo" diff --git a/deploy/cd-namespace/install.sh b/deploy/install.sh similarity index 95% rename from deploy/cd-namespace/install.sh rename to deploy/install.sh index 8c654e1c..4c81934a 100755 --- a/deploy/cd-namespace/install.sh +++ b/deploy/install.sh @@ -10,9 +10,10 @@ NAMESPACE="" RELEASE_NAME="ods-pipeline" SERVICEACCOUNT="pipeline" VALUES_FILE="values.custom.yaml" -CHART_DIR="./chart" +CHART_DIR="./ods-pipeline" while [[ "$#" -gt 0 ]]; do + # shellcheck disable=SC2034 case $1 in -v|--verbose) VERBOSE="true";; @@ -45,6 +46,11 @@ if [ "${VERBOSE}" == "true" ]; then set -x fi +if [ -z "${NAMESPACE}" ]; then + echo "--namespace is required" + exit 1 +fi + if kubectl -n "${NAMESPACE}" get serviceaccount/"${SERVICEACCOUNT}" &> /dev/null; then echo "Serviceaccount exists already ..." else diff --git a/deploy/cd-namespace/chart/.gitignore b/deploy/ods-pipeline/.gitignore similarity index 100% rename from deploy/cd-namespace/chart/.gitignore rename to deploy/ods-pipeline/.gitignore diff --git a/deploy/ods-pipeline/Chart.yaml b/deploy/ods-pipeline/Chart.yaml new file mode 100644 index 00000000..d68c8d40 --- /dev/null +++ b/deploy/ods-pipeline/Chart.yaml @@ -0,0 +1,35 @@ +apiVersion: v2 +name: ods-pipeline +description: Umbrella chart for ods-pipeline + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.2.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.2.0" + +dependencies: + - name: images + version: 0.2.0 + condition: images.enabled + - name: setup + version: 0.2.0 + condition: setup.enabled + - name: tasks + version: 0.2.0 + condition: tasks.enabled diff --git a/deploy/central/images-chart/Chart.yaml b/deploy/ods-pipeline/charts/images/Chart.yaml similarity index 97% rename from deploy/central/images-chart/Chart.yaml rename to deploy/ods-pipeline/charts/images/Chart.yaml index c2e846b6..f7d5c97f 100644 --- a/deploy/central/images-chart/Chart.yaml +++ b/deploy/ods-pipeline/charts/images/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -name: ods-pipeline-images +name: images description: A Helm chart to setup ODS pipeline images # A chart can be either an 'application' or a 'library' chart. 
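The new umbrella chart ties the `images`, `setup` and `tasks` subcharts together via the `images.enabled`, `setup.enabled` and `tasks.enabled` conditions declared in its `Chart.yaml`, and `deploy/install.sh` now requires an explicit `--namespace`. As a rough, hedged sketch of what an installation boils down to (`install.sh` remains the documented entry point; `foo-cd` is a placeholder CD namespace and `values.custom.yaml` the values file `install.sh` looks for by default):

```
# Sketch only, assuming the commands run from the deploy/ directory.
# images.enabled=false skips the OpenShift BuildConfig/ImageStream subchart,
# e.g. for a KinD-based setup (compare values.kind.yaml further below).
cd deploy
helm upgrade --install ods-pipeline ./ods-pipeline \
  --namespace foo-cd \
  --values values.custom.yaml \
  --set images.enabled=false
```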
diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.buildah b/deploy/ods-pipeline/charts/images/docker/Dockerfile.buildah new file mode 100644 index 00000000..4d185e60 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.buildah @@ -0,0 +1,15 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-buildah:$imageTag + +ARG aquasecScannerUrl + +# Optionally install Aqua scanner. +RUN if [ -z $aquasecScannerUrl ] ; then echo 'Skipping Aqua scanner installation!' ; else echo 'Installing Aqua scanner... getting binary from' $aquasecScannerUrl \ + && curl -v -L $aquasecScannerUrl -o aquasec \ + && mv aquasec /usr/local/bin/ \ + && chmod +x /usr/local/bin/aquasec \ + && echo 'Aqua scanner version:' \ + && aquasec version \ + && echo 'Aqua scanner installation completed!'; \ + fi diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.finish b/deploy/ods-pipeline/charts/images/docker/Dockerfile.finish new file mode 100644 index 00000000..0c45ff24 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.finish @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-finish:$imageTag diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.go-toolset b/deploy/ods-pipeline/charts/images/docker/Dockerfile.go-toolset new file mode 100644 index 00000000..d209cf6e --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.go-toolset @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-go-toolset:$imageTag diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.gradle-toolset b/deploy/ods-pipeline/charts/images/docker/Dockerfile.gradle-toolset new file mode 100644 index 00000000..6671b49f --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.gradle-toolset @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-gradle-toolset:$imageTag diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.helm b/deploy/ods-pipeline/charts/images/docker/Dockerfile.helm new file mode 100644 index 00000000..b3662a3b --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.helm @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-helm:$imageTag diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.node16-typescript-toolset b/deploy/ods-pipeline/charts/images/docker/Dockerfile.node16-typescript-toolset new file mode 100644 index 00000000..31fe1103 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.node16-typescript-toolset @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-node16-typescript-toolset:$imageTag diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.pipeline-manager b/deploy/ods-pipeline/charts/images/docker/Dockerfile.pipeline-manager new file mode 100644 index 00000000..831c808d --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.pipeline-manager @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-pipeline-manager:$imageTag diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.python-toolset b/deploy/ods-pipeline/charts/images/docker/Dockerfile.python-toolset new file mode 100644 index 00000000..0b433897 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.python-toolset @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-python-toolset:$imageTag diff --git 
a/deploy/ods-pipeline/charts/images/docker/Dockerfile.sonar b/deploy/ods-pipeline/charts/images/docker/Dockerfile.sonar new file mode 100644 index 00000000..3e80258d --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.sonar @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-sonar:$imageTag diff --git a/deploy/ods-pipeline/charts/images/docker/Dockerfile.start b/deploy/ods-pipeline/charts/images/docker/Dockerfile.start new file mode 100644 index 00000000..5dca5ba8 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/docker/Dockerfile.start @@ -0,0 +1,3 @@ +ARG imageTag="latest" + +FROM ghcr.io/opendevstack/ods-pipeline/ods-start:$imageTag diff --git a/deploy/central/images-chart/templates/bc-ods-buildah.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-buildah.yaml similarity index 54% rename from deploy/central/images-chart/templates/bc-ods-buildah.yaml rename to deploy/ods-pipeline/charts/images/templates/bc-ods-buildah.yaml index 5f6c442f..4ef2acf1 100644 --- a/deploy/central/images-chart/templates/bc-ods-buildah.yaml +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-buildah.yaml @@ -7,26 +7,20 @@ spec: output: to: kind: ImageStreamTag - name: 'ods-buildah:{{.Values.imageTag}}' + name: 'ods-buildah:{{.Values.global.imageTag}}' resources: {} successfulBuildsHistoryLimit: 5 failedBuildsHistoryLimit: 5 + postCommit: {} strategy: type: Docker dockerStrategy: - dockerfilePath: build/package/Dockerfile.buildah - from: - kind: DockerImage - name: 'registry.redhat.io/ubi8:8.4' buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' - name: aquasecScannerUrl value: '{{.Values.aquasecScannerUrl}}' - pullSecret: - name: registry.redhat.io - postCommit: {} source: - type: Git - git: - uri: '{{.Values.odsPipelineGitRepoUri}}' - ref: '{{.Values.odsPipelineGitRepoRef}}' + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.buildah" | nindent 6}} runPolicy: Serial diff --git a/deploy/ods-pipeline/charts/images/templates/bc-ods-finish.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-finish.yaml new file mode 100644 index 00000000..91041ad5 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-finish.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-finish +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-finish:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.finish" | nindent 6}} + runPolicy: Serial diff --git a/deploy/ods-pipeline/charts/images/templates/bc-ods-go-toolset.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-go-toolset.yaml new file mode 100644 index 00000000..7414ac76 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-go-toolset.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-go-toolset +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-go-toolset:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get 
"docker/Dockerfile.go-toolset" | nindent 6}} + runPolicy: Serial diff --git a/deploy/ods-pipeline/charts/images/templates/bc-ods-gradle-toolset.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-gradle-toolset.yaml new file mode 100644 index 00000000..aa7cec03 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-gradle-toolset.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-gradle-toolset +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-gradle-toolset:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.gradle-toolset" | nindent 6}} + runPolicy: Serial diff --git a/deploy/ods-pipeline/charts/images/templates/bc-ods-helm.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-helm.yaml new file mode 100644 index 00000000..18d956f6 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-helm.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-helm +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-helm:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.helm" | nindent 6}} + runPolicy: Serial diff --git a/deploy/ods-pipeline/charts/images/templates/bc-ods-node16-typescript-toolset.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-node16-typescript-toolset.yaml new file mode 100644 index 00000000..6ea5a3e8 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-node16-typescript-toolset.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-node16-typescript-toolset +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-node16-typescript-toolset:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.node16-typescript-toolset" | nindent 6}} + runPolicy: Serial diff --git a/deploy/ods-pipeline/charts/images/templates/bc-ods-pipeline-manager.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-pipeline-manager.yaml new file mode 100644 index 00000000..26e1f1a6 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-pipeline-manager.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-pipeline-manager +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-pipeline-manager:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.pipeline-manager" | nindent 6}} + runPolicy: Serial diff --git 
a/deploy/ods-pipeline/charts/images/templates/bc-ods-python-toolset.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-python-toolset.yaml new file mode 100644 index 00000000..e26c7e66 --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-python-toolset.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-python-toolset +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-python-toolset:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.python-toolset" | nindent 6}} + runPolicy: Serial diff --git a/deploy/ods-pipeline/charts/images/templates/bc-ods-sonar.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-sonar.yaml new file mode 100644 index 00000000..b9fb297a --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-sonar.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-sonar +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-sonar:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.sonar" | nindent 6}} + runPolicy: Serial diff --git a/deploy/ods-pipeline/charts/images/templates/bc-ods-start.yaml b/deploy/ods-pipeline/charts/images/templates/bc-ods-start.yaml new file mode 100644 index 00000000..1372888c --- /dev/null +++ b/deploy/ods-pipeline/charts/images/templates/bc-ods-start.yaml @@ -0,0 +1,24 @@ +kind: BuildConfig +apiVersion: build.openshift.io/v1 +metadata: + name: ods-start +spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: 'ods-start:{{.Values.global.imageTag}}' + resources: {} + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + postCommit: {} + strategy: + type: Docker + dockerStrategy: + buildArgs: + - name: imageTag + value: '{{.Values.global.imageTag}}' + source: + dockerfile: |- + {{- .Files.Get "docker/Dockerfile.start" | nindent 6}} + runPolicy: Serial diff --git a/deploy/central/images-chart/templates/is-ods-buildah.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-buildah.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-buildah.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-buildah.yaml diff --git a/deploy/central/images-chart/templates/is-ods-finish.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-finish.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-finish.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-finish.yaml diff --git a/deploy/central/images-chart/templates/is-ods-go-toolset.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-go-toolset.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-go-toolset.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-go-toolset.yaml diff --git a/deploy/central/images-chart/templates/is-ods-gradle-toolset.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-gradle-toolset.yaml similarity index 100% rename from 
deploy/central/images-chart/templates/is-ods-gradle-toolset.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-gradle-toolset.yaml diff --git a/deploy/central/images-chart/templates/is-ods-helm.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-helm.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-helm.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-helm.yaml diff --git a/deploy/central/images-chart/templates/is-ods-node16-typescript-toolset.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-node16-typescript-toolset.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-node16-typescript-toolset.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-node16-typescript-toolset.yaml diff --git a/deploy/central/images-chart/templates/is-ods-pipeline-manager.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-pipeline-manager.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-pipeline-manager.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-pipeline-manager.yaml diff --git a/deploy/central/images-chart/templates/is-ods-python-toolset.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-python-toolset.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-python-toolset.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-python-toolset.yaml diff --git a/deploy/central/images-chart/templates/is-ods-sonar.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-sonar.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-sonar.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-sonar.yaml diff --git a/deploy/central/images-chart/templates/is-ods-start.yaml b/deploy/ods-pipeline/charts/images/templates/is-ods-start.yaml similarity index 100% rename from deploy/central/images-chart/templates/is-ods-start.yaml rename to deploy/ods-pipeline/charts/images/templates/is-ods-start.yaml diff --git a/deploy/ods-pipeline/charts/images/values.yaml b/deploy/ods-pipeline/charts/images/values.yaml new file mode 100644 index 00000000..e8d0e03d --- /dev/null +++ b/deploy/ods-pipeline/charts/images/values.yaml @@ -0,0 +1,2 @@ +# override name to be consistent with previous, separate chart naming convention(s) +nameOverride: ods-pipeline diff --git a/deploy/cd-namespace/chart/Chart.yaml b/deploy/ods-pipeline/charts/setup/Chart.yaml similarity index 98% rename from deploy/cd-namespace/chart/Chart.yaml rename to deploy/ods-pipeline/charts/setup/Chart.yaml index 0ffb0716..0940c121 100644 --- a/deploy/cd-namespace/chart/Chart.yaml +++ b/deploy/ods-pipeline/charts/setup/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -name: ods-pipeline +name: setup description: A Helm chart to setup ODS pipelines # A chart can be either an 'application' or a 'library' chart. 
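Each subchart sets `nameOverride: ods-pipeline` (see the subcharts' `values.yaml` files), so rendered resource names stay consistent with the previously separate charts even though the chart names are now simply `images`, `setup` and `tasks`. A hedged way to sanity-check what the relocated task templates render is `helm template` with `--show-only`; the selected file and values below are illustrative:

```
# Sketch only: render a single Task from the "tasks" subchart without installing.
# Disabling the other subcharts keeps the preview focused on the task templates;
# global.imageTag and global.taskSuffix are the values shared by all subcharts.
helm template ods-pipeline deploy/ods-pipeline \
  --set images.enabled=false \
  --set setup.enabled=false \
  --set global.imageTag=0.2.0 \
  --set global.taskSuffix=-v0-2-0 \
  --show-only charts/tasks/templates/task-ods-build-go.yaml
```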
diff --git a/deploy/cd-namespace/chart/templates/_helpers.tpl b/deploy/ods-pipeline/charts/setup/templates/_helpers.tpl similarity index 100% rename from deploy/cd-namespace/chart/templates/_helpers.tpl rename to deploy/ods-pipeline/charts/setup/templates/_helpers.tpl diff --git a/deploy/cd-namespace/chart/templates/configmap-aqua.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/configmap-aqua.yaml rename to deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml diff --git a/deploy/cd-namespace/chart/templates/configmap-bitbucket.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-bitbucket.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/configmap-bitbucket.yaml rename to deploy/ods-pipeline/charts/setup/templates/configmap-bitbucket.yaml diff --git a/deploy/cd-namespace/chart/templates/configmap-cluster.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-cluster.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/configmap-cluster.yaml rename to deploy/ods-pipeline/charts/setup/templates/configmap-cluster.yaml diff --git a/deploy/cd-namespace/chart/templates/configmap-nexus.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-nexus.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/configmap-nexus.yaml rename to deploy/ods-pipeline/charts/setup/templates/configmap-nexus.yaml diff --git a/deploy/cd-namespace/chart/templates/configmap-notifications.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-notifications.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/configmap-notifications.yaml rename to deploy/ods-pipeline/charts/setup/templates/configmap-notifications.yaml diff --git a/deploy/cd-namespace/chart/templates/configmap-pipeline.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-pipeline.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/configmap-pipeline.yaml rename to deploy/ods-pipeline/charts/setup/templates/configmap-pipeline.yaml diff --git a/deploy/cd-namespace/chart/templates/configmap-sonar.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/configmap-sonar.yaml rename to deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml diff --git a/deploy/cd-namespace/chart/templates/deployment.yaml b/deploy/ods-pipeline/charts/setup/templates/deployment.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/deployment.yaml rename to deploy/ods-pipeline/charts/setup/templates/deployment.yaml diff --git a/deploy/cd-namespace/chart/templates/secret-aqua-auth.yaml b/deploy/ods-pipeline/charts/setup/templates/secret-aqua-auth.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/secret-aqua-auth.yaml rename to deploy/ods-pipeline/charts/setup/templates/secret-aqua-auth.yaml diff --git a/deploy/cd-namespace/chart/templates/secret-bitbucket-auth.yaml b/deploy/ods-pipeline/charts/setup/templates/secret-bitbucket-auth.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/secret-bitbucket-auth.yaml rename to deploy/ods-pipeline/charts/setup/templates/secret-bitbucket-auth.yaml diff --git a/deploy/cd-namespace/chart/templates/secret-bitbucket-webhook.yaml b/deploy/ods-pipeline/charts/setup/templates/secret-bitbucket-webhook.yaml similarity index 100% rename from 
deploy/cd-namespace/chart/templates/secret-bitbucket-webhook.yaml rename to deploy/ods-pipeline/charts/setup/templates/secret-bitbucket-webhook.yaml diff --git a/deploy/cd-namespace/chart/templates/secret-nexus-auth.yaml b/deploy/ods-pipeline/charts/setup/templates/secret-nexus-auth.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/secret-nexus-auth.yaml rename to deploy/ods-pipeline/charts/setup/templates/secret-nexus-auth.yaml diff --git a/deploy/cd-namespace/chart/templates/secret-sonar-auth.yaml b/deploy/ods-pipeline/charts/setup/templates/secret-sonar-auth.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/secret-sonar-auth.yaml rename to deploy/ods-pipeline/charts/setup/templates/secret-sonar-auth.yaml diff --git a/deploy/cd-namespace/chart/templates/service.yaml b/deploy/ods-pipeline/charts/setup/templates/service.yaml similarity index 100% rename from deploy/cd-namespace/chart/templates/service.yaml rename to deploy/ods-pipeline/charts/setup/templates/service.yaml diff --git a/deploy/ods-pipeline/charts/setup/values.yaml b/deploy/ods-pipeline/charts/setup/values.yaml new file mode 100644 index 00000000..e8d0e03d --- /dev/null +++ b/deploy/ods-pipeline/charts/setup/values.yaml @@ -0,0 +1,2 @@ +# override name to be consistent with previous, separate chart naming convention(s) +nameOverride: ods-pipeline diff --git a/deploy/central/tasks-chart/Chart.yaml b/deploy/ods-pipeline/charts/tasks/Chart.yaml similarity index 97% rename from deploy/central/tasks-chart/Chart.yaml rename to deploy/ods-pipeline/charts/tasks/Chart.yaml index cb5acd1d..59fe8464 100644 --- a/deploy/central/tasks-chart/Chart.yaml +++ b/deploy/ods-pipeline/charts/tasks/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -name: ods-pipeline-tasks +name: tasks description: A Helm chart to setup ODS pipeline tasks # A chart can be either an 'application' or a 'library' chart. diff --git a/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl b/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl new file mode 100644 index 00000000..b2f402f1 --- /dev/null +++ b/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl @@ -0,0 +1,40 @@ +{{- define "sonar-step"}} +- name: scan-with-sonar + # Image is built from build/package/Dockerfile.sonar. + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.global.imageTag}}' + env: + - name: HOME + value: '/tekton/home' + - name: SONAR_URL + valueFrom: + configMapKeyRef: + key: url + name: ods-sonar + - name: SONAR_EDITION + valueFrom: + configMapKeyRef: + key: edition + name: ods-sonar + - name: SONAR_AUTH_TOKEN + valueFrom: + secretKeyRef: + key: password + name: ods-sonar-auth + - name: DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: ods-pipeline + resources: {} + script: | + if [ "$(params.sonar-skip)" = "true" ]; then + echo "Skipping SonarQube analysis" + else + mkdir -p .ods/artifacts/sonarqube-analysis + # sonar is built from cmd/sonar/main.go. 
+ sonar \ + -working-dir=$(params.working-dir) \ + -quality-gate=$(params.sonar-quality-gate) + fi + workingDir: $(workspaces.source.path) +{{- end}} diff --git a/deploy/central/tasks-chart/templates/task-ods-build-go.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml similarity index 77% rename from deploy/central/tasks-chart/templates/task-ods-build-go.yaml rename to deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml index bfdd468f..7eaabc1f 100644 --- a/deploy/central/tasks-chart/templates/task-ods-build-go.yaml +++ b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' +kind: '{{default "Task" .Values.taskKind}}' metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-go{{.Values.taskSuffix}}' + name: '{{default "ods" .Values.taskPrefix}}-build-go{{.Values.global.taskSuffix}}' spec: description: | Builds Go (module) applications. @@ -81,10 +81,14 @@ spec: description: Whether to skip SonarQube analysis or not. type: string default: "false" + {{- with ((.Values.go).sidecars) }} + sidecars: + {{- toYaml . | nindent 4 }} + {{- end }} steps: - name: build-go-binary # Image is built from build/package/Dockerfile.go-toolset. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-go-toolset:{{.Values.imageTag}}' + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-go-toolset:{{.Values.global.imageTag}}' env: - name: HOME value: '/tekton/home' @@ -95,7 +99,8 @@ spec: configMapKeyRef: key: debug name: ods-pipeline - resources: {} + resources: + {{- (.Values.go).resources | default dict | toYaml | nindent 8 }} script: | # build-go is build/package/scripts/build-go.sh. @@ -108,43 +113,6 @@ spec: --output-dir=$(params.output-dir) \ --debug=${DEBUG} workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.imageTag}}' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) - fi - workingDir: $(workspaces.source.path) + {{- include "sonar-step" . 
| indent 4}} workspaces: - name: source diff --git a/deploy/central/tasks-chart/templates/task-ods-build-gradle.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml similarity index 83% rename from deploy/central/tasks-chart/templates/task-ods-build-gradle.yaml rename to deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml index bd47c9cf..df8d9729 100644 --- a/deploy/central/tasks-chart/templates/task-ods-build-gradle.yaml +++ b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' +kind: '{{default "Task" .Values.taskKind}}' metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-gradle{{.Values.taskSuffix}}' + name: '{{default "ods" .Values.taskPrefix}}-build-gradle{{.Values.global.taskSuffix}}' spec: description: | Builds Gradle applications. @@ -136,10 +136,14 @@ spec: description: Whether to skip SonarQube analysis or not. type: string default: "false" + {{- with ((.Values.gradle).sidecars) }} + sidecars: + {{- toYaml . | nindent 4 }} + {{- end }} steps: - name: build-gradle-binary # Image is built from build/package/Dockerfile.gradle-toolset. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-gradle-toolset:{{.Values.imageTag}}' + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-gradle-toolset:{{.Values.global.imageTag}}' env: - name: DEBUG valueFrom: @@ -167,7 +171,8 @@ spec: secretKeyRef: key: password name: ods-nexus-auth - resources: {} + resources: + {{- (.Values.gradle).resources | default dict | toYaml | nindent 8 }} script: | # build-gradle is build/package/scripts/build-gradle.sh. build-gradle \ @@ -176,43 +181,6 @@ spec: --gradle-additional-tasks="$(params.gradle-additional-tasks)" \ --gradle-options="$(params.gradle-options)" workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.imageTag}}' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) - fi - workingDir: $(workspaces.source.path) + {{- include "sonar-step" . 
| indent 4}} workspaces: - name: source diff --git a/deploy/central/tasks-chart/templates/task-ods-build-python.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml similarity index 73% rename from deploy/central/tasks-chart/templates/task-ods-build-python.yaml rename to deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml index c447d919..a4d2f29e 100644 --- a/deploy/central/tasks-chart/templates/task-ods-build-python.yaml +++ b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' +kind: '{{default "Task" .Values.taskKind}}' metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-python{{.Values.taskSuffix}}' + name: '{{default "ods" .Values.taskPrefix}}-build-python{{.Values.global.taskSuffix}}' spec: description: | Builds Python applications. @@ -59,10 +59,14 @@ spec: description: Whether to skip the SonarQube analysis or not. type: string default: "false" + {{- with ((.Values.python).sidecars) }} + sidecars: + {{- toYaml . | nindent 4 }} + {{- end }} steps: - name: build-python # Image is built from build/package/Dockerfile.python-toolset. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-python-toolset:{{.Values.imageTag}}' + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-python-toolset:{{.Values.global.imageTag}}' env: - name: HOME value: '/tekton/home' @@ -88,7 +92,8 @@ spec: configMapKeyRef: key: debug name: ods-pipeline - resources: {} + resources: + {{- (.Values.python).resources | default dict | toYaml | nindent 8 }} script: | # build-python is build/package/scripts/build-python.sh. @@ -99,43 +104,6 @@ spec: --output-dir=$(params.output-dir) \ --debug=${DEBUG} workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.imageTag}}' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) - fi - workingDir: $(workspaces.source.path) + {{- include "sonar-step" . 
| indent 4}} workspaces: - name: source diff --git a/deploy/central/tasks-chart/templates/task-ods-build-typescript.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-typescript.yaml similarity index 80% rename from deploy/central/tasks-chart/templates/task-ods-build-typescript.yaml rename to deploy/ods-pipeline/charts/tasks/templates/task-ods-build-typescript.yaml index ecb8bee7..65bf7bb1 100644 --- a/deploy/central/tasks-chart/templates/task-ods-build-typescript.yaml +++ b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-typescript.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' +kind: '{{default "Task" .Values.taskKind}}' metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-typescript{{.Values.taskSuffix}}' + name: '{{default "ods" .Values.taskPrefix}}-build-typescript{{.Values.global.taskSuffix}}' spec: description: | Builds Typescript applications. @@ -96,10 +96,14 @@ spec: while for backend components this should be set to "true". type: string default: "false" + {{- with ((.Values.typescript).sidecars) }} + sidecars: + {{- toYaml . | nindent 4 }} + {{- end }} steps: - name: build-typescript # Image is built from build/package/Dockerfile.node-xx-typescript-toolset. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-node$(params.node-version)-typescript-toolset:{{.Values.imageTag}}' + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-node$(params.node-version)-typescript-toolset:{{.Values.global.imageTag}}' env: - name: HOME value: '/tekton/home' @@ -125,7 +129,8 @@ spec: configMapKeyRef: key: debug name: ods-pipeline - resources: {} + resources: + {{- (.Values.typescript).resources | default dict | toYaml | nindent 8 }} script: | # build-typescript is build/package/scripts/build-typescript.sh. @@ -138,43 +143,6 @@ spec: --build-dir=$(params.build-dir) \ --copy-node-modules=$(params.copy-node-modules) workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-sonar:{{.Values.imageTag}}' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) - fi - workingDir: $(workspaces.source.path) + {{- include "sonar-step" . 
| indent 4}} workspaces: - name: source diff --git a/deploy/central/tasks-chart/templates/task-ods-deploy-helm.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml similarity index 98% rename from deploy/central/tasks-chart/templates/task-ods-deploy-helm.yaml rename to deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml index a93d25de..bae9eb99 100644 --- a/deploy/central/tasks-chart/templates/task-ods-deploy-helm.yaml +++ b/deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' +kind: '{{default "Task" .Values.taskKind}}' metadata: - name: '{{default "ods" .Values.taskPrefix}}-deploy-helm{{.Values.taskSuffix}}' + name: '{{default "ods" .Values.taskPrefix}}-deploy-helm{{.Values.global.taskSuffix}}' spec: description: | Deploy Helm charts. @@ -89,7 +89,7 @@ spec: steps: - name: helm-upgrade-from-repo # Image is built from build/package/Dockerfile.helm. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-helm:{{.Values.imageTag}}' + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-helm:{{.Values.global.imageTag}}' env: - name: DEBUG valueFrom: diff --git a/deploy/central/tasks-chart/templates/task-ods-finish.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml similarity index 95% rename from deploy/central/tasks-chart/templates/task-ods-finish.yaml rename to deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml index c540c1f7..89e930c4 100644 --- a/deploy/central/tasks-chart/templates/task-ods-finish.yaml +++ b/deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' +kind: '{{default "Task" .Values.taskKind}}' metadata: - name: '{{default "ods" .Values.taskPrefix}}-finish{{.Values.taskSuffix}}' + name: '{{default "ods" .Values.taskPrefix}}-finish{{.Values.global.taskSuffix}}' spec: description: | Finishes the pipeline run. @@ -32,7 +32,7 @@ spec: steps: - name: ods-finish # Image is built from build/package/Dockerfile.finish. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-finish:{{.Values.imageTag}}' + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-finish:{{.Values.global.imageTag}}' env: - name: HOME value: '/tekton/home' diff --git a/deploy/central/tasks-chart/templates/task-ods-package-image.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml similarity index 98% rename from deploy/central/tasks-chart/templates/task-ods-package-image.yaml rename to deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml index acf6e509..30f43277 100644 --- a/deploy/central/tasks-chart/templates/task-ods-package-image.yaml +++ b/deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' +kind: '{{default "Task" .Values.taskKind}}' metadata: - name: '{{default "ods" .Values.taskPrefix}}-package-image{{.Values.taskSuffix}}' + name: '{{default "ods" .Values.taskPrefix}}-package-image{{.Values.global.taskSuffix}}' spec: description: | Packages applications into container images using @@ -82,7 +82,7 @@ spec: steps: - name: build-and-push-image # Image is built from build/package/Dockerfile.buildah. 
- image: '{{.Values.registry}}/{{.Values.namespace}}/ods-buildah:{{.Values.imageTag}}' + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-buildah:{{.Values.global.imageTag}}' env: - name: HOME value: '/tekton/home' diff --git a/deploy/central/tasks-chart/templates/task-ods-start.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml similarity index 97% rename from deploy/central/tasks-chart/templates/task-ods-start.yaml rename to deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml index 10a25db3..09e5c4b6 100644 --- a/deploy/central/tasks-chart/templates/task-ods-start.yaml +++ b/deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "ClusterTask" .Values.taskKind}}' +kind: '{{default "Task" .Values.taskKind}}' metadata: - name: '{{default "ods" .Values.taskPrefix}}-start{{.Values.taskSuffix}}' + name: '{{default "ods" .Values.taskPrefix}}-start{{.Values.global.taskSuffix}}' spec: description: | Starts the pipeline run. @@ -116,7 +116,7 @@ spec: steps: - name: ods-start # Image is built from build/package/Dockerfile.start. - image: '{{.Values.registry}}/{{.Values.namespace}}/ods-start:{{.Values.imageTag}}' + image: '{{.Values.registry}}/{{.Values.namespace}}/ods-start:{{.Values.global.imageTag}}' env: - name: HOME value: '/tekton/home' diff --git a/deploy/central/tasks-chart/values.docs.yaml b/deploy/ods-pipeline/charts/tasks/values.docs.yaml similarity index 85% rename from deploy/central/tasks-chart/values.docs.yaml rename to deploy/ods-pipeline/charts/tasks/values.docs.yaml index cf61c70c..5d72547f 100644 --- a/deploy/central/tasks-chart/values.docs.yaml +++ b/deploy/ods-pipeline/charts/tasks/values.docs.yaml @@ -1,9 +1,10 @@ # Default values for chart. # This is a YAML-formatted file. # Declare variables to be passed into your templates. +global: + imageTag: 0.2.0 + taskSuffix: '' registry: image-registry.openshift-image-registry.svc:5000 namespace: ods -imageTag: 0.2.0 -taskSuffix: '' pushRegistry: image-registry.openshift-image-registry.svc:5000 diff --git a/deploy/ods-pipeline/charts/tasks/values.yaml b/deploy/ods-pipeline/charts/tasks/values.yaml new file mode 100644 index 00000000..e8d0e03d --- /dev/null +++ b/deploy/ods-pipeline/charts/tasks/values.yaml @@ -0,0 +1,2 @@ +# override name to be consistent with previous, separate chart naming convention(s) +nameOverride: ods-pipeline diff --git a/deploy/ods-pipeline/secrets.yaml b/deploy/ods-pipeline/secrets.yaml new file mode 100644 index 00000000..03eff9c1 --- /dev/null +++ b/deploy/ods-pipeline/secrets.yaml @@ -0,0 +1,29 @@ +setup: + # Bitbucket + # Bitbucket personal access token (PAT) in clear text. This token needs to be + # created in Bitbucket for the user identified by "bitbucketUsername". + # The user requires write permissions in order to set build status or add code + # insights on commits. + bitbucketAccessToken: '' + # Shared secret between webhooks in Bitbucket and the event listener. Create + # a random secret and enter it here, then use it later on as the secret in any + # webhook you setup. The value needs to be in clear text. + bitbucketWebhookSecret: '' + + # Nexus + # Nexus password for the user identified by "nexusUsername" in clear text. + nexusPassword: '' + + # Sonar + # SonarQube password for the user identified by "sonarUsername" in clear text. + sonarAuthToken: '' + + # Aqua + # Aqua password for the user identified by "aquaUsername" in clear text. + # Leave empty when not using Aqua. 
+ aquaPassword: '' + +images: + # Aqua + # URL to download aqua-scanner binary. The URL must contain basic authentication. + aquasecScannerUrl: '' diff --git a/deploy/ods-pipeline/values.kind.yaml b/deploy/ods-pipeline/values.kind.yaml new file mode 100644 index 00000000..4cc08b2a --- /dev/null +++ b/deploy/ods-pipeline/values.kind.yaml @@ -0,0 +1,108 @@ +global: + imageTag: latest + taskSuffix: '' + +## +# setup chart +## +setup: + enabled: true + # General + serviceAccountName: 'pipeline' + + # Cluster + consoleUrl: 'http://example.com' + + # Pipeline Manager + pipelineManager: + storageProvisioner: '' + storageClassName: 'standard' + storageSize: '2Gi' + replicaCount: 1 + image: + registry: localhost:5000 + namespace: ods + repository: ods-pipeline-manager + pullPolicy: Always + tag: "latest" + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + + # Notification Webhook + notification: + # notifications are disabled by default, i.e. the ConfigMap won't be installed + enabled: false + # URL of the configured webhook + url: 'http://example.com' + # The HTTP method to be used + method: 'POST' + # The HTTP content type header + contentType: 'application/json' + # Specify the outcomes you want to be notified of (allowed values: c.f. + # https://tekton.dev/docs/pipelines/pipelines/#using-aggregate-execution-status-of-all-tasks) + notifyOnStatus: + - 'Failed' + # Template to be processed and accepted by the configured webhook in use + # Below example might work for Microsoft Teams + requestTemplate: |- + { + "@type": "MessageCard", + "@context": "http://schema.org/extensions", + "themeColor": {{if eq .OverallStatus "Succeeded"}}"237b4b"{{else}}"c4314b"{{ end }}, + "summary": "{{.ODSContext.Project}} - ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", + "sections": [ + { + "activityTitle": "ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", + "activitySubtitle": "On Project {{.ODSContext.Project}}", + "activityImage": "https://avatars.githubusercontent.com/u/38974438?s=200&v=4", + "facts": [ + { + "name": "GitRef", + "value": "{{.ODSContext.GitRef}}" + }, + { + "name": "Environment", + "value": "{{.ODSContext.Environment}}" + } + ], + "markdown": true + } + ], + "potentialAction": [ + { + "@type": "OpenUri", + "name": "Go to PipelineRun", + "targets": [ + { + "os": "default", + "uri": "{{.PipelineRunURL}}" + } + ] + } + ] + } + +## +# tasks chart +## +tasks: + enabled: true + # Default values for chart. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + # To test with the latest public ods-pipeline images, set global.imageTag to 'latest' and use + # + # registry: ghcr.io + # namespace: opendevstack/ods-pipeline + + registry: localhost:5000 + pushRegistry: kind-registry.kind:5000 + +images: + enabled: false diff --git a/deploy/ods-pipeline/values.yaml b/deploy/ods-pipeline/values.yaml new file mode 100644 index 00000000..58b42c09 --- /dev/null +++ b/deploy/ods-pipeline/values.yaml @@ -0,0 +1,217 @@ +# ################################################################################################ # +# UMBRELLA # +# ################################################################################################ # +global: + # Image tag to use for images referenced by tasks. + imageTag: 0.2.0 + # Suffix to append to the task name. 
+ taskSuffix: -v0-2-0 + + +# ################################################################################################ # +# IMAGES CHART CONFIG # +# ################################################################################################ # +images: + # enable chart containing Openshift image streams and build configs + enabled: true + + +# ################################################################################################ # +# SETUP CHART CONFIG # +# ################################################################################################ # +setup: + # enable configuration and management chart + enabled: true + + # General + # Serviceaccount name to use for pipeline resources. + serviceAccountName: 'pipeline' + # Whether to enable debug mode + debug: 'false' + + # Bitbucket + # Bitbucket URL (including scheme). Example: https://bitbucket.example.com. + bitbucketUrl: '' + # Bitbucket username. Example: cd_user. + bitbucketUsername: '' + + # Nexus + # Nexus URL (including scheme). Example: https://nexus.example.com. + nexusUrl: '' + # Nexus username. Example: developer. + nexusUsername: '' + # Nexus repository for temporary artifacts (stage = dev) + nexusTemporaryRepository: 'ods-temporary-artifacts' + # Nexus repository for permanent artifacts (stage = qa|prod) + nexusPermanentRepository: 'ods-permanent-artifacts' + + # Sonar + # SonarQube URL (including scheme). Example: https://sonarqube.example.com. + sonarUrl: '' + # SonarQube username. Example: developer. + sonarUsername: '' + # SonarQube edition. Valid options: 'community', 'developer', 'enterprise' or 'datacenter' + sonarEdition: 'community' + + # Aqua + # Aqua URL (including scheme). Example: https://aqua.example.com. + # Leave empty when not using Aqua. + aquaUrl: '' + # Aqua registry name. + # Leave empty when not using Aqua. + aquaRegistry: '' + # Aqua username. Example: developer. + # Leave empty when not using Aqua. + aquaUsername: '' + + # Cluster + # URL (including scheme) of the OpenShift Web Console. + consoleUrl: 'http://example.com' + + # Notification Webhook + notification: + # notifications are disabled by default, i.e. the ConfigMap won't be installed + enabled: false + # URL of the configured webhook + url: 'http://example.com' + # The HTTP method to be used + method: 'POST' + # The HTTP content type header + contentType: 'application/json' + # Specify the outcomes you want to be notified of (allowed values: c.f. 
+ # https://tekton.dev/docs/pipelines/pipelines/#using-aggregate-execution-status-of-all-tasks) + notifyOnStatus: + - 'Failed' + # Template to be processed and accepted by the configured webhook in use + # Below example might work for Microsoft Teams + requestTemplate: |- + { + "@type": "MessageCard", + "@context": "http://schema.org/extensions", + "themeColor": {{if eq .OverallStatus "Succeeded"}}"237b4b"{{else}}"c4314b"{{ end }}, + "summary": "{{.ODSContext.Project}} - ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", + "sections": [ + { + "activityTitle": "ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", + "activitySubtitle": "On Project {{.ODSContext.Project}}", + "activityImage": "https://avatars.githubusercontent.com/u/38974438?s=200&v=4", + "facts": [ + { + "name": "GitRef", + "value": "{{.ODSContext.GitRef}}" + }, + { + "name": "Environment", + "value": "{{.ODSContext.Environment}}" + } + ], + "markdown": true + } + ], + "potentialAction": [ + { + "@type": "OpenUri", + "name": "Go to PipelineRun", + "targets": [ + { + "os": "default", + "uri": "{{.PipelineRunURL}}" + } + ] + } + ] + } + + # Pipeline(Run) Pruning + # Minimum hours to keep a pipeline run. Has precendence over pipelineRunMaxKeepRuns. + # Must be at least 1. + pipelineRunMinKeepHours: '48' + # Maximum number of pipeline runs to keep per stage (stages: DEV, QA, PROD). + # Must be at least 1. + pipelineRunMaxKeepRuns: '20' + + # Pipeline Manager + pipelineManager: + # PVC (used for the pipeline workspace) + # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. + storageProvisioner: 'kubernetes.io/aws-ebs' + # Storage class. On AWS backed clusters, use 'gp2'. + storageClassName: 'gp2' + # Storage size. Defaults to 2Gi unless set explicitly here. + storageSize: '5Gi' + # Number of replicas to run for the pipeline manager. + replicaCount: 1 + image: + # Image registry from which to pull the pipeline manager container image. + registry: image-registry.openshift-image-registry.svc:5000 + # Namespace from which to pull the pipeline manager container image. + # If not given, the image is pulled from the release namespace. + namespace: ods + # Repository (ImageStream) from which to pull the pipeline manager + # container image. + # If not given, the image name equals the chart name. + repository: ods-pipeline-manager + # Pull policy. + pullPolicy: Always + # Image tag to pull. + # If not given, defaults to the chart appVersion. + # tag: "0.2.0" + # Deployment pod resources. Typically these settings should not need to change. + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + + + +# ################################################################################################ # +# TASK CHART CONFIG # +# ################################################################################################ # +tasks: + # enable task definition chart + enabled: true + + # Default values for chart. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + registry: image-registry.openshift-image-registry.svc:5000 + namespace: ods + pushRegistry: image-registry.openshift-image-registry.svc:5000 + + + # Optional Values + + # Custom task kind (defaults to "Task") + # taskKind: "ClusterTask" + + # Custom task prefix (defaults to "ods") + # taskPrefix: "foo" + + # To define build task specific sidecars and quotas, add resources/sidecar section(s) per task, + # e.g. 
+ # + # go: + # # define custom resource quotas for the go build task + # resources: + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + # sidecars: + # # sidecars added to go build task + # - workspaces: null + # image: postgres + # name: postgres-sidecar + # resources: + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi diff --git a/docs/admin-installation.adoc b/docs/admin-installation.adoc deleted file mode 100644 index e2b88422..00000000 --- a/docs/admin-installation.adoc +++ /dev/null @@ -1,124 +0,0 @@ -# Admin Installation Guide -:toc: - -This guide will show how to install `ods-pipeline` in an existing ODS cluster. Note this is a one-time installation done by a cluster admin and does not need to be repeated for every project that wants to use `ods-pipeline`. - -NOTE: If you are not a cluster admin but still want to give ODS pipeline a try, there is a way to install it in your own namespace only, see <>. - -Centrally installed resources are `BuildConfig` and `ImageStream` resources to produce images required by the ODS tasks, as well as the `ClusterTask` resources themselves. - -## Prerequisites - -You'll need: - -* A namespace in an OpenShift cluster (such as `ods`) -* `git`, `oc` and `helm` (with plugins link:https://github.com/databus23/helm-diff[`helm-diff`] and link:https://github.com/jkroepke/helm-secrets[`helm-secrets`]) installed locally -* to be logged into OpenShift on the command line *as a cluster admin* - -## Instructions - -First, create a repository in Bitbucket, e.g. `ods`. The name can be anything, but since the repository will define the resources in namespace `ods` in code, it makes sense to mirror the namespace name. Clone the repository locally and make an initial commit, e.g. by adding a readme file. - -IMPORTANT: The following commands will fail in an empty Git repository, so make sure you have made at least one commit in the repository. - -Then, use `git subtree` to get the required sources. The following commands may look a bit complicated, but in a nutshell, they are simply adding one folder (`deploy/central`) from the `opendestack/ods-pipeline` repository at the given revision (e.g. `master`) into your new local repository at the path `ods-pipeline`. - -``` -pipelineGitRef=v0.2.0 # Pick the version you want to install - -git fetch --depth=1 https://github.com/opendevstack/ods-pipeline.git $pipelineGitRef:ods-pipeline-$pipelineGitRef && \ -git checkout ods-pipeline-$pipelineGitRef && \ -git subtree split --prefix=deploy/central -b subtree-split-branch-$pipelineGitRef && \ -git checkout - && \ -git subtree add --squash --prefix=ods-pipeline subtree-split-branch-$pipelineGitRef -``` - -Once this is done, change to the new folder `ods-pipeline` to configure the values and secrets to use for the installation. - -For the values, just run: -``` -cp chart/values.yaml values.yaml -``` - -Regarding the secrets, it is recommended to encrypt them at rest, therefore the following describes how to first encrypt them, but also how to edit them using the `helm-secrets` plugin. - -For this, you'll need to install `link:https://github.com/mozilla/sops[sops]` and `link:https://github.com/FiloSottile/age[age]` for en-/decryption of the `secrets.yaml` file. 
- -As described in the `sops` link:https://github.com/mozilla/sops#22encrypting-using-age[documentation], when decrypting -using `age`, `sops` will look for a text file name `keys.txt` located in a `sops` subdirectory of your user -configuration directory. On Linux, this would be `$XDG_CONFIG_HOME/sops/age/keys.txt` (if `$XDG_CONFIG_HOME` is not set, -it is usually `$HOME/.config`). On macOS, this would be`$HOME/Library/Application Support/sops/age/keys.txt`. On Windows, -this would be `%AppData%\sops\age\keys.txt`. You can specify the location of this file manually by setting the -environment variable `SOPS_AGE_KEY_FILE`. The following will refer to this user configuration directory as ``. - -Knowing this, a key pair for encryption can be created by running: - -``` -mkdir -p /sops/age -age-keygen -o /sops/age/keys.txt -``` - -This prints as a result the public key (alternatively you can find it in the `keys.txt`) which looks similar to this: -``` -Public key: age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p -``` - -The following will refer to this public key as ``. -Take the public key and use it to create an encrypted version of your `secrets.yaml`: - -``` -sops --encrypt --age chart/secrets.yaml > secrets.yaml -``` - -NOTE: you can add multiple recipients (e.g.: each team member has its own age key) comma-separated: - -``` -sops --encrypt --age , chart/secrets.yaml > secrets.yaml -``` - -Now you can edit the secrets with: -``` -helm secrets edit secrets.yaml -``` - -CAUTION: If you configure an Aqua scanner download URL, make sure that username/password are URL-encoded and that the `scannercli` version matches your Aqua server version. - -Then you can install the `images` chart via `./install.sh -n ods --chart=images --values=values.images.yaml,secrets.images.yaml` (make sure to replace the namespace). You may also use `--dry-run` to see the changes first. This will install `BuildConfig` and `ImageStream` resources - start a build for each created `BuildConfig`. - -In the same way, you can install the `tasks` chart via `./install.sh -n ods --chart=tasks --values=values.tasks.yaml`. You may also use `--dry-run` to see the changes first. This will install the ODS tasks in cluster scope. - -IMPORTANT: In ODS 4.0.0, the central Nexus instance does not have the repositories `ods-temporary-artifacts` and `ods-permanent-artifacts` after the default ODS installation. If those repositories are not present in your Nexus instance yet, you will need to create them manually. The repositories are of type "raw" and should not allow re-deployment of artifacts. It is recommended to use blob stores for both. As administrator, you may prune the `ods-temporary-artifacts` repository using cleanup policies of your own choosing. The `ods-permanent-artifacts` repository should not be cleaned up or have a retention period matching your organisation policy of record retention. - -Now your `ods` namespace is fully setup and users can start to utilize Tekton pipelines for their repositories. - -## Namespaced installation - -If you are not cluster admin and still want to use ODS pipeline, you may also install ODS pipeline in your own namespace. In this case, both the "admin installation" and the "user installation" are in the same namespace. - -WARNING: This type of installation is not recommended and should only be used when a central installation is not possible. Installing in your own namespace means you need to maintain Tekton tasks and container images yourself. 
- -The installation procedure is the same as described above, with the following amendments: - -* Instead of the `ods` namespace, pick your own namespace, such as `foo-cd` -* In `values.images.yaml` and `values.tasks.yaml` add `taskKind: Task` - -IMPORTANT: Don't forget to refer to your tasks in `ods.yaml` with `kind: Task` instead of `kind: ClusterTask` - -## Updating - -You may fetch updates (e.g. new versions) of `ods-pipeline` like this: -``` -pipelineGitRef=v0.2.0 # Pick the version you want to install - -git fetch --depth=1 https://github.com/opendevstack/ods-pipeline.git $pipelineGitRef:ods-pipeline-$pipelineGitRef && \ -git checkout ods-pipeline-$pipelineGitRef && \ -git subtree split --squash --prefix=deploy/central -b subtree-split-branch-$pipelineGitRef && \ -git checkout - && \ -git subtree merge --prefix=ods-pipeline subtree-split-branch-$pipelineGitRef --squash -``` - -IMPORTANT: Compare if any new values have been introduced and update the values and secrets file accordingly. Make sure that the version-related information matches the checked out Git ref. - -Then you can update the `images` chart via `./install.sh -n ods --chart=images --values=values.images.yaml,secrets.images.yaml`. You may also use `--dry-run` to see the changes first. This will install `BuildConfig` and `ImageStream` resources in the `ods` namespace. - -In the same way, you can update the `tasks` chart via `./install.sh -n ods --chart=tasks --values=values.tasks.yaml`. You may also use `--dry-run` to see the changes first. This will install the ODS tasks in cluster scope. diff --git a/docs/adr/20211025-release-name-suffix.md b/docs/adr/20211025-release-name-suffix.md index 27451591..5d76cd45 100644 --- a/docs/adr/20211025-release-name-suffix.md +++ b/docs/adr/20211025-release-name-suffix.md @@ -4,7 +4,7 @@ Date: 2021-10-25 ## Status -Accepted +Superseded by [Namespaced Installation](20220308-namespaced-installation.md) ## Context diff --git a/docs/adr/20220308-namespaced-installation.md b/docs/adr/20220308-namespaced-installation.md new file mode 100644 index 00000000..d9bb28fc --- /dev/null +++ b/docs/adr/20220308-namespaced-installation.md @@ -0,0 +1,27 @@ +# Namespaced installation + +Date: 2022-03-08 + +## Status + +Accepted + +Supersedes [Release name suffix](20211025-release-name-suffix.md) + +## Context + +Build tasks are not easily customizable (e.g. with respect to resource consumption/quotas or sidecars) in a centralized deployment model. In addition, for every change to a task, cluster admin rights are required. + +## Decision + +Abolish a central installation altogether. Provide images from a central registry (ghcr.io). For a detailed discussion, refer to [#404](https://github.com/opendevstack/ods-pipeline/pull/404). + +## Consequences + +Every project needs to build images themselves, resulting in the installation being + +1. simpler to understand as there is no admin/user distinction +2. simpler to install as no collaboration with a cluster admin is required at all +3. simpler to customize since images are then controlled by users + +Projects can use or base their images on public `ods-pipeline` images available via ghcr.io, thus greatly lowering the burden this approach entails. 
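+
+A minimal sketch of what basing a project-owned image on a public `ods-pipeline` image could look like. The registry path, image name and tag below are assumptions (the charts hint at `registry: ghcr.io` and `namespace: opendevstack/ods-pipeline` for public images); adjust them to the images you actually consume.
+
+```Dockerfile
+# Hypothetical Dockerfile for a project-owned variant of the buildah image.
+# The image reference is an assumption; pick the published image and tag you need.
+FROM ghcr.io/opendevstack/ods-pipeline/ods-buildah:latest
+
+# Example customisation: trust an organisation-internal certificate authority.
+COPY ca-bundle.crt /etc/pki/ca-trust/source/anchors/org-ca.crt
+RUN update-ca-trust
+```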
diff --git a/docs/architecture/component-central-installation.puml b/docs/architecture/component-central-installation.puml deleted file mode 100644 index e0d98c51..00000000 --- a/docs/architecture/component-central-installation.puml +++ /dev/null @@ -1,59 +0,0 @@ -@startuml "central-installation" -!include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Container.puml -!include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Component.puml -' uncomment the following line and comment the first to use locally -' !include C4_Container.puml - -LAYOUT_LEFT_RIGHT() - -Container_Boundary(c1, "Central ODS Pipeline Installation"){ - Component(task_build_go, "ods-build-go", "ClusterTask", "Builds Go (module) applications") - Component(image_go_toolset, "ods/go-toolset", "Container Image", "Go 1.16, golangci-lint, build script") - - Component(task_build_typescript, "ods-build-typescript", "ClusterTask", "Builds TypeScript applications") - Component(image_node16_typescript_toolset, "ods/node16-typescript-toolset", "Container Image", "TypeScript 3, build script") - - Component(task_build_python, "ods-build-python", "ClusterTask", "Builds Python applications") - Component(image_python_toolset, "ods/python-toolset", "Container Image", "Python 3, build script") - - Component(task_build_gradle, "ods-build-gradle", "ClusterTask", "Builds Gradle-based applications") - Component(image_gradle_toolset, "ods/gradle-toolset", "Container Image", "JDK, Gradle, build script") - - Component(task_package_image, "ods-package-image", "ClusterTask", "Packages container images") - Component(image_buildah, "ods/buildah", "Container Image", "Buildah, Aqua scanner") - - Component(task_deploy_helm, "ods-deploy-helm", "ClusterTask", "Deploys Helm charts") - Component(image_helm, "ods/helm", "Container Image", "Helm, Skopeo, deploy script") - - Component(task_start, "ods-start", "ClusterTask", "Starts pipeline run (checkout repository, set build status, download artifacts, ...)") - Component(image_start, "ods/start", "Container Image", "Git, start script") - - Component(task_finish, "ods-finish", "ClusterTask", "Finishes pipeline run (set build status, upload artifacts, ...)") - Component(image_finish, "ods/finish", "Container Image", "Finish script") - - Component(image_sonar, "ods/sonar", "Container Image", "sonar-scanner") - - Component(image_pipeline_manager, "ods/pipeline-manager", "Container Image", "Webhook receiver and pipeline manager") -} - -Rel(task_build_go, image_go_toolset, "uses") -Rel(task_build_go, image_sonar, "uses") - -Rel(task_build_typescript, image_node16_typescript_toolset, "uses") -Rel(task_build_typescript, image_sonar, "uses") - -Rel(task_build_python, image_python_toolset, "uses") -Rel(task_build_python, image_sonar, "uses") - -Rel(task_build_gradle, image_gradle_toolset, "uses") -Rel(task_build_gradle, image_sonar, "uses") - -Rel(task_package_image, image_buildah, "uses") - -Rel(task_deploy_helm, image_helm, "uses") - -Rel(task_start, image_start, "uses") - -Rel(task_finish, image_finish, "uses") - -@enduml diff --git a/docs/architecture/component-local-installation.puml b/docs/architecture/component-local-installation.puml deleted file mode 100644 index acca5cec..00000000 --- a/docs/architecture/component-local-installation.puml +++ /dev/null @@ -1,19 +0,0 @@ -@startuml "local-installation" -!include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Container.puml -!include 
https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Component.puml -' uncomment the following line and comment the first to use locally -' !include C4_Container.puml - -LAYOUT_LEFT_RIGHT() - -Container_Boundary(c1, "Local ODS Pipeline Installation"){ - Component(route, "Route", "Route resource", "External endpoint for Bitbucket webhook") - - Component(ods_pipeline_manager, "ODS Pipeline Manager", "Custom service", "Manage pipelines") - - Component(config_maps, "Config Maps", "ConfigMap resources", "Configuration for consumption by pipelines") - - Component(secrets, "Secrets", "Secret resources", "Secret for consumption by pipelines") -} - -@enduml diff --git a/docs/architecture/component-namespaced-installation.puml b/docs/architecture/component-namespaced-installation.puml new file mode 100644 index 00000000..092e58bd --- /dev/null +++ b/docs/architecture/component-namespaced-installation.puml @@ -0,0 +1,68 @@ +@startuml "namespaced-installation" +!include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4.puml +!include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Container.puml +!include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Component.puml +' uncomment the following line and comment the first to use locally +' !include C4_Container.puml + +LAYOUT_LEFT_RIGHT() + +Container_Boundary(c1, "Namespaced ODS Pipeline Installation"){ + Boundary(config, "Configuration & Management") { + Component(route, "Route", "Route resource", "External endpoint for Bitbucket webhook") + Component(ods_pipeline_manager, "ODS Pipeline Manager", "Custom service", "Manage pipelines") + Component(config_maps, "Config Maps", "ConfigMap resources", "Configuration for consumption by pipelines") + Component(secrets, "Secrets", "Secret resources", "Secret for consumption by pipelines") + } + + Boundary(tasks, "Tasks") { + Component(task_build_go, "ods-build-go", "Task", "Builds Go (module) applications") + Component(task_build_typescript, "ods-build-typescript", "Task", "Builds TypeScript applications") + Component(task_build_python, "ods-build-python", "Task", "Builds Python applications") + Component(task_build_gradle, "ods-build-gradle", "Task", "Builds Gradle-based applications") + Component(task_package_image, "ods-package-image", "Task", "Packages container images") + Component(task_deploy_helm, "ods-deploy-helm", "Task", "Deploys Helm charts") + Component(task_start, "ods-start", "Task", "Starts pipeline run (checkout repository, set build status, download artifacts, ...)") + Component(task_finish, "ods-finish", "Task", "Finishes pipeline run (set build status, upload artifacts, ...)") + } + + Boundary(images, "Images") { + Component(image_go_toolset, "ods/go-toolset", "Container Image", "Go 1.16, golangci-lint, build script") + Component(image_node16_typescript_toolset, "ods/node16-typescript-toolset", "Container Image", "TypeScript 3, build script") + Component(image_python_toolset, "ods/python-toolset", "Container Image", "Python 3, build script") + Component(image_gradle_toolset, "ods/gradle-toolset", "Container Image", "JDK, Gradle, build script") + Component(image_buildah, "ods/buildah", "Container Image", "Buildah, Aqua scanner") + Component(image_helm, "ods/helm", "Container Image", "Helm, Skopeo, deploy script") + Component(image_start, "ods/start", "Container Image", "Git, start script") + Component(image_finish, "ods/finish", "Container Image", "Finish script") + Component(image_sonar, "ods/sonar", 
"Container Image", "sonar-scanner") + Component(image_pipeline_manager, "ods/pipeline-manager", "Container Image", "Webhook receiver and pipeline manager") + } + +} + +Rel(task_build_go, image_go_toolset, "uses") +Rel(task_build_go, image_sonar, "uses") + +Rel(task_build_typescript, image_node16_typescript_toolset, "uses") +Rel(task_build_typescript, image_sonar, "uses") + +Rel(task_build_python, image_python_toolset, "uses") +Rel(task_build_python, image_sonar, "uses") + +Rel(task_build_gradle, image_gradle_toolset, "uses") +Rel(task_build_gradle, image_sonar, "uses") + +Rel(task_package_image, image_buildah, "uses") + +Rel(task_deploy_helm, image_helm, "uses") + +Rel(task_start, image_start, "uses") + +Rel(task_finish, image_finish, "uses") + +Rel(ods_pipeline_manager, route, "exposed via") + +config_maps -[hidden]- secrets + +@enduml diff --git a/docs/architecture/container-system.puml b/docs/architecture/container-system.puml index 85bb4051..10b7b3b7 100644 --- a/docs/architecture/container-system.puml +++ b/docs/architecture/container-system.puml @@ -11,9 +11,7 @@ Person(developer, "Developer") System_Boundary(pipeline, "ODS Pipeline"){ - Container(local_installation, "Local ODS Pipeline Installation", "Kubernetes resources", "ODS pipeline manager + config maps and secrets") - - Container(central_installation, "Central ODS Pipeline Installation", "Kubernetes resources", "Tekton tasks to use in pipelines, and related images") + Container(installation, "ODS Pipeline Installation", "Kubernetes resources", "ODS pipeline manager, config maps and secrets + Tekton tasks to use in pipelines, and related images") Container(pipeline_run, "Pipeline Run", "PipelineRun Kubernetes resources", "Run of one pipeline") @@ -50,10 +48,9 @@ Rel(pipeline_run, nexus, "uploads artifacts") Rel(pipeline_run, sonarqube, "analyzes source code") Rel(pipeline_run, aqua, "scans for vulnerabilities") Rel(pipeline_run, bitbucket, "checks out source code, sets build status") -Rel(bitbucket, local_installation, "triggers") -Rel(local_installation, pipeline_run, "creates") -Rel(pipeline_run, local_installation, "uses config maps and secrets") -Rel(pipeline_run, central_installation, "uses tasks") +Rel(bitbucket, installation, "triggers") +Rel(installation, pipeline_run, "creates") +Rel(pipeline_run, installation, "uses config maps, tasks and secrets") Rel(pipeline_run, release_namespace, "deploys to") Rel(developer, bitbucket, "pushes") Rel(pipeline_run, webhook_receiver, "send status notification", $tags="optional") diff --git a/docs/authoring-tasks.adoc b/docs/authoring-tasks.adoc index 56864e52..61f72f7c 100644 --- a/docs/authoring-tasks.adoc +++ b/docs/authoring-tasks.adoc @@ -12,14 +12,16 @@ ODS provides tasks out-of-the-box that can be used e.g. to build and deploy Go a If you need more control than the official tasks offer, then creating a custom Tekton task is the way to go. You may also use a Tekton task provided by someone else, e.g. those from the link:https://github.com/tektoncd/catalog[Tekton Catalog]. -== Ceating your own task +== Creating your own task -A Tekton task is a Kubernetes resource in your OpenShift project. To create a task, simply provide the YAML definition of the task in the OpenShit console. Then you are able to reference this task from your pipeline definition (`ods.yaml`) just like official ODS tasks. 
The only difference is that ODS tasks are typically available as cluster tasks (available to every namespace) in contrast to your custom task which will only be availble within your own namespace.
+A Tekton task is a Kubernetes resource in your OpenShift project. To create a task, simply provide the YAML definition of the task in the OpenShift console. Then you are able to reference this task from your pipeline definition (`ods.yaml`) just like official ODS tasks.

A task consists of one or more steps, and each step is a container that runs in a pod. Therefore, when you create your task, you will need to define at least one step, which will need to define the container it will execute. For example, if you want to use the `curl` program in your custom task, then you will need to launch a container which has `curl` installed. This means that you either need to find a container image that has the binaries installed that you want to execute, or you need to build a container image first that suits your needs, before using it in your task definition.

The simplest YAML definition of a task looks like this:
-```
+
+[source]
+----
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
@@ -31,9 +33,9 @@ spec:
    image: busybox
    script: |
      echo hello world
-```
+----

-Of course this doesn't do anything useful in the CI pipeline. Typically you'd need to mount a workspace (containing the Git repository you are working in) and maybe offer some parameters to the user of this task. Have a look at the official ODS tasks for more sophisticated examples of existing tasks. Later on in this document we'll look at some example tasks you could create.
+Of course this doesn't do anything useful in the CI pipeline. Typically, you'd need to mount a workspace (containing the Git repository you are working in) and maybe offer some parameters to the user of this task. Have a look at the official ODS tasks for more sophisticated examples of existing tasks. Later on in this document we'll look at some example tasks you could create.

In the example above, we are using the `busybox` image, which executes what is defined in `script` in a shell (`sh`). Since `echo` is available in `sh`, we can print the words `hello world` to the log output.

@@ -62,11 +64,13 @@ In theory you can use pretty much any image that works in OpenShift (e.g. the im

=== How do I create my own container image to use in a task?

-In OpenShift, the easiest way is by creating an `ImageStream` and a `BuildConfig`. See the link:https://docs.openshift.com/container-platform/latest/cicd/builds/understanding-image-builds.html[OpenShift documentation on builds] for more information. You may also use the YAML definitions in `deploy/central/images` as an example.
+In OpenShift, the easiest way is by creating an `ImageStream` and a `BuildConfig`. See the link:https://docs.openshift.com/container-platform/latest/cicd/builds/understanding-image-builds.html[OpenShift documentation on builds] for more information. You may also use the YAML definitions in `deploy/ods-pipeline/charts/images` as an example.
+
+Occasionally, you might want to extend the images used in official tasks, e.g. to deploy additional CA certificates, configure proxy settings, etc. The `images` subchart of `ods-pipeline` provides build configurations that allow you to create images that are based on the official `ods-pipeline` images from ghcr.io. The build configurations include inline Dockerfiles that you can adjust to suit your specific needs.

=== How can I test my tasks?
-Official ODS tasks are provided with automated tests. These tests are written in Go, and can be executed locally (in a KinD cluster) via `make test`. Each test creates a `TaskRun` with certain parameters and then checks the result of the run and the state of the workspace after the run. This allows to test each task in isolation and before using the task in a pipeline in an actual OpenShift cluster. If you want, you should be able to make use of this task testing framework for your own custom tasks. However this has not been documented yet and likely needs a few adjustments to work well. +Official ODS tasks are provided with automated tests. These tests are written in Go, and can be executed locally (in a KinD cluster) via `make test`. Each test creates a `TaskRun` with certain parameters and then checks the result of the run and the state of the workspace after the run. This allows to test each task in isolation and before using the task in a pipeline in an actual OpenShift cluster. If you want, you should be able to make use of this task testing framework for your own custom tasks. However, this has not been documented yet and likely needs a few adjustments to work well. == Examples @@ -113,13 +117,11 @@ pipeline: workspace: shared-workspace ---- -Notice that the `taskRef.kind` is set to `Task` instead of `ClusterTask` (which is used for tasks which are provided by `ods-pipeline`). - === Customizing how Go applications are built While ODS offers a task to build Go applications, that task is quite opinionated and does not offer a lot of control for you as a user. For example, it will lint your code with `golangci-lint` and you cannot disable this step. This is by design to allow the platform to make certain assumptions about software created by ODS tasks. However, imagine you have some legacy code that will not pass linting and you are unable to change this (quickly). How would you create a task that does not run the linter? -As a first step, locate the `ods-build-go` cluster task in OpenShift ("Pipelines > Tasks > ClusterTasks > ods-build-go-vX.X.X") and copy the YAML. Clean up the YAML to create a new `Task`, e.g. named `build-go`, in your own namespace. A simple task would look like this: +As a first step, locate the `ods-build-go` task in OpenShift ("Pipelines > Tasks > ods-build-go-vX.X.X") and copy the YAML. Clean up the YAML to create a new `Task`, e.g. named `build-go`, in your own namespace. A simple task would look like this: [source,yaml] ---- diff --git a/docs/creating-an-ods-task.adoc b/docs/creating-an-ods-task.adoc index c1aa3359..e0e9a5e6 100644 --- a/docs/creating-an-ods-task.adoc +++ b/docs/creating-an-ods-task.adoc @@ -10,9 +10,9 @@ To create a technology-specific Task (e.g. python), the following files should b - [ ] build/package/Dockerfile.python-toolset - The Dockerfile with the dependencies and runtime. - [ ] build/package/scripts/build-python.sh - Bash script to carry out the build, linting, testing operations. -- [ ] deploy/central/images/images-chart/templates/bc-ods-build-python.yaml - BuildConfig to generate the ods-build-python image. -- [ ] deploy/central/images/images-chart/templates/is-ods-build-python.yaml - Create ImageStream resource in OpenShift. -- [ ] deploy/central/tasks-charts/templates/task-ods-build-python.yaml - The Tekton Task. +- [ ] deploy/ods-pipeline/charts/images/templates/bc-ods-build-python.yaml - BuildConfig to generate the ods-build-python image. 
+- [ ] deploy/ods-pipeline/charts/images/templates/is-ods-build-python.yaml - Create ImageStream resource in OpenShift.
+- [ ] deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml - The Tekton Task.
- [ ] docs/tasks/task-ods-build-python.adoc - To describe the task and its parameters.
- [ ] test/tasks/ods-build-python_test.go - A test file to test the behavior of the Tekton Task.
- [ ] test/testdata/workspaces/python-fastapi-sample-app - Sample application that will be used to test the Task.
@@ -20,6 +20,6 @@ To create a technology-specific Task (e.g. python), the following files should b
The following files should be **updated**:

- [ ] .github/workflows/main.yaml - Build ods python image and push it to the internal registry.
-- [ ] Makefile - Function 'start-ods-central-builds' start builds for each ODS BuildConfig (only Openshift!)
+- [ ] Makefile - Function 'start-ods-builds' starts builds for each ODS BuildConfig (OpenShift only!)

// TODO: Elaborate more on how to test a Task
diff --git a/docs/design/relationship-shared-library.adoc b/docs/design/relationship-shared-library.adoc
index fab48f47..9035bb41 100644
--- a/docs/design/relationship-shared-library.adoc
+++ b/docs/design/relationship-shared-library.adoc
@@ -1,14 +1,14 @@
-# ODS Pipeline vs. ODS Jenkins Shared Library
+= ODS Pipeline vs. ODS Jenkins Shared Library

The link:https://github.com/opendevstack/ods-jenkins-shared-library[ODS Jenkins Shared library] provides pipelines and tasks to support CI/CD flows using Jenkins. ODS Pipeline is an alternative approach using Tekton instead of Jenkins. Therefore, on a high-level, both ODS Pipeline and ODS Jenkins Shared library share the same goals. However, there are many differences between the two options, not only in terms of functionality and maturity, but also in terms of approach and concepts.

-## High-Level Comparison
+== High-Level Comparison

The Jenkins shared library can be used in a `Jenkinsfile` to avoid repeatedly creating common tasks such as building a container image from a `Dockerfile` in the Git repository. In the approach chosen by `ods-pipeline`, the pipeline definition in the `ods.y(a)ml` file is a bit like the `Jenkinsfile`: it defines which steps happen in the CI pipeline. The Tekton tasks provided by `ods-pipeline` (such as `ods-deploy-helm`) are a bit like the stages provided by the shared library (such as `odsComponentPipelineRolloutOpenShiftDeployment`).

The main difference between the Jenkins shared library and the Tekton-based approach is that users of Jenkins can script their CI pipeline in the `Jenkinsfile`, whereas Tekton pipelines are only a series of tasks defined in YAML, which is way less flexible. Another important difference is that the Jenkins shared library does not contain any language-specific instructions (such as how to build Java applications or how to build Python applications), in contrast to `ods-pipeline`, which provides one opinionated task per language (e.g. `ods-build-python`).

-## Release Manager
+== Release Manager

The concept of multiple component repos and one umbrella repo is the same in both options. With `ods-pipeline`, each individual component repo has an `ods.yml` which defines a pipeline. This pipeline produces artifacts, stored in Nexus, which can be used later by the umbrella repo.

@@ -29,24 +29,24 @@ Following are some details on what that means practically:

Finally, a few thoughts on enforcing certain tasks / flexibility / adhering to a standard.
This is in the context of GAMP or medical device software, in which a platform would like to ensure certain things to happen before software reaches production. -In general, I see two main approaches to address this: +In general, we see two main approaches to address this: 1. Introduce next to the pipeline field in ods.yml another field which describes a certain kind of pipeline in which some tasks are predefined and cannot be changed by the user. E.g. a `samdPipeline` or `levaPipeline` could pre-configure a pipeline with certain tasks and allow only very limited customisation. -2. Use the "release manager / doc gen task" described above to verify certain tasks have run or that certain artifacts are present. E.g. the task could check that there are xUnit test results for all components which are part of the app. Further, the task could check that all tasks of the pipeline are of kind `ClusterTask` and start with `ods-``. Those tasks can be assumed to be qualified. If the user added other tasks, they would need to explicitly opt-in as they need to provide their own qualification documents for those tasks then. +2. Use the "release manager / doc gen task" described above to verify certain tasks have run or that certain artifacts are present. E.g. the task could check that there are xUnit test results for all components which are part of the app. Further, the task could check that all tasks of the pipeline are of kind `Task` and start with `ods-``. Those tasks can be assumed to be qualified. If the user added other tasks, they would need to explicitly opt-in as they need to provide their own qualification documents for those tasks then. -## Document generation +== Document generation -At the moment, the orchestration pipeline only sends requests to the doc gen service, which is running in each namespace, and receives rendered documents. I believe that this architecture is needlessly complex for the Tekton approach. Instead of having a long-running service in each namespace which consumes resources and is hard to maintain (as every initiative needs to do that themselves), we could simply call the service classes from the doc gen services directly from the Groovy code of the new "release manager / doc gen task". +At the moment, the orchestration pipeline only sends requests to the doc gen service, which is running in each namespace, and receives rendered documents. We believe that this architecture is needlessly complex for the Tekton approach. Instead of having a long-running service in each namespace which consumes resources and is hard to maintain (as every initiative needs to do that themselves), we could simply call the service classes from the doc gen services directly from the Groovy code of the new "release manager / doc gen task". -## Quickstarters +== Quickstarters Quickstarters currently provide a `Jenkinsfile.template` which will get rendered into the resulting component repository. With `ods-pipeline`, this would be replaced by an `ods.yml` file. -Furhter, the quickstarter provisioning itself is also done by a Jenkins pipline, based on the `Jenkinsfile` in the quickstarter. This also needs to be replaced with something. Right now, the quickstart process consists of creating a repo, and creating OpenShift resources. 
I would stop creating OpenShift resources in the new world, and simply require each quickstarter to supply a Helm chart that is then deployed automatically with the first pipeline run (we need to be infrastructure-as-code with Helm as no magic export functionality exists like with Tailor). With that in mind, we only need to create a repo. Often, that step includes running tech-specific tasks (e.g. generators). I suggest that quickstarter authors define a TaskRun, which then gets executed. That way they can pick their own container image and do whatever they want, in whatever language they want. +Further, the quickstarter provisioning itself is also done by a Jenkins pipeline, based on the `Jenkinsfile` in the quickstarter. This also needs to be replaced with something. Right now, the quickstart process consists of creating a repo, and creating OpenShift resources. We would stop creating OpenShift resources in the new world, and simply require each quickstarter to supply a Helm chart that is then deployed automatically with the first pipeline run (we need to be infrastructure-as-code with Helm as no magic export functionality exists like with Tailor). With that in mind, we only need to create a repo. Often, that step includes running tech-specific tasks (e.g. generators). We suggest that quickstarter authors define a TaskRun, which then gets executed. That way they can pick their own container image and do whatever they want, in whatever language they want. Users will still be able to consume quickstarters via the provisioning app. -Authoring a new quickstarter means potentially creating a new task (say a Rust task). This task would then need to be installed centrally as a ClusterTask like the others. I do not know yet how a less official task could be shared easily ... each namespace would need to install that task separately. +Authoring a new quickstarter means potentially creating a new task (say a Rust task). We do not know yet how a less official task could be shared easily ... each namespace would need to install that task separately. To create such a task, authors need to have: * expert knowledge of the technology involved in the QS (such as Rust for example) @@ -55,31 +55,29 @@ To create such a task, authors need to have: * enough knowledge of Go to supply an automated test using the test framework * some knowledge of any programming language to write the logic of the task. Go would be preferred here but is not required. If it is very little logic, bash will do fine as well. -## Pro's / Con's and Limitations +== Pro's / Con's and Limitations The following pro's and con's are from a platform perspective, not necessarily from an individual developer's perspective. Also, these are in relation to the existing shared library approach. -### Pro +=== Pro * Tekton is very rigid compared to Jenkins: the task list is a simple array. No crazy scripting possible. -* There is a very clear separation between "official" tasks at cluster scope and custom tasks which are at namespace scope only. Support teams can easily identify which parts of the pipeline use supported tasks and which don't. -* Tekton tasks have a pretty clear interface (parameters and results). Updating between versions should be easy and predictable. However we need to be cautious not to depend to much on workspace state. -* As pipelines cannot define inline tasks (only pipeline runs can), users cannot define how to build an application (e.g. if they want to run a linter or not). 
Instead users must choose from the official task catalog. This allows to control much better how applications are build and to improve on that process. +* Tekton tasks have a pretty clear interface (parameters and results). Updating between versions should be easy and predictable. However, we need to be cautious not to depend too much on workspace state. * No long-running Jenkins instance which saves 5Gi memory per project * No complicated base images - the existing Jenkins solution is a bit brittle where many updates of the base images (be it Jenkins, Java, or something else) broke something down the road * Jenkins had only one agent image, which made it hard to use for monorepos using multiple technologies (e.g. TypeScript and Java). The Tekton approach should handle monorepos and multiple repos equally well. -* The Tekton implementation can run in a pure Kubernetes cluster, allowing the test suite to be executed in GitHub Actions. Also the target cluster does not have to be OpenShift, allowing to deploy into EKS for example. +* The Tekton implementation can run in a pure Kubernetes cluster, allowing the test suite to be executed in GitHub Actions. Also, the target cluster does not have to be OpenShift, allowing to deploy into EKS for example. * The artifact approach avoids the need to run all components in the release manager pipeline (speeding things up) while at the same time storing all relevant information which should be useful for GxP/SaMD. -### Con +=== Con -* Tekton tasks are only customizable via paramters so many people might need to create their own tasks because the platform cannot cover all use cases. -* Each task is one pod - this cause performance overhead as spinning up pods is a bit slow. Jenkins spins up only one pod and therefore is faster. +* Tekton tasks are only customizable via parameters so many people might need to create their own tasks because the platform cannot cover all use cases. +* Each task is one pod - this causes performance overhead as spinning up pods is a bit slow. Jenkins spins up only one pod and therefore is faster. * There is no way to install plugins or use the UI to e.g. see test execution trends. -### Limitations +=== Limitations -* Pipeline users cannot specify task resources. This was possible in Jenkins and also used by many users. See issue https://github.com/opendevstack/ods-pipeline/issues/195. Currently support is not even on the Tekton roadmap. Only workaround: multiple tasks or high defaults. If that does not work, users must create their own copies of the tasks. -* Pipeline users cannot specify sidecars. This was possible in Jenkins and also used by many users (e.g. to spin up a database for testing). See issue https://github.com/opendevstack/ods-pipeline/issues/135. Currently support is not even on the Tekton roadmap. Only workaround: multiple tasks. If that does not work (e.g. you need more than one sidecar), users must create their own copies of the tasks. +* Pipeline users cannot specify task resources dynamically without changing the task at hand. This was possible in Jenkins and also used by many users. See issue https://github.com/opendevstack/ods-pipeline/issues/195. Currently, support is not even on the Tekton roadmap. Only workaround: multiple tasks or high defaults. +* Pipeline users cannot specify sidecars dynamically without changing the task at hand. This was possible in Jenkins and also used by many users (e.g. to spin up a database for testing). See issue https://github.com/opendevstack/ods-pipeline/issues/135. 
Currently, support is not even on the Tekton roadmap. Only workaround: multiple tasks.
* As tasks are pods, one needs a PVC to work on. Using a PVC has an effect on how many pipelines can run in parallel. See issue https://github.com/opendevstack/ods-pipeline/issues/160. It would be possible to implement support for one PVC per repo, or even one PVC per branch.
diff --git a/docs/design/software-architecture.adoc b/docs/design/software-architecture.adoc
index d3efadf0..a107450a 100644
--- a/docs/design/software-architecture.adoc
+++ b/docs/design/software-architecture.adoc
@@ -38,13 +38,9 @@ The following diagram illustrates in more detail how the various components inte

image::http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubusercontent.com/opendevstack/ods-pipeline/master/docs/architecture/container-system.puml[Software System]

-As the diagram above shows, the ODS Pipeline installation is made up of two containers, one centrally installed by the cluster admin, and one locally installed by project admins (once per project). The following diagram shows more detail about the central installation:
+As the diagram above shows, the ODS Pipeline installation is made up of one container installed by project admins (once per project), containing configuration (config maps and secrets), the pipeline manager and task resources (Tekton task definitions and their referenced images). The following diagram shows more detail about the installation:

-image::http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubusercontent.com/opendevstack/ods-pipeline/master/docs/architecture/component-central-installation.puml[Central Installation]
-
-The following diagram shows more detail about the local installation:
-
-image::http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubusercontent.com/opendevstack/ods-pipeline/master/docs/architecture/component-local-installation.puml[Local Installation]
+image::http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubusercontent.com/opendevstack/ods-pipeline/master/docs/architecture/component-namespaced-installation.puml[Namespaced Installation]

=== Interfaces

@@ -54,7 +50,7 @@ image::http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubuse
| Bitbucket
| Webhook Trigger
| HTTP POST request

-| Local ODS Pipeline Installation
+| ODS Pipeline Installation
| Endpoint is an exposed event listener.

| Task `ods-start`
@@ -156,14 +152,11 @@ ODS Pipeline runs on Redhat OpenShift Container Platform, and is embedded into a

== Deployment

-The are two main deployments of the system:
-
-* a central installation managed by a cluster administrator
-* many local installation managed by project administrators
+There is a single deployment of the system (typically in the project's `-cd` namespace), managed by project administrators.

-The central installation provides the tasks that projects can consume as `ClusterTask` resources. The local installation (in `-cd` namespaces) allow to trigger pipelines in response to Bitbucket events.
+The installation provides the tasks that projects can consume as `Task` resources. It allows triggering pipelines in response to Bitbucket events.

-Both deployments are described as Helm charts: the central installation is based on `deploy/central` and the local installation is based on `cd-namespace/central`. See the link:docs/admin-installation.adoc[Admin Installation Guide] and the link:docs/user-installation.adoc[User Installation Guide].
+The deployment is described in a Helm chart. For details, see the link:../installation.adoc[Installation Guide]. == Appendix diff --git a/docs/design/software-design-specification.adoc b/docs/design/software-design-specification.adoc index 6959c3eb..b32db6b7 100644 --- a/docs/design/software-design-specification.adoc +++ b/docs/design/software-design-specification.adoc @@ -8,7 +8,7 @@ The purpose of this document is to describe the technical realization of the giv == Scope -The following software design specification will provide the technical realization of ODS pipeline version 0.1. +The following software design specification will provide the technical realization of ODS pipeline version 0.2.0. == Definitions and Abbreviations @@ -22,9 +22,9 @@ N/A === Developed components -As described in the architecture, the system is split into two main containers: a central installation and multiple local user installations. This document follows that structure. +As described in the architecture, the system is installed into local namespaces. This document explains the individual components and their interactions. -==== Central ODS Pipeline Installation +==== ODS Pipeline Installation ===== Shared `ods-sonar` image @@ -58,7 +58,7 @@ If the server edition supports it, the branch parameter shall be set, unless the [cols="1,1,3"] |=== | SDS-TASK-1 -| `ods-build-go` ClusterTask resource +| `ods-build-go` Task resource a| The task defines two steps: . Build Go (module) applications (referencing SDS-TASK-2 and executing SDS-TASK-3) @@ -66,7 +66,7 @@ a| The task defines two steps: Input parameters: -* `working-dir`: allows to customize which directory is used as the Go module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`. +* `working-dir`: allows customizing which directory is used as the Go module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`. * `enable-cgo`: allows to enable `CGO` * `go-os`: sets target operating system (`GOOS`) * `go-arch`: sets target architecture (`GOARCH`) @@ -105,7 +105,7 @@ Supplies default SonarQube project properties file if required (SDS-SHARED-3). [cols="1,1,3"] |=== | SDS-TASK-4 -| `ods-build-gradle` ClusterTask resource +| `ods-build-gradle` Task resource a| The task defines two steps: . Build Gradle module (referencing SDS-TASK-5 and executing SDS-TASK-6) @@ -113,7 +113,7 @@ a| The task defines two steps: Input parameters: -* `working-dir`: allows to customize which directory is used as the Gradle module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`. +* `working-dir`: allows customizing which directory is used as the Gradle module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`. * `gradle-additional-tasks`: additional gradle tasks to be passed to the gradle build * `gradle-options`: options to be passed to the gradle build * `output-dir`: sets destination directory of built binary @@ -121,7 +121,7 @@ Input parameters: * `sonar-skip`: skips SonarQube analysis | SDS-TASK-5 -| `ods-gradle-toolset` contaner image +| `ods-gradle-toolset` container image | Container image for building Gradle modules. Based on `ubi8/openjdk-17` (SDS-EXT-11), includes SDS-EXT-12, SDS-SHARED-3, SDS-TASK-6 and SDS-TASK-26. | SDS-TASK-6 @@ -130,7 +130,7 @@ a| Builds a Gradle module that provides a gradle build script into `docker/app.j The destination directory can be changed by exporting the environment variable `ODS_OUTPUT_DIR`. 
-Runs `gradlew clean build` to build the Gradle module, using optionas and additional tasks as passed from SDS-TASK-4. +Runs `gradlew clean build` to build the Gradle module, using options and additional tasks as passed from SDS-TASK-4. Generated unit test reports are placed in the working directory (for SonarQube to pick them up) and copied into `.ods/artifacts/xunit-reports`. @@ -148,7 +148,7 @@ Supplies default SonarQube project properties file if required (SDS-SHARED-3). [cols="1,1,3"] |=== | SDS-TASK-7 -| `ods-start` ClusterTask resource +| `ods-start` Task resource a| Task to start pipeline. References SDS-TASK-8 and executes SDS-TASK-9. Input parameters: TODO @@ -159,9 +159,9 @@ Input parameters: TODO | SDS-TASK-9 | `start` binary -a| The task checks out the repository of given URL and Git ref into the mounted workspace, cleaning previous contents, except for the caching area at `./ods-cache`. If the checked out `ods.y(a)ml` configures any child repositories, those are checked out as well from the configured URL and Git ref. If a release branch (`release/`) corresponding to the current version exists, it is preferred. All checkouts are shallow and include submodules. +a| The task checks out the repository of a given URL and Git ref into the mounted workspace, cleaning previous contents, except for the caching area at `./ods-cache`. If the checked out `ods.y(a)ml` configures any child repositories, those are checked out as well from the configured URL and Git ref. If a release branch (`release/`) corresponding to the current version exists, it is preferred. All checkouts are shallow and include submodules. -A build task may store cached dependencies under directory `.ods-cache/deps//` where technology-name provides a namespace. For example this could be 'npm' if at some point in the future this would be supported. The task deletes files in folder `.ods-cache/deps/`. All other files in `.ods-cache` are reserved for future use. While they are not removed you must not rely on those locations except for experimentation. +A build task may store cached dependencies under directory `.ods-cache/deps//` where technology-name provides a namespace. For example this could be 'npm' if at some point in the future this would be supported. The task deletes files in folder `.ods-cache/deps/`. All other files in `.ods-cache` are reserved for future use. While they are not removed you must not rely on those locations except for experimentation. Context information is stored under `.ods` for each checked out repository: @@ -181,13 +181,13 @@ If any child repository is missing a successful pipeline run artifact for the ch [cols="1,1,3"] |=== | SDS-TASK-10 -| `ods-finish` ClusterTask resource +| `ods-finish` Task resource a| Task to finish pipeline. References SDS-TASK-11 and executes SDS-TASK-12. Input parameters: TODO | SDS-TASK-11 -| `ods-finish` contaner image +| `ods-finish` container image | Container image to start a pipeline. Based on `ubi8/ubi-minimal` (SDS-EXT-2), includes SDS-TASK-12. | SDS-TASK-12 @@ -211,7 +211,7 @@ Status notification message, webhook URL, content type, HTTP method, and trigger [cols="1,1,3"] |=== | SDS-TASK-13 -| `ods-build-python` ClusterTask resource +| `ods-build-python` Task resource a| The task defines two steps: . 
Build Python applications (referencing SDS-TASK-14 and executing SDS-TASK-15) @@ -220,7 +220,7 @@ a| The task defines two steps: Input parameters: TODO | SDS-TASK-14 -| `ods-python-toolset` contaner image +| `ods-python-toolset` container image | Container image to build Python applications. Based on `ubi8/python-39` (SDS-EXT-28), includes SDS-SHARED-3, SDS-TASK-15 and SDS-TASK-27. | SDS-TASK-15 @@ -240,10 +240,12 @@ Supplies default SonarQube project properties file if required (SDS-SHARED-3). | Default configuration for Python SonarQube project. |=== +===== `ods-build-typescript` task + [cols="1,1,3"] |=== | SDS-TASK-16 -| `ods-build-typescript` ClusterTask resource +| `ods-build-typescript` Task resource a| The task defines two steps: . Build TypeScript applications (referencing SDS-TASK-17 and executing SDS-TASK-18) @@ -251,7 +253,7 @@ a| The task defines two steps: Input parameters: -* `working-dir`: allows to customize which directory is used as the TypeScript module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`. +* `working-dir`: allows customizing which directory is used as the TypeScript module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`. * `output-dir`: sets destination directory of the build output * `build-dir`: sets source directory of the build output * `copy-node-modules`: enables copying node_modules directory to the output directory @@ -274,9 +276,9 @@ If `copy-node-modules` is `true` the `node_modules` directory is copied into the For traceability package.json and package-lock.json are copied into the `dist` directory inside the output directory as well. This happens at the end of the script execution to avoid confusing the subsequent running of tests. -Runs `npm run test`, creating code coverage and xUnit reports. The artifacts are placed in the working directory and in `.ods/artifacts/code-coverage` and `.ods/artifacts/xunit-reports`, respectively. +Runs `npm run test`, creating code coverage and xUnit reports. The artifacts are placed in the working directory and in `.ods/artifacts/code-coverage` and `.ods/artifacts/xunit-reports`, respectively. If the artifacts are already found in `.ods/artifacts`, then testing is skipped and the artifacts are copied to the working directory to expose them to SonarQube. -Runs `eslint` to lint to lint the source code and fails if there are any errors or warnings. The files to lint default to all files with an `.js`, `.ts`, `.jsx`, `.tsx` extension inside `src` and can be set by the `lint-file-ext` task parameter. The amount of allowed warnings defaults to 0 and can be set by the `max-lint-warnings` task parameter. +Runs `eslint` to lint the source code and fails if there are any errors or warnings. The files to lint default to all files with an `.js`, `.ts`, `.jsx`, `.tsx`, `.svelte` extension inside `src` and can be set by the `lint-file-ext` task parameter. The amount of allowed warnings defaults to 0 and can be set by the `max-lint-warnings` task parameter. Supplies default SonarQube project properties file if required (SDS-SHARED-3). @@ -290,11 +292,11 @@ Supplies default SonarQube project properties file if required (SDS-SHARED-3). [cols="1,1,3"] |=== | SDS-TASK-19 -| `ods-package-image` ClusterTask resource +| `ods-package-image` Task resource | Builds and scans a container image, then pushes it to a registry. References SDS-TASK-20 and executes SDS-TASK-21. 
| SDS-TASK-20 -| `ods-buildah` contaner image +| `ods-buildah` container image | Container image to build, scan and push images. Based on `ubi8` (SDS-EXT-1), includes SDS-EXT-17, SDS-EXT-18 and SDS-TASK-21. If the build argument `aquasecScannerUrl` is set, the referenced Aqua Scanner binary is installed into the image as well. | SDS-TASK-21 @@ -317,11 +319,11 @@ If the Aqua scanner is installed in the base image, the pushed image shall be s [cols="1,1,3"] |=== | SDS-TASK-22 -| `ods-deploy-helm` ClusterTask resource +| `ods-deploy-helm` Task resource | Deploys a Helm chart and promotes images. References SDS-TASK-23 and executes SDS-TASK-24. | SDS-TASK-23 -| `ods-helm` contaner image +| `ods-helm` container image | Container image to promote images and deploy Helm charts. Based on `ubi8/ubi-minimal` (SDS-EXT-2), includes SDS-EXT-9, SDS-EXT-15, SDS-EXT-17, SDS-EXT-19, SDS-EXT-20, SDS-EXT-21, SDS-EXT-23, SDS-EXT-24 and SDS-TASK-24. | SDS-TASK-24 @@ -344,12 +346,10 @@ Upgrades (or installs) a Helm chart. * A values file containing the Git commit SHA is auto-generated and added to the Helm diff/upgrade invocation. * Any encrypted secrets files are decrypted on the fly, using the age key provided by the `Secret` identified by the `age-key-secret` parameter (defaulting to `helm-secrets-age-key`). The secret is expected to expose the age key under the `key.txt` field. * The "app version" is set to the Git commit SHA and the "version" is set to given `version` if any, otherwise the chart version in `Chart.yaml`. -* Charts in any of the respositories configured in `ods.y(a)ml` are packaged according to the same rules and added as subcharts. +* Charts in any of the repositories configured in `ods.y(a)ml` are packaged according to the same rules and added as subcharts. * The target namespace may also be external to the cluster in which the pipeline runs. The API server is identified by the `apiServer` field of the environment configuration, and the credential token of `apiCredentialsSecret` is used to authenticate. |=== -==== Local ODS Pipeline Installation - ===== Pipeline Manager [cols="1,1,3"] @@ -370,15 +370,16 @@ Upgrades (or installs) a Helm chart. | `pipeline-manager` binary a| The pipeline manager parses the JSON payload and handles `repo:refs_changed` and `pr:opened` events. Other events are dropped. -For Git commits which message instructs to skip CI, no pipelines are triggererd. Instructions may be anywhere in the commit message and may be one of (case insensitive): +For Git commits of which the commit message instructs skipping CI, no pipelines are triggered. Instructions may be anywhere in the commit message and may be one of (case-insensitive): -``` +[source] +---- [ci skip] [skip ci] ***NO_CI*** -``` +---- -A pipeline is created or updated corresponding to the Git branch received in the webhook request. The pipeline name is made out of the component and the sanitized branch. A maximum of 63 characters is respected. Tasks (including `finally` tasks) of the pipline are read from the ODS config file in the repository. +A pipeline is created or updated corresponding to the Git branch received in the webhook request. The pipeline name is made out of the component and the sanitized branch. A maximum of 63 characters is respected. Tasks (including `finally` tasks) of the pipeline are read from the ODS config file in the repository. A PVC is created per repository unless it exists already. The name is equal to `ods-workspace-` (shortened to 63 characters if longer). 
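To make the naming and skip rules above concrete, the following Go sketch shows how a commit message could be checked for the skip instructions and how a pipeline or PVC name could be sanitized and capped at 63 characters. The helper names are hypothetical; this is not the code of `internal/manager`, only an illustration of the described behaviour.

[source,go]
----
package main

import (
	"fmt"
	"strings"
)

// skipMarkers are the case-insensitive instructions that suppress pipeline runs.
var skipMarkers = []string{"[ci skip]", "[skip ci]", "***no_ci***"}

// shouldSkip reports whether the commit message contains a skip instruction.
func shouldSkip(commitMessage string) bool {
	msg := strings.ToLower(commitMessage)
	for _, marker := range skipMarkers {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}

// shortenedName joins component and branch, replaces characters that are not
// allowed in Kubernetes resource names, and caps the result at 63 characters.
func shortenedName(component, branch string) string {
	sanitize := func(r rune) rune {
		if (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' {
			return r
		}
		return '-'
	}
	name := strings.Map(sanitize, strings.ToLower(component+"-"+branch))
	if len(name) > 63 {
		name = name[:63]
	}
	return name
}

func main() {
	fmt.Println(shouldSkip("update docs [skip ci]"))            // true
	fmt.Println(shortenedName("todo-backend", "feature/ABC-1")) // todo-backend-feature-abc-1
}
----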
This PVC is then used in the pipeline as a shared workspace. diff --git a/docs/example-project.adoc b/docs/example-project.adoc index 8e3195ce..e55fa18d 100644 --- a/docs/example-project.adoc +++ b/docs/example-project.adoc @@ -8,7 +8,7 @@ The goal of this document is to provide a concrete example of a (fake) applicati For this example, we assume that we deal with a simple todo application. The project key in Bitbucket is `TODO`. The application is made up of two components, a backend (repository `todo-backend`) written in Go, and a frontend (repository `todo-frontend`) written in TypeScript. -The example project is deployed on a single OpenShift cluster which has three OpenShift projects to deploy into: `todo-dev`, `todo-qa` and `todo-prod`. ODS pipeline is installed in the `todo-cd` project as per the link:user-installation.adoc[User Installation Guide]. +The example project is deployed on a single OpenShift cluster which has three OpenShift projects to deploy into: `todo-dev`, `todo-qa` and `todo-prod`. ODS pipeline is installed in the `todo-cd` project as per the link:installation.adoc[Installation Guide]. == Pipeline Configuration @@ -32,14 +32,14 @@ pipeline: tasks: - name: build-go taskRef: - kind: ClusterTask + kind: Task name: ods-build-go-v0-2-0 workspaces: - name: source workspace: shared-workspace - name: package-image taskRef: - kind: ClusterTask + kind: Task name: ods-package-image-v0-2-0 runAfter: - build-go @@ -48,7 +48,7 @@ pipeline: workspace: shared-workspace - name: deploy-helm taskRef: - kind: ClusterTask + kind: Task name: ods-deploy-helm-v0-2-0 runAfter: - package-image @@ -73,14 +73,14 @@ pipeline: tasks: - name: build-typescript taskRef: - kind: ClusterTask + kind: Task name: ods-build-typescript-v0-2-0 workspaces: - name: source workspace: shared-workspace - name: package-image taskRef: - kind: ClusterTask + kind: Task name: ods-package-image-v0-2-0 runAfter: - build-typescript @@ -89,7 +89,7 @@ pipeline: workspace: shared-workspace - name: deploy-helm taskRef: - kind: ClusterTask + kind: Task name: ods-deploy-helm-v0-2-0 runAfter: - package-image diff --git a/docs/getting-started.adoc b/docs/getting-started.adoc index 38475848..d9054a96 100644 --- a/docs/getting-started.adoc +++ b/docs/getting-started.adoc @@ -2,7 +2,7 @@ = Getting Started -This guide explains how you can start to use ODS pipeline in your project. It is assumed you have read the link:introduction.adoc[introduction] and have aleady link:user-installation.adoc[installed] ODS pipeline in an OpenShift project. +This guide explains how you can start to use ODS pipeline in your project. It is assumed you have read the link:introduction.adoc[introduction] and have aleady link:installation.adoc[installed] ODS pipeline in an OpenShift project. The guide will look at two scenarios: @@ -86,7 +86,7 @@ def stageUnitTest(def context) { } ---- -Compared to Jenkins, you cannot define any scripts directly in the `ods.yaml` file describing your Tekton pipeline. You may only reference existing tasks and adjust their parameters. As a consequence, the build related stages (`stageCheckFormat`, `stageLint`, `stageUnitTest`, `stageBuild`) are provided by a cluster task (named `ods-build-go-vX`) instead. +Compared to Jenkins, you cannot define any scripts directly in the `ods.yaml` file describing your Tekton pipeline. You may only reference existing tasks and adjust their parameters. 
As a consequence, the build related stages (`stageCheckFormat`, `stageLint`, `stageUnitTest`, `stageBuild`) are provided by a task (named `ods-build-go-vX`) instead. An equivalent `ods.yaml` for the above `Jenkinsfile` looks like this: @@ -106,14 +106,14 @@ pipeline: tasks: - name: backend-build-go taskRef: - kind: ClusterTask + kind: Task name: ods-build-go-v0-2-0 workspaces: - name: source workspace: shared-workspace - name: backend-package-image taskRef: - kind: ClusterTask + kind: Task name: ods-package-image-v0-2-0 runAfter: - backend-build-go @@ -122,7 +122,7 @@ pipeline: workspace: shared-workspace - name: backend-deploy taskRef: - kind: ClusterTask + kind: Task name: ods-deploy-helm-v0-2-0 runAfter: - backend-package-image @@ -136,7 +136,7 @@ What has been done in Jenkins in `stageCheckFormat`, `stageLint`, `stageUnitTest Building the container image is now done in `ods-package-image-v0-2-0` instead of in `odsComponentStageBuildOpenShiftImage`. The task continues to use the existing `docker/Dockerfile` file, which does not need to change much if at all. Consult the task reference in question for more information. In the case of Go, the link:tasks/ods-build-go.adoc[`ods-build-go` task reference] states that the resulting Go binary is named `app` and placed into the `docker` directory. Make sure that your `docker/Dockerfile` copies `app`, not e.g. `app_linux_amd64` (as is the default for an ODS 4.x based Go quickstarter). Finally, the application is deployed in `ods-deploy-helm-v0-2-0` as opposed to `odsComponentStageRolloutOpenShiftDeployment`. -Let's look at this deployment piece in detail. The new Tekton task makes use of Helm to define and deploy the Kubernetes resources to use. Your existing repository might not define Kubernetes resources at all (this is the default), or they might be expressed as OpenShift templates (in a folder named `openshift`) and applied with link:https://github.com/opendevstack/tailor[Tailor]. ODS pipeline only supports Helm at the moment, and requires the Kuberenetes resources (the Helm "chart") to be under version control as described below in the <> section. +Let's look at this deployment piece in detail. The new Tekton task makes use of Helm to define and deploy the Kubernetes resources to use. Your existing repository might not define Kubernetes resources at all (this is the default), or they might be expressed as OpenShift templates (in a folder named `openshift`) and applied with link:https://github.com/opendevstack/tailor[Tailor]. ODS pipeline only supports Helm at the moment, and requires the Kubernetes resources (the Helm "chart") to be under version control as described below in the <> section. After the `ods.yaml` and the Helm `chart` are added to the repository, the final step is to create a Bitbucket webhook pointing to the ODS pipeline installation. Disable the existing Jenkins webhook setting before creating a new one as described below in the <> section. 
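A detail worth understanding when setting up the webhook is the webhook secret: the pipeline manager uses it to verify that incoming payloads really originate from Bitbucket. The following Go sketch illustrates such a check, assuming the signature arrives as an HMAC-SHA256 hex digest in an `X-Hub-Signature`-style header; it is only an illustration under that assumption, not the pipeline manager's actual implementation.

[source,go]
----
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// validSignature compares the HMAC-SHA256 digest of the webhook payload,
// computed with the shared webhook secret, against the received signature.
func validSignature(payload []byte, signatureHeader, secret string) bool {
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write(payload)
	expected := "sha256=" + hex.EncodeToString(mac.Sum(nil))
	// hmac.Equal performs a constant-time comparison.
	return hmac.Equal([]byte(expected), []byte(signatureHeader))
}

func main() {
	payload := []byte(`{"eventKey":"repo:refs_changed"}`)
	secret := "s3cr3t"
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write(payload)
	header := "sha256=" + hex.EncodeToString(mac.Sum(nil))
	fmt.Println(validSignature(payload, header, secret)) // true
}
----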
@@ -162,14 +162,14 @@ pipeline: tasks: - name: backend-build-go taskRef: - kind: ClusterTask + kind: Task name: ods-build-go-v0-2-0 workspaces: - name: source workspace: shared-workspace - name: backend-package-image taskRef: - kind: ClusterTask + kind: Task name: ods-package-image-v0-2-0 runAfter: - backend-build-go @@ -178,7 +178,7 @@ pipeline: workspace: shared-workspace - name: backend-deploy taskRef: - kind: ClusterTask + kind: Task name: ods-deploy-helm-v0-2-0 runAfter: - backend-package-image diff --git a/docs/helm-secrets.adoc b/docs/helm-secrets.adoc index bbfe029b..2b058b8e 100644 --- a/docs/helm-secrets.adoc +++ b/docs/helm-secrets.adoc @@ -1,12 +1,12 @@ -# Working with secrets in Helm += Working with secrets in Helm The link:tasks/ods-deploy-helm.adoc[`ods-deploy-helm`] task supports encrypted secrets via the link:https://github.com/jkroepke/helm-secrets[`helm-secrets`] plugin, using link:https://github.com/mozilla/sops[`sops`] and link:https://github.com/FiloSottile/age[`age`] under the hood. All Helm values which contain sensitive information such as passwords should be encrypted at rest. This guide will show how to do that. -## Overview +== Overview `helm-secrets` supports different ways to encrypt secrets at rest. The `ods-deploy-helm` task supports age key encryption. In a nutshell, the content is encrypted using a list of age public keys. Owners of the corresponding age secret keys can decrypt the content. As such, you must encrypt the content against an age public key and the corresponding age secret key must be made available to `ods-deploy-helm`. -## Local Setup +== Local Setup To begin with, you'll need an age key. If you do not have an age key yet or want to create a new one for this purpose, you can generate one via `age-keygen`. As described in the `sops` link:https://github.com/mozilla/sops#22encrypting-using-age[documentation], when decrypting using `age`, `sops` will look for a text file name `keys.txt` located in a `sops` subdirectory of your user configuration directory. Therefore it is best to place your age key in that directory. On Linux, this would be `$XDG_CONFIG_HOME/sops/age/keys.txt` (if `$XDG_CONFIG_HOME` is not set, it is usually `$HOME/.config`). On macOS, this would be `$HOME/Library/Application\ Support/sops/age/keys.txt`. On Windows, this would be `%AppData%\sops\age\keys.txt`. @@ -14,10 +14,11 @@ WARNING: If you do not use your user configuration directory as the location of A key pair for encryption can be created by running: -``` +[source] +---- mkdir -p /sops/age age-keygen -o /sops/age/keys.txt -``` +---- At the end of the generation, in the generated file you see a commented line with `#public key: `. Make a note of the `` as we'll need it in a second. @@ -25,25 +26,29 @@ Finally, if you did not install `helm` or the `helm-secrets` plugin locally yet, Now you are ready to work with secret files! -## Editing Secrets +== Editing Secrets The following will refer to your age public key as ``. Take this public key and use it to create an encrypted version of your not-yet encrypted `secrets.yaml`: -``` +[source] +---- sops --encrypt --age --in-place secrets.yaml -``` +---- NOTE: you can add multiple recipients (e.g.: each team member has its own age key) comma-separated: -``` +[source] +---- sops --encrypt --age , --in-place secrets.yaml -``` +---- From now on, you can edit the secrets with: -``` + +[source] +---- helm secrets edit secrets.yaml -``` +---- The `helm-secrets` plugin offers a few commands to edit with secrets. 
See all of them via `helm secrets --help`. @@ -51,24 +56,27 @@ To create a new secrets file or edit an existing one, use `helm secrets edit \ --from-file=key.txt=/dev/stdin -``` +---- This will create a `Secret` named `helm-secrets-age-key` in the namespace you specify. The age key is then the value of the field `key.txt`. The secret will automatically be detected by the `ods-deploy-helm` task, and the age key will be loaded via `SOPS_AGE_KEY_FILE` so that the `helm-secrets` plugin can use it. Note that the field must be named `key.txt`. If you wish to use a different secret name (e.g. to use different private keys for different repos in the same namespace), you may do so, by supplying a value for the `age-key-secret` parameter of the `ods-deploy-helm` task. -Note that if you used the link:user-installation.adoc[User Installation] instructions to setup the namespace in which your pipelines run, you have a Git repository which defines the infrastructure of this namespace. If you want to use that approach as well to define the `Secret` holding the age key, you can do so by adding a `secret.yaml` file to the chart and have Helm create it instead of using `kubectl create` as explained above. And because the user installation instructions use `git subtree` as a means to setup/update the Helm chart, any changes (such as a custom `secret.yaml` file) are preserved during updates of `ods-pipeline`. +Note that if you used the link:installation.adoc[Installation Guide] to setup the namespace in which your pipelines run, you have a Git repository which defines the infrastructure of this namespace. If you want to use that approach as well to define the `Secret` holding the age key, you can do so by adding a `secret.yaml` file to the chart and have Helm create it instead of using `kubectl create` as explained above. And because the link:installation.adoc[Installation Guide] uses `git subtree` as a means to setup/update the Helm chart, any changes (such as a custom `secret.yaml` file) are preserved during updates of `ods-pipeline`. -## Adding more recipients to encrypted files +== Adding more recipients to encrypted files If you want to give additional people access to view and edit secrets, you can do so via the following: -``` + +[source] +---- sops -r -i --add-age secrets.yaml -``` +---- More information can be found in link:https://github.com/mozilla/sops#adding-and-removing-keys[`sops documentation`] diff --git a/docs/user-installation.adoc b/docs/installation.adoc similarity index 72% rename from docs/user-installation.adoc rename to docs/installation.adoc index d8096512..970f8d0f 100644 --- a/docs/user-installation.adoc +++ b/docs/installation.adoc @@ -1,4 +1,4 @@ -# User Installation Guide += Installation Guide :toc: This guide will show how to install `ods-pipeline` in an existing ODS project. It is possible to use the new Tekton pipelines approach and the classic Jenkins approach side by side. @@ -7,10 +7,11 @@ Note that at the moment, `ods-pipeline` is somewhat compatible with an existing The guide will install the following required resources: +* Basic `BuildConfig`, `ImageStream` and `Task` resources * `ConfigMap` and `Secret` resources, e.g. 
holding credentials of centrally installed tools such as Nexus and SonarQube -* ODS pipeline manager, wich is managing and triggering pipelines in response to Bitbucket webhook requests +* ODS pipeline manager, which is managing and triggering pipelines in response to Bitbucket webhook requests -## Prerequisites +== Prerequisites You'll need: @@ -18,30 +19,33 @@ You'll need: * `git`, `oc` and `helm` (with plugins link:https://github.com/databus23/helm-diff[`helm-diff`] and link:https://github.com/jkroepke/helm-secrets[`helm-secrets`]) installed locally * to be logged into OpenShift on the command line -## Instructions +== Instructions First, create a repository in Bitbucket, e.g. `foo-cd`. The name can be anything, but since the repository will define the resources in namespace `foo-cd` in code, it makes sense to mirror the namespace name. Clone the repository locally and make an initial commit, e.g. by adding a readme file. IMPORTANT: The following commands will fail in an empty Git repository, so make sure you have made at least one commit in the repository. -Then, use `git subtree` to get the required sources. The following commands may look a bit complicated, but in a nutshell, they are simply adding one folder (`deploy/cd-namespace`) from the `opendestack/ods-pipeline` repository at the given revision (e.g. `master`) into your new local repository at the path `ods-pipeline`. +Then, use `git subtree` to get the required sources. The following commands may look a bit complicated, but in a nutshell, they are simply adding one folder (`deploy/`) from the `opendestack/ods-pipeline` repository at the given revision (e.g. `master`) into your new local repository at the path `deploy`. -``` -pipelineGitRef=v0.2.0 # Pick the version you want to install +[source] +---- +pipelineGitRef=master # Pick the version you want to install git fetch --depth=1 https://github.com/opendevstack/ods-pipeline.git $pipelineGitRef:ods-pipeline-$pipelineGitRef && \ git checkout ods-pipeline-$pipelineGitRef && \ -git subtree split --prefix=deploy/cd-namespace -b subtree-split-branch-$pipelineGitRef && \ +git subtree split --prefix=deploy -b subtree-split-branch-$pipelineGitRef && \ git checkout - && \ -git subtree add --squash --prefix=ods-pipeline subtree-split-branch-$pipelineGitRef -``` +git subtree add --squash --prefix=deploy subtree-split-branch-$pipelineGitRef +---- -Once this is done, change to the new folder `ods-pipeline` to configure the values and secrets to use for the installation. +Once this is done, change to the new folder `deploy` to configure the values and secrets to use for the installation. For the values, just run: -``` -cp chart/values.yaml values.yaml -``` + +[source] +---- +cp ods-pipeline/values.yaml values.yaml +---- Regarding the secrets, it is recommended to encrypt them at rest, therefore the following describes how to first encrypt them, but also how to edit them using the `helm-secrets` plugin. @@ -56,34 +60,44 @@ environment variable `SOPS_AGE_KEY_FILE`. The following will refer to this user Knowing this, a key pair for encryption can be created by running: -``` +[source] +---- mkdir -p /sops/age age-keygen -o /sops/age/keys.txt -``` +---- This prints as a result the public key (alternatively you can find it in the `keys.txt`) which looks similar to this: -``` + +[source] +---- Public key: age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p -``` +---- The following will refer to this public key as ``. 
Take the public key and use it to create an encrypted version of your `secrets.yaml`: -``` -sops --encrypt --age chart/secrets.yaml > secrets.yaml -``` +[source] +---- +sops --encrypt --age ods-pipeline/secrets.yaml > secrets.yaml +---- NOTE: you can add multiple recipients (e.g.: each team member has its own age key) comma-separated: -``` -sops --encrypt --age , chart/secrets.yaml > secrets.yaml -``` +[source] +---- +sops --encrypt --age , ods-pipeline/secrets.yaml > secrets.yaml +---- Now you can edit the secrets with: -``` + +[source] +---- helm secrets edit secrets.yaml -``` +---- + +CAUTION: If you configure an Aqua scanner download URL, make sure that username/password are URL-encoded and that the `scannercli` version matches your Aqua server version. +IMPORTANT: In ODS 4.0.0, the central Nexus instance does not have the repositories `ods-temporary-artifacts` and `ods-permanent-artifacts` after the default ODS installation. If those repositories are not present in your Nexus instance yet, you will need to create them manually. The repositories are of type "raw" and should not allow re-deployment of artifacts. It is recommended to use blob stores for both. As administrator, you may prune the `ods-temporary-artifacts` repository using cleanup policies of your own choosing. The `ods-permanent-artifacts` repository should not be cleaned up or have a retention period matching your organisation policy of record retention. Now fill in the variables as described in the comments in both (values.yaml and secrets.yaml) files. Then you can install the resources via `./install.sh -n -f values.yaml,secrets.yaml` (make sure to replace the namespace). You may also use `--dry-run` to see the changes first. @@ -93,18 +107,20 @@ Now your cd namespace is fully setup and you can start to utilize Tekton pipelin See the link:getting-started.adoc[Getting Started] guide for more information on usage. -## Updating +== Updating You may fetch updates (e.g. new versions) of `ods-pipeline` like this: -``` + +[source] +---- pipelineGitRef=v0.2.0 # Pick the version you want to install git fetch --depth=1 https://github.com/opendevstack/ods-pipeline.git $pipelineGitRef:ods-pipeline-$pipelineGitRef && \ git checkout ods-pipeline-$pipelineGitRef && \ -git subtree split --prefix=deploy/cd-namespace -b subtree-split-branch-$pipelineGitRef && \ +git subtree split --prefix=deploy -b subtree-split-branch-$pipelineGitRef && \ git checkout - && \ -git subtree merge --prefix=ods-pipeline subtree-split-branch-$pipelineGitRef --squash -``` +git subtree merge --prefix=deploy subtree-split-branch-$pipelineGitRef --squash +---- Now, compare if any new values have been introduced and update the values and secrets file accordingly. diff --git a/docs/introduction.adoc b/docs/introduction.adoc index ffe9ae52..b26da8f9 100644 --- a/docs/introduction.adoc +++ b/docs/introduction.adoc @@ -1,12 +1,12 @@ -# ODS Pipeline Introduction += ODS Pipeline Introduction ODS provides CI/CD pipeline support based on OpenShift Pipelines. This introduction will walk you through the essentials, and guide you all the way to more advanced topics. Basic knowledge of Kubernetes concepts and OpenShift is assumed. Estimated reading time is about 15 minutes. -## What is OpenShift Pipelines? +== What is OpenShift Pipelines? https://www.openshift.com/learn/topics/pipelines[OpenShift Pipelines] is a Kubernetes-style CI/CD solution based on Tekton. It builds on the Tekton building blocks and offers tight integration with OpenShift. 
The main addition over plain Tekton is a UI in the OpenShift console. -## What is Tekton? +== What is Tekton? https://tekton.dev[Tekton] provides a framework to create cloud-native CI/CD pipelines. The building blocks for those pipelines are defined using Kubernetes Custom Resources. @@ -16,9 +16,9 @@ image::https://raw.githubusercontent.com/openshift/pipelines-tutorial/master/doc At this stage you know just enough about Tekton to continue with this introduction, but if you want to know more about it, you can read the https://tekton.dev/docs/[Tekton docs] and/or follow the https://github.com/openshift/pipelines-tutorial[OpenShift Pipelines tutorial]. -## What does ODS bring to the table? +== What does ODS bring to the table? -In regards to CI/CD, ODS provides two things: +In regard to CI/CD, ODS provides two things: * a few Tekton Tasks for use in pipelines * a pipeline manager responding to Bitbucket webhook events by triggering pipelines @@ -29,9 +29,9 @@ The ODS tasks can be used in a pipeline to build, deploy and test your applicati The tasks are so easy to exchange and compose as Tekton tasks have clearly defined inputs (the parameters), clearly defined outputs (the results) and work on a generic workspace, for which an actual volume is provided to them by the pipeline. -## Which tasks does ODS provide? +== Which tasks does ODS provide? -An ODS pipeline installation provides you with the following tasks, which are implemented as `ClusterTask` resources, allowing you to use them in any namespace: +An ODS pipeline installation provides you with the following tasks, which are implemented as `Task` resources: * `ods-start`: Checkout repository and set Bitbucket build status * `ods-build-go`: Build a Go application (includes Sonar scan) @@ -59,7 +59,7 @@ The produced images are tagged with the Git commit SHA being built. If the task The behaviour of each task can be customized by setting parameters. For example, the `ods-package-image` tasks assumes the `Dockerfile` to be located in the `docker` directory by default. You can instruct the task to use a different Docker context by providing the `context-dir` parameter to the task. -## How do I use the tasks provided by ODS? +== How do I use the tasks provided by ODS? As you have learned earlier, tasks are referenced by pipelines. Therefore, all you would need to do to use the ODS tasks is to create a pipeline in OpenShift, and reference the tasks you want to execute. Then you'd need to start the pipeline (which creates a `PipelineRun`). @@ -73,19 +73,20 @@ To solve these problems (and a few more ...), ODS ships with another component a To understand how this works, it is best to trace the flow starting from the repository. Assume you have a repository containing a Go application, and you want to run a pipeline building a container image for it every time you push to Bitbucket. To achieve this in a project created by ODS, all you need is to have an `ods.yaml` file in the root of your repository. The `ods.yaml` file defines the tasks you want to run in the pipeline. 
Let's look at an example `ods.yaml` file for our Go repository: -```yml +[source,yml] +---- pipeline: tasks: - name: build-go taskRef: - kind: ClusterTask + kind: Task name: ods-build-go-v0-2-0 workspaces: - name: source workspace: shared-workspace - name: package-image taskRef: - kind: ClusterTask + kind: Task name: ods-package-image-v0-2-0 runAfter: - build-go @@ -94,18 +95,18 @@ pipeline: workspace: shared-workspace - name: deploy-helm taskRef: - kind: ClusterTask + kind: Task name: ods-deploy-helm-v0-2-0 runAfter: - package-image workspaces: - name: source workspace: shared-workspace -``` +---- You can see that it defines three tasks, `ods-build-go`, `ods-package-image` and `ods-deploy-helm`, which run sequentially due to the usage of `runAfter`. -In order to create pipeline runs based on these task definitions whenever there is a push to Bitbucket, a webhook setting must be created for the repository. This webhook must point to a route connected to the ODS pipeline manager in OpenShift. When the webhook fires, a payload with information about the pushed commit is sent. The ODS pipeline manager first checks the authenticity of the request (did the request really originate from a push in the Bitbucket repository?). Then, it retrieves the `ods.yaml` file from the Git repository/ref identified in the payload, and reads the pipeline configuration. Based on the tasks defined there, it assembles a new Tekton pipeline. The name of this new pipelines is a concatenation of the repository name and the Git ref (e.g. `myapp-master`). In the next step, the ODS pipeline manager checks if a pipeline with that name already exists, and either creates a new pipeline or updates the existing pipeline. That way, you get one pipeline per branch which makes it easier to navigate in the OpenShift UI and allows to see pipeline duration trends easily. Finally, the ODS pipeline manager triggers the pipeline, passing parameter values extracted from the webhook event payload. The following illustrates this flow: +In order to create pipeline runs based on these task definitions whenever there is a push to Bitbucket, a webhook setting must be created for the repository. This webhook must point to a route connected to the ODS pipeline manager in OpenShift. When the webhook fires, a payload with information about the pushed commit is sent. The ODS pipeline manager first checks the authenticity of the request (did the request really originate from a push in the Bitbucket repository?). Then, it retrieves the `ods.yaml` file from the Git repository/ref identified in the payload, and reads the pipeline configuration. Based on the tasks defined there, it assembles a new Tekton pipeline. The name of this new pipelines is a concatenation of the repository name and the Git ref (e.g. `myapp-master`). In the next step, the ODS pipeline manager checks if a pipeline with that name already exists, and either creates a new pipeline or updates the existing pipeline. That way, you get one pipeline per branch which makes it easier to navigate in the OpenShift UI and allows seeing pipeline duration trends easily. Finally, the ODS pipeline manager triggers the pipeline, passing parameter values extracted from the webhook event payload. 
The following illustrates this flow: image::http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubusercontent.com/opendevstack/ods-pipeline/master/docs/architecture/trigger_architecture.puml[Trigger Architecture] diff --git a/docs/ods-configuration.adoc b/docs/ods-configuration.adoc index dc5d626f..0be954b3 100644 --- a/docs/ods-configuration.adoc +++ b/docs/ods-configuration.adoc @@ -21,7 +21,7 @@ pipeline: tasks: - name: backend-build-go taskRef: - kind: ClusterTask + kind: Task name: ods-build-go-v0-2-0 workspaces: - name: source @@ -30,7 +30,7 @@ pipeline: Each task reference is just the plain Tekton definition. See the Tekton documentation on link:https://tekton.dev/docs/pipelines/pipelines/#adding-tasks-to-the-pipeline[Adding Tasks to the Pipeline] for more information. -A typical central `ods-pipeline` installation offers its task as `CluserTask` resources, therefore the `taskRef.kind` needs to be `ClusterTask`. The value of the `taskRef.name` field depends on the version of `ods-pipeline` that is installed in your cluster. Available tasks for you to use can be found in your OpenShift console UI under "Pipelines > Cluster Tasks". +A typical `ods-pipeline` installation offers its task as `Task` resources, therefore the `taskRef.kind` needs to be `Task`. The value of the `taskRef.name` field depends on the version of `ods-pipeline` that is installed in your project. Available tasks for you to use can be found in your OpenShift console UI under "Pipelines > Tasks". The pipeline created based on the configuration will have a workspace named `shared-workspace` available, which is backed by a PVC (named `ods-pipeline`) in your namespace. @@ -38,7 +38,7 @@ Next to the tasks you specify, `ods-pipeline` will automatically inject two task The `ods-finish` task is added as a final task to the pipeline. Final tasks run at the end, regardless whether all previous tasks succeeded. The `ods-finish` sets the Bitbucket build status and deals with Nexus artifacts, etc. -You can also specify further final tasks to be added to the pipeline by specyfing them under `finally`. Example: +You can also specify further final tasks to be added to the pipeline by specifying them under `finally`. Example: .ods.yaml [source,yaml] @@ -100,7 +100,7 @@ TIP: If you want to promote images between environments without rebuilding them, == `repositories` -If your application is made out of multiple components, you may want to have one "umbrella" repository that ties all those components together and deploys the whole application together. In this case, the umbrella repository can specify the subrepositores via the `repositories` field. Example: +If your application is made out of multiple components, you may want to have one "umbrella" repository that ties all those components together and deploys the whole application together. In this case, the umbrella repository can specify the subrepositories via the `repositories` field. Example: .ods.yaml [source,yaml] @@ -112,6 +112,6 @@ repositories: url: https://bitbucket.acme.org/scm/baz/bar.git ---- -If the repository does not specify a URL, the repository is assumed to be under the same organisation than the repository hosting the `ods.yaml` file. If no branch is given, `master` is used as a default. +If the repository does not specify a URL, the repository is assumed to be under the same organisation as the repository hosting the `ods.yaml` file. If no branch is given, `master` is used as a default. 
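To make the defaulting rules concrete, here is a small Go sketch, using hypothetical types rather than the actual `config` package, that derives the clone URL and branch for a subrepository entry:

[source,go]
----
package main

import "fmt"

// Repository is a hypothetical representation of one entry under `repositories`.
type Repository struct {
	Name   string
	URL    string
	Branch string
}

// applyDefaults fills in the URL and branch following the rules described above:
// the URL defaults to a repository under the same organisation as the umbrella
// repository, and the branch defaults to master.
func applyDefaults(r Repository, umbrellaBaseURL string) Repository {
	if r.URL == "" {
		r.URL = fmt.Sprintf("%s/%s.git", umbrellaBaseURL, r.Name)
	}
	if r.Branch == "" {
		r.Branch = "master"
	}
	return r
}

func main() {
	repo := applyDefaults(Repository{Name: "bar"}, "https://bitbucket.acme.org/scm/baz")
	fmt.Println(repo.URL, repo.Branch)
	// Output: https://bitbucket.acme.org/scm/baz/bar.git master
}
----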
Repositories listed in `ods.yaml` are checked out in `ods-start` in `.ods/repos` and any tasks in the pipeline can alter their behaviour based on the presence of subrepos. For example, the `ods-deploy-helm` task will package any charts in subrepos and add them to the chart in the umbrella repository, deploying all charts as one release. diff --git a/docs/releasing.adoc b/docs/releasing.adoc index 4556b07a..b3280331 100644 --- a/docs/releasing.adoc +++ b/docs/releasing.adoc @@ -14,15 +14,14 @@ The current version is hardcoded in a few places across the repository. All of t === Publishing the release Draft a new GitHub release, creating a new tag in the process (e.g. `v0.2.0`). The description should be like this: -``` +[source] +---- < Note highlights of the release and any breaking changes > For all changes and more details, please see the [changelog](https://github.com/opendevstack/ods-pipeline/blob/master/CHANGELOG.md#< Add anchor of the released version>). -To update the cluster-wide pipeline installation, refer to the [admin update instructions](https://github.com/opendevstack/ods-pipeline/blob/master/docs/admin-installation.adoc#updating). - -To update your project-specific pipeline installation, refer to the [user update instructions](https://github.com/opendevstack/ods-pipeline/blob/master/docs/user-installation.adoc#updating). -``` +To update your project-specific pipeline installation, refer to the [update instructions](https://github.com/opendevstack/ods-pipeline/blob/master/docs/installation.adoc#updating). +---- === Attaching binaries to the release The `artifact-download` binary should be offered as a pre-built binary for `linux/amd64`, `darwin/amd64` and `windows/amd64`. These can be generated via `make build-artifact-download`, and then uploaded in the GitHub release creation form. diff --git a/docs/repository-layout.adoc b/docs/repository-layout.adoc index fd1bf40f..74efd4cb 100644 --- a/docs/repository-layout.adoc +++ b/docs/repository-layout.adoc @@ -6,7 +6,7 @@ The most important pieces are: * **build/package**: `Dockerfile`s for the various container images in use. These images back Tekton tasks or the pipeline manager. * **cmd**: Main executables. These are installed (in different combinations) into the contaier images. -* **deploy**: OpenShift/K8S resource definitions, such as `BuildConfig`/`ImageStream` or `ClusterTask` resources. The tasks typically make use of the images built via `build/package` and their `script` calls one or more executables built from the `cmd` folder. +* **deploy**: OpenShift/K8S resource definitions, such as `BuildConfig`/`ImageStream` or `Task` resources. The tasks typically make use of the images built via `build/package` and released to ghcr.io. Their `script` calls one or more executables built from the `cmd` folder. * **docs**: Design and user documents * **internal/manager**: Implementation of the webhook receiver and pipeline manager - it creates and modifies the actual Tekton pipelines on the fly based on the config found in the repository triggering the webhook request. * **pkg**: Packages shared by the various main executables and the pipeline manager. These packages are the public interface and may be used outside this repo (e.g. by custom tasks). 
Example of packages are `bitbucket` (a Bitbucket Server API v1.0 client), `sonar` (a SonarQube client exposing API endpoints, scanner CLI and report CLI in one unified interface), `nexus` (a Nexus client for uploading, downloading and searching for assets) and `config` (the ODS configuration specification). diff --git a/docs/tasks/ods-build-go-with-sidecar.adoc b/docs/tasks/ods-build-go-with-sidecar.adoc deleted file mode 100644 index 3a4dc1b9..00000000 --- a/docs/tasks/ods-build-go-with-sidecar.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT. - -= ods-build-go-with-sidecar - -Builds Go (module) applications. - -The exact build recipe can be found at -link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[build/package/scripts/build-go.sh]. - -The following provides an overview of the performed steps: - -- Source files are checked to be formatted with `gofmt`. -- The go module cache is configured to be on the cache location of the PVC by setting environment variable `GOMODCACHE` to `.ods-cache/deps/gomod` (see link:https://go.dev/ref/mod#module-cache[go module cache]). -- `golanci-lint` is run. The linter can be configured via a - config file as described in the - link:https://golangci-lint.run/usage/configuration/[configuration documentation]. -- Tests are executed. A potential `vendor` directory is excluded. Test - results are converted into xUnit format. If test artifacts are already present for - the current Git commit SHA, testing is skipped. -- Application binary (named `app`) is built and placed into the directory - specified by `output-dir`. - -Finally, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `coverage.out` -* `lint-reports/` - ** `report.txt` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -**Sidecar variant!** Use this task if you need to run a container next to the build task. -For example, this could be used to run a database to allow for integration tests. -The sidecar image to must be supplied via `sidecar-image`. -Apart from the sidecar, the task is an exact copy of `ods-build-go`. - -== Parameters - -[cols="1,1,2"] -|=== -| Parameter | Default | Description - -| working-dir -| . -| Working directory. The path must be relative to the root of the repository, -without leading `./` and trailing `/`. - - - -| enable-cgo -| false -| Whether to enable CGO. When not enabled the build will set `CGO_ENABLED=0`. - - -| go-os -| linux -| `GOOS` variable (the execution operating system such as `linux`, `windows`). - - -| go-arch -| amd64 -| `GOARCH` variable (the execution architecture such as `arm`, `amd64`). 
- - -| output-dir -| docker -| Path to the directory into which the resulting Go binary should be copied, relative to `working-dir`. This directory may then later be used as Docker context for example. - - -| pre-test-script -| -| Script to execute before running tests, relative to the working directory. - - -| sonar-quality-gate -| false -| Whether the SonarQube quality gate needs to pass for the task to succeed. - - -| sonar-skip -| false -| Whether to skip SonarQube analysis or not. - - -| sidecar-image -| -| Image to use for sidecar - -|=== - -== Results - -N/A diff --git a/docs/tasks/ods-build-gradle-with-sidecar.adoc b/docs/tasks/ods-build-gradle-with-sidecar.adoc deleted file mode 100644 index 2483e5da..00000000 --- a/docs/tasks/ods-build-gradle-with-sidecar.adoc +++ /dev/null @@ -1,153 +0,0 @@ -// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT. - -= ods-build-gradle-with-sidecar - -Builds Gradle applications. - -The following steps are executed: - -- build gradle application, using `gradlew clean build`, which includes tests execution and coverage report generation -- SonarQube quality scan - -Notes: - -- tests exclude the vendor directory. -- test results are converted into xUnit format. - -Available environment variables: - -- `ODS_OUTPUT_DIR`: this environment variable points to the folder -that this build expects generated application artifacts to be copied to. -The gradle script should read it and copy there the generated artifacts. -- `NEXUS_*` env vars: `NEXUS_URL`, `NEXUS_USERNAME` and `NEXUS_PASSWORD` -are available and should be read by the gradle script. - -To enable the gradle script to copy the generated application artifacts script follow these steps: - -- read the environment variable `ODS_OUTPUT_DIR` in the buildscript section of the gradle script: -``` -buildscript { - ext { - outputDir = System.getenv('ODS_OUTPUT_DIR') - } -} -``` -- customize the jar tasks to set the destination directory -``` -jar { - println("Set application jar name to 'app'") - archiveBaseName = 'app' - if (outputDir != null) { - println("Set destinationDirectory to '${projectDir}/${outputDir}'") - destinationDirectory = file("${projectDir}/${outputDir}") - } -} -``` - -To create a coverage report be sure that you add to `gradle.properties` the required -configuration. For example to enable Jacoco coverage repot you will need to: - -- add `jacoco` plugin: -``` -plugins { - id 'application' - id 'jacoco' -} -``` -- add task `jacocoTestReport`: -``` -jacocoTestReport { - reports { - xml.required = true - } -} -``` -- add `finalizedBy jacocoTestReport` to the task `test`: -``` -tasks.named('test') { - useJUnitPlatform() - finalizedBy jacocoTestReport -} -``` - -The exact build recipe can be found at -link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[build/package/scripts/build-gradle.sh]. - -After tests ran successfully, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. 
If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `coverage.xml` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -**Sidecar variant!** Use this task if you need to run a container next to the build task. -For example, this could be used to run a database to allow for integration tests. -The sidecar image to must be supplied via `sidecar-image`. -Apart from the sidecar, the task is an exact copy of `ods-build-gradle`. - -== Parameters - -[cols="1,1,2"] -|=== -| Parameter | Default | Description - -| working-dir -| . -| Working directory. The path must be relative to the root of the repository, -without leading `./` and trailing `/`. - - - -| gradle-additional-tasks -| -| Additional gradle tasks to be passed to the gradle build. (default tasks called are `clean` and `build`). - - -| gradle-options -| --no-daemon --stacktrace -| Options to be passed to the gradle build. (See ref: https://docs.gradle.org/7.3.3/userguide/command_line_interface.html#sec:command_line_debugging) - - -| gradle-opts-env -| -Dorg.gradle.jvmargs=-Xmx512M -| Will be exposed to the build via `GRADLE_OPTS` environment variable. Specifies JVM arguments to use when starting the Gradle client VM. The client VM only handles command line input/output, so it is rare that one would need to change its VM options. You can still use this to change the settings for the Gradle daemon which runs the actual build by setting the according Gradle properties by `-D`. If you want to set the JVM arguments for the actual build you would do this via `-Dorg.gradle.jvmargs=-Xmx1024M` (See ref: https://docs.gradle.org/7.3.3/userguide/build_environment.html#sec:gradle_configuration_properties). - - -| output-dir -| docker -| Path to the directory into which the resulting Java application jar should be copied, relative to `working-dir`. This directory may then later be used as Docker context for example. - - -| sonar-quality-gate -| false -| Whether the SonarQube quality gate needs to pass for the task to succeed. - - -| sonar-skip -| false -| Whether to skip SonarQube analysis or not. - - -| sidecar-image -| -| Image to use for sidecar - -|=== - -== Results - -N/A diff --git a/docs/tasks/ods-build-python-with-sidecar.adoc b/docs/tasks/ods-build-python-with-sidecar.adoc deleted file mode 100644 index 069fecef..00000000 --- a/docs/tasks/ods-build-python-with-sidecar.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT. - -= ods-build-python-with-sidecar - -Builds Python applications. - -The exact build recipe can be found at -link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[build/package/scripts/build-python.sh]. -In particular, the Python source files are expected to be located in `src`. - -After tests ran successfully, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. 
-The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `coverage.xml` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -**Sidecar variant!** Use this task if you need to run a container next to the build task. -For example, this could be used to run a database to allow for integration tests. -The sidecar image to must be supplied via `sidecar-image`. -Apart from the sidecar, the task is an exact copy of `ods-build-python`. - -== Parameters - -[cols="1,1,2"] -|=== -| Parameter | Default | Description - -| working-dir -| . -| Working directory. The path must be relative to the root of the repository, -without leading `./` and trailing `/`. - - - -| output-dir -| docker -| Path to the directory into which outputs should be placed, relative to `working-dir`. This directory may then later be used as Docker context for example. - - -| max-line-length -| 120 -| Maximum line length. - - -| pre-test-script -| -| Script to execute before running tests, relative to the working directory. - - -| sonar-quality-gate -| false -| Whether quality gate needs to pass. - - -| sonar-skip -| false -| Whether to skip the SonarQube analysis or not. - - -| sidecar-image -| -| Image to use for sidecar - -|=== - -== Results - -N/A diff --git a/docs/tasks/ods-build-typescript-with-sidecar.adoc b/docs/tasks/ods-build-typescript-with-sidecar.adoc deleted file mode 100644 index 63008cfb..00000000 --- a/docs/tasks/ods-build-typescript-with-sidecar.adoc +++ /dev/null @@ -1,113 +0,0 @@ -// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT. - -= ods-build-typescript-with-sidecar - -Builds Typescript applications. - -The following steps are executed: - -- checks that package.json and package-lock.json exists to require best practice of using lock files. See also link:https://github.com/opendevstack/ods-pipeline/discussions/411[discussion 411] -- linting using `eslint` -- build typescript application, using `npm run build` -- test execution -- SonarQube quality scan - -For `eslint` to work there needs to be a config file (`eslintrc.json` or similar) at the root of the working directory. -This can be done by running `eslint --init` or by following the link:https://eslint.org/docs/user-guide/getting-started[official documentation] - -The exact build recipe can be found at -link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-typescript.sh[build/package/scripts/build-typescript.sh]. -In particular, `npm run build` is expected to place outputs into `dist`. - -After tests ran successfully, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. 
If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `clover.xml` - ** `coverage-final.json` - ** `lcov.info` -* `lint-reports` - ** `report.txt` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -**Sidecar variant!** Use this task if you need to run a container next to the build task. -For example, this could be used to run a database to allow for integration tests. -The sidecar image to must be supplied via `sidecar-image`. -Apart from the sidecar, the task is an exact copy of `ods-build-typescript`. - -== Parameters - -[cols="1,1,2"] -|=== -| Parameter | Default | Description - -| working-dir -| . -| Working directory. The path must be relative to the root of the repository, -without leading `./` and trailing `/`. - - - -| output-dir -| docker -| Path to the directory into which outputs should be placed, relative to `working-dir`. This directory may then later be used as Docker context for example. - - -| max-lint-warnings -| 0 -| Maximum of allowed linting warnings after which eslint will exit with an error. Set to "-1" to never exit with an error due to warnings. - - -| lint-file-ext -| .js,.ts,.jsx,.tsx,.svelte -| File extensions to lint separated by a comma. - - -| sonar-quality-gate -| false -| Whether quality gate needs to pass. - - -| sonar-skip -| false -| Whether to skip the SonarQube analysis or not. - - -| node-version -| 16 -| Node.js version to use - supported versions: 16 - - -| build-dir -| dist -| Must match the directory into which `npm run build` places files. The files inside `build-dir` will be copied to the `dist` folder in `output-dir` As a result the files will be in `$output-dir/dist` Other common build directories are `build` and `public`. - - -| copy-node-modules -| false -| Whether `node-modules` is copied to the `output-dir` or not. If copied the node modules are in `$output-dir/dist/node_modules`. For frontend components this should be set to "false", while for backend components this should be set to "true". 
- - -| sidecar-image -| -| Image to use for sidecar - -|=== - -== Results - -N/A diff --git a/internal/docs/tasks.go b/internal/docs/tasks.go index 84517550..33c4a32c 100644 --- a/internal/docs/tasks.go +++ b/internal/docs/tasks.go @@ -53,13 +53,13 @@ func renderTemplate(targetDir, targetFilename string, data Task) error { return tmpl.Execute(targetFile, data) } -func parseTasks(helmTemplateOutput []byte) ([]*tekton.ClusterTask, error) { - var tasks []*tekton.ClusterTask +func parseTasks(helmTemplateOutput []byte) ([]*tekton.Task, error) { + var tasks []*tekton.Task tasksBytes := bytes.Split(helmTemplateOutput, []byte("---")) for _, taskBytes := range tasksBytes { - var t tekton.ClusterTask + var t tekton.Task err := yaml.Unmarshal(taskBytes, &t) if err != nil { return nil, err diff --git a/internal/manager/server_test.go b/internal/manager/server_test.go index 27be74db..fd599f58 100644 --- a/internal/manager/server_test.go +++ b/internal/manager/server_test.go @@ -57,7 +57,7 @@ func TestRenderPipeline(t *testing.T) { var odsConfig *config.ODS err := yaml.Unmarshal(conf, &odsConfig) fatalIfErr(t, err) - gotPipeline, err := renderPipeline(odsConfig, data, "ClusterTask", "-v0-1-0") + gotPipeline, err := renderPipeline(odsConfig, data, "Task", "-v0-1-0") fatalIfErr(t, err) if diff := cmp.Diff(wantPipeline, gotPipeline); diff != "" { t.Fatalf("renderPipeline() mismatch (-want +got):\n%s", diff) @@ -246,7 +246,7 @@ func testServer(kc kubernetes.ClientInterface, tc tektonClient.ClientInterface, Project: "bar", Token: "test", WebhookSecret: testWebhookSecret, - TaskKind: "ClusterTask", + TaskKind: "Task", RepoBase: "https://domain.com", StorageConfig: StorageConfig{ Provisioner: "kubernetes.io/aws-ebs", diff --git a/pkg/tasktesting/helper.go b/pkg/tasktesting/helper.go index 4991acba..8a527f4d 100644 --- a/pkg/tasktesting/helper.go +++ b/pkg/tasktesting/helper.go @@ -48,14 +48,14 @@ func Setup(t *testing.T, opts SetupOpts) (*k.Clients, string) { t.Error(err) } - installCDNamespaceResources(t, namespace, "pipeline", "./chart/values.kind.yaml,./chart/values.generated.yaml") + installCDNamespaceResources(t, namespace, "pipeline") return clients, namespace } -func installCDNamespaceResources(t *testing.T, ns, serviceaccount, valuesFile string) { +func installCDNamespaceResources(t *testing.T, ns, serviceaccount string) { - scriptArgs := []string{"-n", ns, "-s", serviceaccount, "-f", valuesFile, "--no-diff"} + scriptArgs := []string{"-n", ns, "-s", serviceaccount, "--no-diff"} if testing.Verbose() { scriptArgs = append(scriptArgs, "-v") } diff --git a/pkg/tasktesting/run.go b/pkg/tasktesting/run.go index 8eb79963..2ff9812c 100644 --- a/pkg/tasktesting/run.go +++ b/pkg/tasktesting/run.go @@ -27,10 +27,6 @@ type TestOpts struct { } type TestCase struct { - // TaskVariant allows to target a variant of the task. E.g. the `ods-build-go task` - // has a `ods-build-go-with-sidecar` variant. This variant can be targeted by - // setting TaskVariant to `with-sidecar`. - TaskVariant string // Map workspace name of task to local directory under test/testdata/workspaces. 
WorkspaceDirMapping map[string]string TaskParamsMapping map[string]string diff --git a/scripts/install-cd-namespace-resources.sh b/scripts/install-cd-namespace-resources.sh index 6dede554..4563bef2 100755 --- a/scripts/install-cd-namespace-resources.sh +++ b/scripts/install-cd-namespace-resources.sh @@ -4,9 +4,9 @@ set -ue SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} -# Delegate to install.sh within deploy/cd-namespace. +# Delegate to install.sh within deploy/ods-pipeline. # The script here exists only for consistency (all scripts are located under /scripts) -# and the install.sh script is under deploy/cd-namespace so that the whole +# and the install.sh script is under deploy/ods-pipeline so that the whole # deployment is self-contained within that folder, making it easy for consumers # to pull in the deployment logic into their repositories via "git subtree". -"${ODS_PIPELINE_DIR}"/deploy/cd-namespace/install.sh "$@" +"${ODS_PIPELINE_DIR}"/deploy/install.sh -f ./ods-pipeline/values.kind.yaml,./ods-pipeline/values.generated.yaml "$@" diff --git a/scripts/install-ods-central-resources.sh b/scripts/install-ods-central-resources.sh deleted file mode 100755 index ba77364d..00000000 --- a/scripts/install-ods-central-resources.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash -set -ue - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} - -# Delegate to install.sh within deploy/central. -# The script here exists only for consistency (all scripts are located under /scripts) -# and the install.sh script is under deploy/central so that the whole -# deployment is self-contained within that folder, making it easy for consumers -# to pull in the deployment logic into their repositories via "git subtree". -"${ODS_PIPELINE_DIR}"/deploy/central/install.sh --chart=images "$@" -"${ODS_PIPELINE_DIR}"/deploy/central/install.sh --chart=tasks "$@" diff --git a/scripts/install-ods-tasks-kind.sh b/scripts/install-ods-tasks-kind.sh deleted file mode 100755 index 706f7593..00000000 --- a/scripts/install-ods-tasks-kind.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -set -ue - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} - -# Delegate to install.sh within deploy/central. -# The script here exists only for consistency (all scripts are located under /scripts) -# and the install.sh script is under deploy/central so that the whole -# deployment is self-contained within that folder, making it easy for consumers -# to pull in the deployment logic into their repositories via "git subtree". -"${ODS_PIPELINE_DIR}"/deploy/central/install.sh \ - --namespace=default \ - --chart=tasks \ - --no-diff \ - --values=./tasks-chart/values.kind.yaml "$@" diff --git a/scripts/run-bitbucket.sh b/scripts/run-bitbucket.sh index 2642b5fb..7361aa99 100755 --- a/scripts/run-bitbucket.sh +++ b/scripts/run-bitbucket.sh @@ -14,7 +14,7 @@ BITBUCKET_SERVER_IMAGE_TAG="7.6.5" BITBUCKET_POSTGRES_HOST_PORT="5432" BITBUCKET_POSTGRES_CONTAINER_NAME="ods-test-bitbucket-postgres" BITBUCKET_POSTGRES_IMAGE_TAG="12" -HELM_VALUES_FILE="${ODS_PIPELINE_DIR}/deploy/cd-namespace/chart/values.generated.yaml" +HELM_VALUES_FILE="${ODS_PIPELINE_DIR}/deploy/ods-pipeline/values.generated.yaml" while [[ "$#" -gt 0 ]]; do case $1 in @@ -49,9 +49,13 @@ if ! "${SCRIPT_DIR}/waitfor-bitbucket.sh" ; then fi BITBUCKET_URL_FULL="http://${BITBUCKET_SERVER_CONTAINER_NAME}.kind:7990" +if [ ! 
-e "${HELM_VALUES_FILE}" ]; then + echo "setup:" > "${HELM_VALUES_FILE}" +fi + { - echo "bitbucketUrl: '${BITBUCKET_URL_FULL}'" - echo "bitbucketUsername: 'admin'" - echo "bitbucketAccessToken: 'NzU0OTk1MjU0NjEzOpzj5hmFNAaawvupxPKpcJlsfNgP'" - echo "bitbucketWebhookSecret: 's3cr3t'" + echo " bitbucketUrl: '${BITBUCKET_URL_FULL}'" + echo " bitbucketUsername: 'admin'" + echo " bitbucketAccessToken: 'NzU0OTk1MjU0NjEzOpzj5hmFNAaawvupxPKpcJlsfNgP'" + echo " bitbucketWebhookSecret: 's3cr3t'" } >> "${HELM_VALUES_FILE}" diff --git a/scripts/run-nexus.sh b/scripts/run-nexus.sh index ea4b6c88..6e849b75 100755 --- a/scripts/run-nexus.sh +++ b/scripts/run-nexus.sh @@ -14,7 +14,7 @@ NEXUS_URL= IMAGE_NAME="ods-test-nexus" CONTAINER_NAME="ods-test-nexus" NEXUS_IMAGE_TAG="3.30.1" -HELM_VALUES_FILE="${ODS_PIPELINE_DIR}/deploy/cd-namespace/chart/values.generated.yaml" +HELM_VALUES_FILE="${ODS_PIPELINE_DIR}/deploy/ods-pipeline/values.generated.yaml" while [[ "$#" -gt 0 ]]; do case $1 in @@ -87,8 +87,12 @@ sed "s|@developer_password@|${DEVELOPER_PASSWORD}|g" "${SCRIPT_DIR}"/nexus/devel runJsonScript "createUser" "-d @${SCRIPT_DIR}/nexus/developer-user-with-password.json" rm "${SCRIPT_DIR}"/nexus/developer-user-with-password.json +if [ ! -e "${HELM_VALUES_FILE}" ]; then + echo "setup:" > "${HELM_VALUES_FILE}" +fi + { - echo "nexusUrl: 'http://${CONTAINER_NAME}.kind:8081'"; - echo "nexusUsername: '${DEVELOPER_USERNAME}'"; - echo "nexusPassword: '${DEVELOPER_PASSWORD}'"; + echo " nexusUrl: 'http://${CONTAINER_NAME}.kind:8081'"; + echo " nexusUsername: '${DEVELOPER_USERNAME}'"; + echo " nexusPassword: '${DEVELOPER_PASSWORD}'"; } >> "${HELM_VALUES_FILE}" diff --git a/scripts/run-sonarqube.sh b/scripts/run-sonarqube.sh index ad7bcc5d..8d1cd13a 100755 --- a/scripts/run-sonarqube.sh +++ b/scripts/run-sonarqube.sh @@ -13,7 +13,7 @@ SONAR_USERNAME="admin" SONAR_PASSWORD="admin" SONAR_EDITION="community" SONAR_IMAGE_TAG="${SONAR_VERSION}-${SONAR_EDITION}" -HELM_VALUES_FILE="${ODS_PIPELINE_DIR}/deploy/cd-namespace/chart/values.generated.yaml" +HELM_VALUES_FILE="${ODS_PIPELINE_DIR}/deploy/ods-pipeline/values.generated.yaml" while [[ "$#" -gt 0 ]]; do case $1 in @@ -45,8 +45,12 @@ tokenResponse=$(curl ${INSECURE} -X POST -sSf --user "${SONAR_USERNAME}:${SONAR_ # {"login":"cd_user","name":"foo","token":"bar","createdAt":"2020-04-22T13:21:54+0000"} token=$(echo "${tokenResponse}" | jq -r .token) +if [ ! 
-e "${HELM_VALUES_FILE}" ]; then + echo "setup:" > "${HELM_VALUES_FILE}" +fi + { - echo "sonarUrl: 'http://${CONTAINER_NAME}.kind:9000'" - echo "sonarUsername: '${SONAR_USERNAME}'" - echo "sonarAuthToken: '${token}'" + echo " sonarUrl: 'http://${CONTAINER_NAME}.kind:9000'" + echo " sonarUsername: '${SONAR_USERNAME}'" + echo " sonarAuthToken: '${token}'" } >> "${HELM_VALUES_FILE}" diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 1d3c0e16..4bccbb2c 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -117,7 +117,7 @@ pipeline: tasks: - name: package-image taskRef: - kind: ClusterTask + kind: Task name: ods-package-image workspaces: - name: source diff --git a/test/tasks/common_test.go b/test/tasks/common_test.go index 822fcd3a..a1b94b3b 100644 --- a/test/tasks/common_test.go +++ b/test/tasks/common_test.go @@ -3,7 +3,6 @@ package tasks import ( "crypto/sha256" "flag" - "fmt" "io/ioutil" "os" "path/filepath" @@ -27,7 +26,7 @@ var alwaysKeepTmpWorkspacesFlag = flag.Bool("always-keep-tmp-workspaces", false, var outsideKindFlag = flag.Bool("outside-kind", false, "Whether to continue if not in KinD cluster") const ( - taskKindRef = "ClusterTask" + taskKindRef = "Task" ) func checkODSContext(t *testing.T, repoDir string, want *pipelinectxt.ODSContext) { @@ -150,9 +149,6 @@ func runTaskTestCases(t *testing.T, taskName string, requiredServices []tasktest for name, tc := range testCases { t.Run(name, func(t *testing.T) { tn := taskName - if tc.TaskVariant != "" { - tn = fmt.Sprintf("%s-%s", taskName, tc.TaskVariant) - } if tc.Timeout == 0 { tc.Timeout = 5 * time.Minute } diff --git a/test/tasks/ods-build-go_test.go b/test/tasks/ods-build-go_test.go index 248d4cd3..1aadf7fd 100644 --- a/test/tasks/ods-build-go_test.go +++ b/test/tasks/ods-build-go_test.go @@ -167,24 +167,5 @@ func TestTaskODSBuildGo(t *testing.T) { checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") }, }, - "build go app with redis sidecar": { - TaskVariant: "with-sidecar", - WorkspaceDirMapping: map[string]string{"source": "go-redis"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = map[string]string{ - "sonar-skip": "true", - "sidecar-image": "redis:6.2.5-buster", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - notWantLogMsg := "No sonar-project.properties present, using default:" - if strings.Contains(string(ctxt.CollectedLogs), notWantLogMsg) { - t.Fatalf("Did not want:\n%s\n\nGot:\n%s", notWantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }, }) } diff --git a/test/testdata/fixtures/manager/ods.yaml b/test/testdata/fixtures/manager/ods.yaml index b32749f6..ff4ce326 100644 --- a/test/testdata/fixtures/manager/ods.yaml +++ b/test/testdata/fixtures/manager/ods.yaml @@ -2,7 +2,7 @@ pipeline: tasks: - name: go-helm-build taskRef: - kind: ClusterTask + kind: Task name: ods-build-go-v0-1-0 params: - name: DOCKER_CONTEXT @@ -16,7 +16,7 @@ pipeline: workspace: shared-workspace - name: go-helm-deploy taskRef: - kind: ClusterTask + kind: Task name: ods-deploy-helm-v0-1-0 runAfter: - go-helm-build diff --git a/test/testdata/golden/manager/pipeline.yaml b/test/testdata/golden/manager/pipeline.yaml index c0c1a9b4..082879ca 100644 --- a/test/testdata/golden/manager/pipeline.yaml +++ b/test/testdata/golden/manager/pipeline.yaml @@ -24,7 +24,7 @@ spec: - name: aggregate-tasks-status 
value: $(tasks.status) taskRef: - kind: ClusterTask + kind: Task name: ods-finish-v0-1-0 workspaces: - name: source @@ -77,7 +77,7 @@ spec: - name: version value: $(params.version) taskRef: - kind: ClusterTask + kind: Task name: ods-start-v0-1-0 workspaces: - name: source @@ -93,7 +93,7 @@ spec: runAfter: - ods-start taskRef: - kind: ClusterTask + kind: Task name: ods-build-go-v0-1-0 workspaces: - name: source @@ -113,7 +113,7 @@ spec: runAfter: - go-helm-build taskRef: - kind: ClusterTask + kind: Task name: ods-deploy-helm-v0-1-0 workspaces: - name: source
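
Note on the generated Helm values (an illustrative sketch, not part of the patch): with the changes above, run-bitbucket.sh, run-nexus.sh and run-sonarqube.sh all write to deploy/ods-pipeline/values.generated.yaml, each creating the file with a top-level "setup:" key if it does not exist yet and then appending its own entries indented underneath. After all three scripts have run, the file should look roughly like the following; the key order depends on which script ran first, and values shown as <...> are placeholders for data not visible in this diff:

    setup:
      bitbucketUrl: 'http://<bitbucket-server-container>.kind:7990'
      bitbucketUsername: 'admin'
      bitbucketAccessToken: 'NzU0OTk1MjU0NjEzOpzj5hmFNAaawvupxPKpcJlsfNgP'
      bitbucketWebhookSecret: 's3cr3t'
      nexusUrl: 'http://ods-test-nexus.kind:8081'
      nexusUsername: '<developer-username>'
      nexusPassword: '<developer-password>'
      sonarUrl: 'http://<sonarqube-container>.kind:9000'
      sonarUsername: 'admin'
      sonarAuthToken: '<generated-token>'

scripts/install-cd-namespace-resources.sh then passes this file (together with values.kind.yaml) to deploy/install.sh via -f, so the locally running test services are wired into the chart installed in the CD namespace.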