diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index dc39e31bcd9..ac16988e453 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -13,16 +13,16 @@ "-v", "keda-gomodcache:/go/pkg", // Cache vscode exentsions installs and homedir "-v", "keda-vscodecache:/root/.vscode-server", - + // Mount docker socket for docker builds "-v", "/var/run/docker.sock:/var/run/docker.sock", "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ], - // Use 'settings' to set *default* container specific settings.json values on container create. + // Use 'settings' to set *default* container specific settings.json values on container create. // You can edit these settings after create using File > Preferences > Settings > Remote. - "settings": { + "settings": { "terminal.integrated.shell.linux": "/bin/bash", "go.gopath": "/go" }, @@ -31,4 +31,4 @@ "extensions": [ "ms-vscode.go" ] -} \ No newline at end of file +} diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9251b217eb4..e5bc4092c2d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1,3 @@ # These owners will be the default owners for everything in # the repo. Unless a later match takes precedence -* @ahmelsayed @zroubalik +* @ahmelsayed @zroubalik diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md index c03211e509a..c4d23329310 100644 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ b/.github/ISSUE_TEMPLATE/Bug_report.md @@ -23,4 +23,4 @@ A clear and concise description of what the bug is. - **KEDA Version:** *Please elaborate* - **Platform & Version:** *Please elaborate* - **Kubernetes Version:** *Please elaborate* - - **Scaler(s):** *Please elaborate* \ No newline at end of file + - **Scaler(s):** *Please elaborate* diff --git a/.github/ISSUE_TEMPLATE/Suggest_scaler.md b/.github/ISSUE_TEMPLATE/Suggest_scaler.md index f056d3d3200..68bcf2452e5 100644 --- a/.github/ISSUE_TEMPLATE/Suggest_scaler.md +++ b/.github/ISSUE_TEMPLATE/Suggest_scaler.md @@ -8,4 +8,4 @@ A clear and concise description of what scaler you'd like to use and how you'd w - **Scaler Source:** *Please elaborate* - **How do you want to scale:** *Please elaborate* - - **Authentication:** *Please elaborate* \ No newline at end of file + - **Authentication:** *Please elaborate* diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 5c17ef11a0a..d60106bb583 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -2,4 +2,4 @@ blank_issues_enabled: false contact_links: - name: Ask a question or get support url: https://github.com/kedacore/keda/discussions/new - about: Ask a question or request support for using KEDA \ No newline at end of file + about: Ask a question or request support for using KEDA diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ca3b2b438ff..06364ad4c99 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,5 +1,5 @@ diff --git a/.github/workflows/code-quality.yaml b/.github/workflows/code-quality.yaml new file mode 100644 index 00000000000..c8e74c4514b --- /dev/null +++ b/.github/workflows/code-quality.yaml @@ -0,0 +1,25 @@ +name: Code Quality +on: + push: + branches: + - master + - v2 + pull_request: + branches: + - master + - v2 +jobs: + statics: + name: Static checks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v1 + - uses: actions/setup-go@v2-beta + with: + 
go-version: 1.15 + - name: install go-lint + run: | + go get -u golang.org/x/lint/golint + export PATH=$PATH:$(go list -f {{.Target}} golang.org/x/lint/golint) + - uses: pre-commit/action@v1.0.1 diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 848b0a6f84a..c13788cc356 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -22,7 +22,7 @@ jobs: - name: Verify Generated clientset is up to date run: make clientset-verify - + - name: Build run: make build diff --git a/.gitignore b/.gitignore index c3c5921dafc..9ba94bc802c 100644 --- a/.gitignore +++ b/.gitignore @@ -346,4 +346,4 @@ vendor cover.out # GO debug binary -cmd/manager/debug.test \ No newline at end of file +cmd/manager/debug.test diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..1b16d30b3f3 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,77 @@ +default_stages: [commit, push] +minimum_pre_commit_version: "1.20.0" +repos: + - repo: git://github.com/dnephin/pre-commit-golang + rev: master + hooks: + - id: go-fmt + - id: go-lint + exclude: | + (?x)( + .*zz_generated.*| + ^api/v1alpha1/condition_types\.go$| + ^api/v1alpha1/groupversion_info\.go$| + ^api/v1alpha1/gvkr_types\.go$| + ^api/v1alpha1/scaledjob_types\.go$| + ^api/v1alpha1/scaledobject_types\.go$| + ^api/v1alpha1/triggerauthentication_types\.go$| + ^controllers/scaledjob_controller\.go$| + ^controllers/scaledobject_controller\.go$| + ^controllers/util/status\.go$| + ^controllers/util/string_lists\.go$| + ^hack/tools\.go$| + ^pkg/scalers/artemis_scaler\.go$| + ^pkg/scalers/azure/azure_aad_podidentity\.go$| + ^pkg/scalers/azure/azure_eventhub\.go$| + ^pkg/scalers/azure/azure_monitor\.go$| + ^pkg/scalers/azure/azure_queue\.go$| + ^pkg/scalers/azure/azure_storage\.go$| + ^pkg/scalers/azure_eventhub_scaler\.go$| + ^pkg/scalers/azure_queue_scaler\.go$| + ^pkg/scalers/azure_servicebus_scaler\.go$| + ^pkg/scalers/cron_scaler\.go$| + ^pkg/scalers/external_scaler\.go$| + ^pkg/scalers/kafka_scram_client\.go$| + ^pkg/scalers/liiklus_scaler\.go$| + ^pkg/scalers/postgresql_scaler\.go$| + ^pkg/scalers/rabbitmq_scaler\.go$| + ^pkg/scalers/rabbitmq_scaler_test\.go$| + ^pkg/scalers/scaler\.go$| + ^pkg/scaling/executor/scale_executor\.go$| + ^pkg/scaling/resolver/hashicorpvault_handler\.go$| + ^pkg/scaling/resolver/scale_resolvers\.go$| + ^pkg/util/gvkr\.go$| + ^pkg/util/k8sversion\.go$| + ^pkg/util/normalize_string\.go$| + ^version/version\.go$ + ) + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: + - id: trailing-whitespace + - id: detect-private-key + - id: end-of-file-fixer + - id: check-merge-conflict + - id: mixed-line-ending + - repo: https://github.com/thlorenz/doctoc.git + rev: v1.4.0 + hooks: + - id: doctoc + name: Add TOC for md files + files: ^README\.md$|^CONTRIBUTING\.md$ + args: + - "--maxlevel" + - "2" + - repo: local + hooks: + - id: language-matters + language: pygrep + name: Check for language that we do not accept as community + description: Please use "deny_list" or "allow_list" instead. + entry: "(?i)(black|white)[_-]?(list|List)" + pass_filenames: true + - id: sort-scalers + name: Check if scalers are sorted in scaler_handler.go + language: system + entry: "bash tools/sort_scalers.sh" + files: .*scale_handler\.go$ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 31e39b51f6d..902cb0f2205 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,6 +4,19 @@ Thanks for helping make KEDA better 😍. 
There are many areas we can use contributions - ranging from code, documentation, feature proposals, issue triage, samples, and content creation. + + +**Table of contents** + +- [Getting Help](#getting-help) +- [Contributing Scalers](#contributing-scalers) +- [Including Documentation Changes](#including-documentation-changes) +- [Creating and building a local environment](#creating-and-building-a-local-environment) +- [Developer Certificate of Origin: Signing your work](#developer-certificate-of-origin-signing-your-work) +- [Code Quality](#code-quality) + + + ## Getting Help If you have a question about KEDA or how best to contribute, the [#KEDA](https://kubernetes.slack.com/archives/CKZJ36A5D) channel on the Kubernetes slack channel ([get an invite if you don't have one already](https://slack.k8s.io/)) is a good place to start. We also have regular [community stand-ups](https://github.com/kedacore/keda#community-standup) to track ongoing work and discuss areas of contribution. For any issues with the product you can [create an issue](https://github.com/kedacore/keda/issues/new) in this repo. @@ -68,3 +81,29 @@ git add -A git commit -sm "one commit on " git push --force ``` + +## Code Quality + +This project uses [pre-commit](https://pre-commit.com) to ensure the quality of the code. +We encourage you to use pre-commit, but it's not required to contribute. Every change is checked +on CI and if it does not pass the checks it cannot be accepted. If you want to run the checks locally, +install Python 3.6 or newer and run: +```bash +pip install pre-commit +# or +brew install pre-commit +``` +For more installation options, see the [pre-commit documentation](https://pre-commit.com). + +To turn on pre-commit checks for commit operations in git, run: +```bash +pre-commit install +``` +To run all checks on your staged files, run: +```bash +pre-commit run +``` +To run all checks on all files, run: +```bash +pre-commit run --all-files +``` diff --git a/Dockerfile.adapter b/Dockerfile.adapter index 63b799d103e..e7161b505c4 100644 --- a/Dockerfile.adapter +++ b/Dockerfile.adapter @@ -34,4 +34,4 @@ COPY --from=builder /workspace/bin/keda-adapter . USER nonroot:nonroot -ENTRYPOINT ["/keda-adapter", "--secure-port=6443", "--logtostderr=true", "--v=0"] \ No newline at end of file +ENTRYPOINT ["/keda-adapter", "--secure-port=6443", "--logtostderr=true", "--v=0"] diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 5234e9f3fd3..ebf8d2a0380 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -20,7 +20,7 @@ maintainers over the course of a one week voting period. At the end of the week, votes are counted and a pull request is made on the repo adding the new maintainer to the [MAINTAINERS](MAINTAINERS.md) file. -Individuals interested in becoming maintainers may submit an [issue](https://github.com/kedacore/keda/issues/new) +Individuals interested in becoming maintainers may submit an [issue](https://github.com/kedacore/keda/issues/new) stating their interest. Existing maintainers can choose if they would like to nominate these individuals to be a maintainer following the process above. diff --git a/LICENSE b/LICENSE index 694990cc186..2d550e82ef4 100644 --- a/LICENSE +++ b/LICENSE @@ -201,4 +201,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License.
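The `go-lint` pre-commit hook and the `golint` install wired up above (in `.pre-commit-config.yaml` and `.github/workflows/code-quality.yaml`) are what drive the snake_case-to-camelCase renames that appear later in this patch, such as `connection_string` → `connectionString` and `query_escaped` → `queryEscaped`. As a minimal illustrative sketch of the style golint enforces (the `resolveConnection` helper below is made up for illustration and is not code from this patch):

```go
package scalers

import "os"

// golint flags Go identifiers that contain underscores (its message reads
// roughly "don't use underscores in Go names"); the convention is camelCase
// for local variables and MixedCaps for exported names.
func resolveConnection() string {
	// Before, the style golint rejects:
	//   connection_string := os.Getenv("SERVICEBUS_CONNECTION_STRING")

	// After, the style this patch applies throughout:
	connectionString := os.Getenv("SERVICEBUS_CONNECTION_STRING")
	return connectionString
}
```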
diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 9ec053a7bbb..70ec76cc12a 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -16,4 +16,4 @@ | -------------------- | --------------------------------------------- | ----------- | | Aarthi Saravanakumar | [Aarthisk](https://github.com/Aarthisk) | Microsoft | | Yaron Schneider | [yaron2](https://github.com/yaron2) | Microsoft | -| Ben Browning | [bbrowning](https://github.com/bbrowning) | Red Hat | \ No newline at end of file +| Ben Browning | [bbrowning](https://github.com/bbrowning) | Red Hat | diff --git a/README.md b/README.md index 48e26d3d5d9..31dfc57529c 100644 --- a/README.md +++ b/README.md @@ -17,16 +17,37 @@ Make sure to remove previous KEDA (including CRD) from the cluster. Switch to th Twitter

-KEDA allows for fine-grained autoscaling (including to/from zero) for event driven Kubernetes workloads. KEDA serves -as a Kubernetes Metrics Server and allows users to define autoscaling rules using a dedicated Kubernetes custom +KEDA allows for fine-grained autoscaling (including to/from zero) for event driven Kubernetes workloads. KEDA serves +as a Kubernetes Metrics Server and allows users to define autoscaling rules using a dedicated Kubernetes custom resource definition. -KEDA can run on both the cloud and the edge, integrates natively with Kubernetes components such as the Horizontal +KEDA can run on both the cloud and the edge, integrates natively with Kubernetes components such as the Horizontal Pod Autoscaler, and has no external dependencies. We are a Cloud Native Computing Foundation (CNCF) sandbox project. - + + + +**Table of contents** + +- [Getting started](#getting-started) +- [Deploying KEDA](#deploying-keda) +- [Documentation](#documentation) +- [FAQ](#faq) +- [Samples](#samples) +- [Releases](#releases) +- [Contributing](#contributing) +- [Community](#community) +- [Building: Quick start with Visual Studio Code Remote - Containers](#building-quick-start-with-visual-studio-code-remote---containers) +- [Building: Locally directly](#building-locally-directly) +- [Deploying: Custom KEDA locally outside cluster](#deploying-custom-keda-locally-outside-cluster) +- [Deploying: Custom KEDA as an image](#deploying-custom-keda-as-an-image) +- [Setting log levels](#setting-log-levels) + + + + ## Getting started * [QuickStart - RabbitMQ and Go](https://github.com/kedacore/sample-go-rabbitmq) @@ -62,19 +83,19 @@ You can find Contributing guide [here](./CONTRIBUTING.md) If interested in contributing or participating in the direction of KEDA, you can join our community meetings. -* **Meeting time:** Bi-weekly Thurs 16:00 UTC (does follow US daylight savings). +* **Meeting time:** Bi-weekly Thurs 16:00 UTC (does follow US daylight savings). ([Subscribe to Google Agenda](https://calendar.google.com/calendar?cid=bjE0bjJtNWM0MHVmam1ob2ExcTgwdXVkOThAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ) | [Convert to your timezone](https://www.thetimezoneconverter.com/?t=04%3A00%20pm&tz=UTC)) * **Zoom link:** [https://zoom.us/j/150360492 ](https://zoom.us/j/150360492 ) * **Meeting agenda:** [https://hackmd.io/s/r127ErYiN](https://hackmd.io/s/r127ErYiN) -Just want to learn or chat about KEDA? Feel free to join the conversation in +Just want to learn or chat about KEDA? Feel free to join the conversation in **[#KEDA](https://kubernetes.slack.com/messages/CKZJ36A5D)** on the **[Kubernetes Slack](https://slack.k8s.io/)**! ## Building: Quick start with [Visual Studio Code Remote - Containers](https://code.visualstudio.com/docs/remote/containers) -This helps you pull and build quickly - dev containers launch the project inside a container with all the tooling -required for a consistent and seamless developer experience. +This helps you pull and build quickly - dev containers launch the project inside a container with all the tooling +required for a consistent and seamless developer experience. This means you don't have to install and configure your dev environment as the container handles this for you. @@ -92,11 +113,11 @@ code . Once VSCode launches run `CTRL+SHIFT+P -> Remote-Containers: Reopen in container` and then use the integrated terminal to run: -```bash +```bash make build ``` -> Note: The first time you run the container it will take some time to build and install the tooling. 
The image +> Note: The first time you run the container it will take some time to build and install the tooling. The image > will be cached so this is only required the first time. ## Building: Locally directly @@ -127,9 +148,11 @@ go env -w GOPROXY=https://proxy.golang.org,direct GOSUMDB=sum.golang.org ``` ## Deploying: Custom KEDA locally outside cluster + The Operator SDK framework allows you to run the operator/controller locally outside the cluster without - a need of building an image. This should help during development/debugging of KEDA Operator or Scalers. -> Note: This approach works only on Linux or macOS. +a need of building an image. This should help during development/debugging of KEDA Operator or Scalers. +> Note: This approach works only on Linux or macOS. + To have fully operational KEDA we need to deploy Metrics Server first. @@ -145,11 +168,11 @@ To have fully operational KEDA we need to deploy Metrics Server first. and change the operator log level via `--zap-log-level=` if needed ```bash make run ARGS="--zap-log-level=debug" - ``` + ``` ## Deploying: Custom KEDA as an image -If you want to change KEDA's behaviour, or if you have created a new scaler (more docs on this to come) and you want +If you want to change KEDA's behaviour, or if you have created a new scaler (more docs on this to come) and you want to deploy it as part of KEDA. Do the following: 1. Make your change in the code. @@ -162,7 +185,7 @@ to deploy it as part of KEDA. Do the following: ```bash IMAGE_REPO=johndoe make deploy ``` -4. Once the keda pods are up, check the logs to verify everything running ok, eg: +4. Once the keda pods are up, check the logs to verify everything running ok, eg: ```bash kubectl get pods --no-headers -n keda | awk '{print $1}' | grep keda-operator | xargs kubectl -n keda logs -f diff --git a/RELEASE-PROCESS.MD b/RELEASE-PROCESS.MD index 4a590a235ac..a44415bdd54 100644 --- a/RELEASE-PROCESS.MD +++ b/RELEASE-PROCESS.MD @@ -1,6 +1,6 @@ # Release Process -The release process of a new version of KEDA involves the following: +The release process of a new version of KEDA involves the following: ## 0. Prerequisites @@ -10,13 +10,13 @@ The next version will thus be 1.2.0 ## 1. Changelog -Provide a new section in `CHANGELOG.md` for the new version that is being released along with the new features, patches and deprecations it introduces. +Provide a new section in `CHANGELOG.md` for the new version that is being released along with the new features, patches and deprecations it introduces. It should not include every single change but solely what matters to our customers, for example issue template that has changed is not important. ## 2. Create KEDA release on GitHub -Creating a new release in the releases page (https://github.com/kedacore/keda/release) will trigger a GitHub workflow which will create a new image with the latest code and tagged with the next version (in this example 1.2.0). +Creating a new release in the releases page (https://github.com/kedacore/keda/release) will trigger a GitHub workflow which will create a new image with the latest code and tagged with the next version (in this example 1.2.0). KEDA Deployment YAML file (eg. keda-1.2.0.yaml) is also automatically created and attached to the Release as part of the workflow. @@ -30,14 +30,14 @@ See [docs](https://github.com/kedacore/keda-docs#publishing-a-new-version). ## 4. Update Helm Charts -a). Update the version and appVersion here: https://github.com/kedacore/charts/blob/master/keda/Chart.yaml +a). 
Update the version and appVersion here: https://github.com/kedacore/charts/blob/master/keda/Chart.yaml b). In the image section update the keda and metricsAdapter to point to the docker images from step 1 https://github.com/kedacore/charts/blob/master/keda/values.yaml Then run the commands here: https://github.com/kedacore/charts -- To deploy KEDA through Azure Functions Core Tools -- -Update the following file: +Update the following file: https://github.com/Azure/azure-functions-core-tools/blob/dev/src/Azure.Functions.Cli/StaticResources/keda.yaml [Search for 1.1.0 etc. and replace it] diff --git a/config/crd/patches/scaledjob_patch.yaml b/config/crd/patches/scaledjob_patch.yaml index d65fcfdc075..a6e9c41b4a2 100644 --- a/config/crd/patches/scaledjob_patch.yaml +++ b/config/crd/patches/scaledjob_patch.yaml @@ -9,4 +9,4 @@ - op: add path: /spec/validation/openAPIV3Schema/properties/spec/properties/jobTargetRef/properties/template/properties/spec/properties/initContainers/items/properties/ports/items/required/- - value: protocol \ No newline at end of file + value: protocol diff --git a/config/general/kustomization.yaml b/config/general/kustomization.yaml index 55d60081f57..29f4bdc8d83 100644 --- a/config/general/kustomization.yaml +++ b/config/general/kustomization.yaml @@ -1,4 +1,3 @@ resources: - namespace.yaml - service_account.yaml - diff --git a/config/metrics-server/role.yaml b/config/metrics-server/role.yaml index 5d22d37ec43..8a538385704 100644 --- a/config/metrics-server/role.yaml +++ b/config/metrics-server/role.yaml @@ -12,4 +12,4 @@ rules: resources: - '*' verbs: - - '*' \ No newline at end of file + - '*' diff --git a/config/metrics-server/role_binding.yaml b/config/metrics-server/role_binding.yaml index 9bc12043cfe..1bf6c6e5ac4 100644 --- a/config/metrics-server/role_binding.yaml +++ b/config/metrics-server/role_binding.yaml @@ -48,4 +48,4 @@ roleRef: subjects: - kind: ServiceAccount name: horizontal-pod-autoscaler - namespace: kube-system \ No newline at end of file + namespace: kube-system diff --git a/config/metrics-server/service.yaml b/config/metrics-server/service.yaml index d5b2df805fc..e1281b12bae 100644 --- a/config/metrics-server/service.yaml +++ b/config/metrics-server/service.yaml @@ -17,4 +17,4 @@ spec: port: 80 targetPort: 8080 selector: - app: keda-metrics-apiserver \ No newline at end of file + app: keda-metrics-apiserver diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 8d1c8256868..27553551136 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -6,4 +6,4 @@ commonLabels: resources: - role.yaml -- role_binding.yaml \ No newline at end of file +- role_binding.yaml diff --git a/config/samples/keda_v1alpha1_scaledjob.yaml b/config/samples/keda_v1alpha1_scaledjob.yaml index 3e3d203f683..e9443b0b6d7 100644 --- a/config/samples/keda_v1alpha1_scaledjob.yaml +++ b/config/samples/keda_v1alpha1_scaledjob.yaml @@ -4,10 +4,10 @@ metadata: name: scaledjob-sample spec: jobTargetRef: - parallelism: 1 - completions: 1 - activeDeadlineSeconds: 600 - backoffLimit: 6 + parallelism: 1 + completions: 1 + activeDeadlineSeconds: 600 + backoffLimit: 6 template: ## template pollingInterval: 30 @@ -17,4 +17,4 @@ spec: triggers: - type: example-trigger metadata: - property: examle-property + property: examle-property diff --git a/config/samples/keda_v1alpha1_scaledobject.yaml b/config/samples/keda_v1alpha1_scaledobject.yaml index 62995785630..a4c87b479bd 100644 --- a/config/samples/keda_v1alpha1_scaledobject.yaml +++ 
b/config/samples/keda_v1alpha1_scaledobject.yaml @@ -12,4 +12,4 @@ spec: triggers: - type: example-trigger metadata: - property: examle-property \ No newline at end of file + property: examle-property diff --git a/config/samples/keda_v1alpha1_triggerauthentication.yaml b/config/samples/keda_v1alpha1_triggerauthentication.yaml index 328a3a9a84a..bc4c5840a30 100644 --- a/config/samples/keda_v1alpha1_triggerauthentication.yaml +++ b/config/samples/keda_v1alpha1_triggerauthentication.yaml @@ -6,4 +6,4 @@ spec: secretTargetRef: - parameter: exmaple-secret-parameter name: example-secret-name - key: example-role-key \ No newline at end of file + key: example-role-key diff --git a/hack/LiiklusService.proto b/hack/LiiklusService.proto index 307cd7278f6..2754abbba8a 100644 --- a/hack/LiiklusService.proto +++ b/hack/LiiklusService.proto @@ -136,4 +136,4 @@ message GetEndOffsetsRequest { message GetEndOffsetsReply { map offsets = 1; -} \ No newline at end of file +} diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 22a8df315b1..979a3b81f84 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -12,4 +12,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/ \ No newline at end of file +*/ diff --git a/hack/verify-codegen.sh b/hack/verify-codegen.sh index c2fcdf90f37..cefcd4677a7 100755 --- a/hack/verify-codegen.sh +++ b/hack/verify-codegen.sh @@ -24,7 +24,7 @@ cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}" # Kubebuilder project layout has api under 'api/v1alpha1' # client-go codegen expects group name in the path ie. 'api/keda/v1alpha' -# Because there's no way how to modify any of these settings, +# Because there's no way how to modify any of these settings, # we need to hack things a little bit (replace the name of package) find "${DIFFROOT}/generated" -type f -name "*.go" | xargs sed -i "s#github.com/kedacore/keda/api/keda/v1alpha1#github.com/kedacore/keda/api/v1alpha1#g" diff --git a/images/keda-icon.svg b/images/keda-icon.svg index 8e2576cd9f9..9fe3f44d84c 100644 --- a/images/keda-icon.svg +++ b/images/keda-icon.svg @@ -34,4 +34,4 @@ - \ No newline at end of file + diff --git a/images/keda-word.svg b/images/keda-word.svg index bbb5073c8a6..d847986266b 100644 --- a/images/keda-word.svg +++ b/images/keda-word.svg @@ -9,4 +9,4 @@ - \ No newline at end of file + diff --git a/pkg/scalers/azure_servicebus_scaler_test.go b/pkg/scalers/azure_servicebus_scaler_test.go index 4a63a41dfe3..d90285ac495 100755 --- a/pkg/scalers/azure_servicebus_scaler_test.go +++ b/pkg/scalers/azure_servicebus_scaler_test.go @@ -103,12 +103,12 @@ func TestGetServiceBusLength(t *testing.T) { t.Logf("\tQueue '%s' has 1 message\n", queueName) t.Logf("\tTopic '%s' with subscription '%s' has 1 message\n", topicName, subscriptionName) - connection_string := os.Getenv("SERVICEBUS_CONNECTION_STRING") + connectionString := os.Getenv("SERVICEBUS_CONNECTION_STRING") for _, scaler := range getServiceBusLengthTestScalers { - if connection_string != "" { + if connectionString != "" { // Can actually test that numbers return - scaler.metadata.connection = connection_string + scaler.metadata.connection = connectionString length, err := scaler.GetAzureServiceBusLength(context.TODO()) if err != nil { diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go index b3d749b2190..c2868357c7a 100644 --- 
a/pkg/scalers/prometheus_scaler.go +++ b/pkg/scalers/prometheus_scaler.go @@ -129,8 +129,8 @@ func (s *prometheusScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec { func (s *prometheusScaler) ExecutePromQuery() (float64, error) { t := time.Now().UTC().Format(time.RFC3339) - query_escaped := url_pkg.QueryEscape(s.metadata.query) - url := fmt.Sprintf("%s/api/v1/query?query=%s&time=%s", s.metadata.serverAddress, query_escaped, t) + queryEscaped := url_pkg.QueryEscape(s.metadata.query) + url := fmt.Sprintf("%s/api/v1/query?query=%s&time=%s", s.metadata.serverAddress, queryEscaped, t) r, err := http.Get(url) if err != nil { return -1, err diff --git a/pkg/scalers/rabbitmq_scaler_test.go b/pkg/scalers/rabbitmq_scaler_test.go index ee2047c266f..f2bfaab25fa 100644 --- a/pkg/scalers/rabbitmq_scaler_test.go +++ b/pkg/scalers/rabbitmq_scaler_test.go @@ -101,24 +101,24 @@ var vhost_pathes = []string{"/myhost", "", "/", "//", "/%2F"} func TestGetQueueInfo(t *testing.T) { for _, testData := range testQueueInfoTestData { - for _, vhost_path := range vhost_pathes { - expeced_vhost := "myhost" + for _, vhostPath := range vhost_pathes { + expecedVhost := "myhost" - if vhost_path != "/myhost" { - expeced_vhost = "%2F" + if vhostPath != "/myhost" { + expecedVhost = "%2F" } var apiStub = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - expeced_path := "/api/queues/" + expeced_vhost + "/evaluate_trials" - if r.RequestURI != expeced_path { - t.Error("Expect request path to =", expeced_path, "but it is", r.RequestURI) + expecedPath := "/api/queues/" + expecedVhost + "/evaluate_trials" + if r.RequestURI != expecedPath { + t.Error("Expect request path to =", expecedPath, "but it is", r.RequestURI) } w.WriteHeader(testData.responseStatus) w.Write([]byte(testData.response)) })) - resolvedEnv := map[string]string{apiHost: fmt.Sprintf("%s%s", apiStub.URL, vhost_path)} + resolvedEnv := map[string]string{apiHost: fmt.Sprintf("%s%s", apiStub.URL, vhostPath)} metadata := map[string]string{ "queueLength": "10", diff --git a/pkg/scaling/executor/scale_jobs.go b/pkg/scaling/executor/scale_jobs.go index 10e412b7705..05634be953f 100644 --- a/pkg/scaling/executor/scale_jobs.go +++ b/pkg/scaling/executor/scale_jobs.go @@ -23,7 +23,7 @@ const ( func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, scaleTo int64, maxScale int64) { logger := e.logger.WithValues("scaledJob.Name", scaledJob.Name, "scaledJob.Namespace", scaledJob.Namespace) - + runningJobCount := e.getRunningJobCount(scaledJob, maxScale) logger.Info("Scaling Jobs", "Number of running Jobs", runningJobCount) diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index 7127422852d..e473f8c49e5 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -363,6 +363,7 @@ func (h *scaleHandler) getPods(scalableObject interface{}) (*corev1.PodTemplateS } func buildScaler(name, namespace, triggerType string, resolvedEnv, triggerMetadata, authParams map[string]string, podIdentity string) (scalers.Scaler, error) { + // TRIGGERS-START switch triggerType { case "artemis-queue": return scalers.NewArtemisQueueScaler(resolvedEnv, triggerMetadata, authParams) @@ -413,6 +414,7 @@ func buildScaler(name, namespace, triggerType string, resolvedEnv, triggerMetada default: return nil, fmt.Errorf("no scaler found for type: %s", triggerType) } + // TRIGGERS-END } func asDuckWithTriggers(scalableObject interface{}) (*kedav1alpha1.WithTriggers, error) { diff 
--git a/tests/.env b/tests/.env index 64f3d3a5edb..b3e85a63835 100644 --- a/tests/.env +++ b/tests/.env @@ -3,4 +3,4 @@ AZURE_SP_KEY= AZURE_SP_TENANT= AZURE_SUBSCRIPTION= AZURE_RESOURCE_GROUP= -TEST_STORAGE_CONNECTION_STRING= \ No newline at end of file +TEST_STORAGE_CONNECTION_STRING= diff --git a/tests/README.md b/tests/README.md index 4d52fcea5e5..91a440879d4 100644 --- a/tests/README.md +++ b/tests/README.md @@ -113,6 +113,3 @@ test.after.always('remove redis and my deployment', t => { ```ts test.serial.only('this will be the only test to run', t => { }); ``` - - - diff --git a/tests/run-all.sh b/tests/run-all.sh index 729e6d412da..0035ce5df8e 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -83,4 +83,4 @@ then else print_failed exit 1 -fi \ No newline at end of file +fi diff --git a/tests/scalers/artemis-helpers.ts b/tests/scalers/artemis-helpers.ts index 07060b3fc95..e5235576632 100644 --- a/tests/scalers/artemis-helpers.ts +++ b/tests/scalers/artemis-helpers.ts @@ -13,17 +13,17 @@ export class ArtemisHelper { sh.exec(`kubectl -n ${artemisNamespace} apply -f ${tmpFile.name}`).code, 'creating artemis deployment should work.' ) t.is( - 0, + 0, sh.exec(`kubectl -n ${artemisNamespace} wait --for=condition=available --timeout=600s deployment/artemis-activemq`).code, 'Artemis should be available.' ) - + } static installArtemisSecret(t, testNamespace: string) { const tmpFile = tmp.fileSync() fs.writeFileSync(tmpFile.name, artemisSecretYaml) sh.exec(`kubectl -n ${testNamespace} apply -f ${tmpFile.name}`).code, 'creating secrets should work.' - + } static publishMessages(t, testNamespace: string) { @@ -32,7 +32,7 @@ export class ArtemisHelper { t.is( 0, sh.exec(`kubectl -n ${testNamespace} apply -f ${tmpFile.name}`).code, 'creating artemis producer deployment should work.' - ) + ) } static installConsumer(t, testNamespace: string) { @@ -41,7 +41,7 @@ export class ArtemisHelper { t.is( 0, sh.exec(`kubectl -n ${testNamespace} apply -f ${tmpFile.name}`).code, 'creating artemis consumer deployment should work.' - ) + ) } static uninstallArtemis(t, artemisNamespace: string){ @@ -56,7 +56,7 @@ export class ArtemisHelper { fs.writeFileSync(tmpFile.name, consumerYaml) sh.exec(`kubectl -n ${testNamespace} delete -f ${tmpFile.name}`) fs.writeFileSync(tmpFile.name, producerYaml) - sh.exec(`kubectl -n ${testNamespace} delete -f ${tmpFile.name}`) + sh.exec(`kubectl -n ${testNamespace} delete -f ${tmpFile.name}`) } } @@ -279,4 +279,4 @@ spec: value: "61616" restartPolicy: Never backoffLimit: 4 -` \ No newline at end of file +` diff --git a/tests/scalers/artemis.test.ts b/tests/scalers/artemis.test.ts index f9837303c45..ff5ba430557 100644 --- a/tests/scalers/artemis.test.ts +++ b/tests/scalers/artemis.test.ts @@ -21,11 +21,11 @@ test.before(t => { ArtemisHelper.installConsumer(t, testNamespace) ArtemisHelper.publishMessages(t, testNamespace) -}); +}); test.serial('Deployment should have 0 replicas on start', t => { const replicaCount = sh.exec(`kubectl get deployment.apps/kedartemis-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`).stdout - + t.log('replica count: %s', replicaCount); t.is(replicaCount, '0', 'replica count should start out as 0') }) @@ -37,8 +37,8 @@ test.serial(`Deployment should scale to 5 with 1000 messages on the queue then b t.is( 0, sh.exec(`kubectl -n ${testNamespace} apply -f ${tmpFile.name}`).code, 'creating scaledObject should work.' 
- ) - + ) + // with messages published, the consumer deployment should start receiving the messages let replicaCount = '0' for (let i = 0; i < 10 && replicaCount !== '5'; i++) { @@ -50,9 +50,9 @@ test.serial(`Deployment should scale to 5 with 1000 messages on the queue then b sh.exec('sleep 5s') } } - + t.is('5', replicaCount, 'Replica count should be 5 after 10 seconds') - + for (let i = 0; i < 50 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment.apps/kedartemis-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` @@ -61,7 +61,7 @@ test.serial(`Deployment should scale to 5 with 1000 messages on the queue then b sh.exec('sleep 5s') } } - + t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes') }) @@ -110,4 +110,4 @@ spec: brokerAddress: "test" authenticationRef: name: trigger-auth-kedartemis -` \ No newline at end of file +` diff --git a/tests/scalers/azure-blob.test.ts b/tests/scalers/azure-blob.test.ts index 57c336fc60e..473df01a0d3 100644 --- a/tests/scalers/azure-blob.test.ts +++ b/tests/scalers/azure-blob.test.ts @@ -139,4 +139,4 @@ spec: metadata: blobContainerName: container-name blobPrefix: blobsubpath - connection: AzureWebJobsStorage` \ No newline at end of file + connection: AzureWebJobsStorage` diff --git a/tests/scalers/prometheus-deployment.yaml b/tests/scalers/prometheus-deployment.yaml index 0490ede773e..6fcf9f02a0c 100644 --- a/tests/scalers/prometheus-deployment.yaml +++ b/tests/scalers/prometheus-deployment.yaml @@ -13,10 +13,10 @@ metadata: data: alerting_rules.yml: | {} - + alerts: | {} - + prometheus.yml: | global: evaluation_interval: 1m @@ -262,13 +262,13 @@ data: target_label: kubernetes_pod_name scrape_interval: 5m scrape_timeout: 30s - + recording_rules.yml: | {} - + rules: | {} - + --- # Source: prometheus/templates/server-serviceaccount.yaml @@ -284,7 +284,7 @@ metadata: name: prometheus-server annotations: {} - + --- # Source: prometheus/templates/server-clusterrole.yaml @@ -411,7 +411,7 @@ spec: - --webhook-url=http://127.0.0.1:9090/-/reload resources: {} - + volumeMounts: - name: config-volume mountPath: /etc/config @@ -447,7 +447,7 @@ spec: successThreshold: 1 resources: {} - + volumeMounts: - name: config-volume mountPath: /etc/config @@ -459,7 +459,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - + terminationGracePeriodSeconds: 300 volumes: - name: config-volume @@ -595,5 +595,3 @@ spec: --- # Source: prometheus/templates/server-vpa.yaml - - diff --git a/tests/scalers/prometheus.test.ts b/tests/scalers/prometheus.test.ts index 151cd4e801b..c8295e77785 100644 --- a/tests/scalers/prometheus.test.ts +++ b/tests/scalers/prometheus.test.ts @@ -24,7 +24,7 @@ test.before(t => { sh.config.silent = true // create deployments - there are two deployments - both using the same image but one deployment - // is directly tied to the KEDA HPA while the other is isolated that can be used for metrics + // is directly tied to the KEDA HPA while the other is isolated that can be used for metrics // even when the KEDA deployment is at zero - the service points to both deployments const tmpFile = tmp.fileSync() fs.writeFileSync(tmpFile.name, deployYaml.replace('{{PROMETHEUS_NAMESPACE}}', prometheusNamespace)) diff --git a/tests/scalers/rabbitmq-helpers.ts b/tests/scalers/rabbitmq-helpers.ts index 6764239828d..650af604df9 100644 --- a/tests/scalers/rabbitmq-helpers.ts +++ b/tests/scalers/rabbitmq-helpers.ts @@ -96,9 +96,9 @@ spec: - image: rabbitmq:3-management name: rabbitmq env: - - name: 
RABBITMQ_DEFAULT_USER + - name: RABBITMQ_DEFAULT_USER value: "{{USERNAME}}" - - name: RABBITMQ_DEFAULT_PASS + - name: RABBITMQ_DEFAULT_PASS value: "{{PASSWORD}}" - name: RABBITMQ_DEFAULT_VHOST value: "{{VHOST}}" diff --git a/tests/scalers/rabbitmq-queue-amqp.test.ts b/tests/scalers/rabbitmq-queue-amqp.test.ts index f7531c2c500..203475dfc29 100644 --- a/tests/scalers/rabbitmq-queue-amqp.test.ts +++ b/tests/scalers/rabbitmq-queue-amqp.test.ts @@ -88,7 +88,7 @@ metadata: labels: app: test-deployment spec: - replicas: 0 + replicas: 0 selector: matchLabels: app: test-deployment diff --git a/tests/scalers/rabbitmq-queue-http.test.ts b/tests/scalers/rabbitmq-queue-http.test.ts index d6edfab26ef..47609e3fd0d 100644 --- a/tests/scalers/rabbitmq-queue-http.test.ts +++ b/tests/scalers/rabbitmq-queue-http.test.ts @@ -71,7 +71,7 @@ test.after.always.cb('clean up rabbitmq-queue deployment', t => { sh.exec(`kubectl delete ${resource} --namespace ${testNamespace}`) } sh.exec(`kubectl delete namespace ${testNamespace}`) - // remove rabbitmq + // remove rabbitmq RabbitMQHelper.uninstallRabbit(rabbitmqNamespace) t.end() }) @@ -90,7 +90,7 @@ metadata: labels: app: test-deployment spec: - replicas: 0 + replicas: 0 selector: matchLabels: app: test-deployment diff --git a/tests/scalers/rabbitmq-queue-trigger-auth.test.ts b/tests/scalers/rabbitmq-queue-trigger-auth.test.ts index 963e0ef43e1..884429b88be 100644 --- a/tests/scalers/rabbitmq-queue-trigger-auth.test.ts +++ b/tests/scalers/rabbitmq-queue-trigger-auth.test.ts @@ -15,7 +15,7 @@ const connectionString = `amqp://${username}:${password}@rabbitmq.${rabbitmqName const messageCount = 500 test.before(t => { - // install rabbitmq + // install rabbitmq RabbitMQHelper.installRabbit(t, username, password, vhost, rabbitmqNamespace) sh.config.silent = true @@ -72,7 +72,7 @@ test.after.always.cb('clean up rabbitmq-queue deployment', t => { } sh.exec(`kubectl delete namespace ${testNamespace}`) - // remove rabbitmq + // remove rabbitmq RabbitMQHelper.uninstallRabbit(rabbitmqNamespace) t.end() }) @@ -91,7 +91,7 @@ metadata: labels: app: test-deployment spec: - replicas: 0 + replicas: 0 selector: matchLabels: app: test-deployment diff --git a/tests/scalers/redis-lists.test.ts b/tests/scalers/redis-lists.test.ts index 03ece64e157..449285f68be 100644 --- a/tests/scalers/redis-lists.test.ts +++ b/tests/scalers/redis-lists.test.ts @@ -555,4 +555,4 @@ spec: args: ["write"] restartPolicy: Never backoffLimit: 4 -` \ No newline at end of file +` diff --git a/tests/scalers/redis-streams.test.ts b/tests/scalers/redis-streams.test.ts index e5a669978a3..9c10988d047 100644 --- a/tests/scalers/redis-streams.test.ts +++ b/tests/scalers/redis-streams.test.ts @@ -246,4 +246,4 @@ spec: name: redis-password key: password restartPolicy: Never -` \ No newline at end of file +` diff --git a/tools/build-tools.Dockerfile b/tools/build-tools.Dockerfile index 4a9cdd60ff9..92cafcd0f5a 100644 --- a/tools/build-tools.Dockerfile +++ b/tools/build-tools.Dockerfile @@ -70,4 +70,4 @@ ENV PATH=${PATH}:/usr/local/go/bin \ GOPATH=/go # Install FOSSA tooling -RUN curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install.sh | bash +RUN curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install.sh | bash diff --git a/tools/sort_scalers.sh b/tools/sort_scalers.sh new file mode 100644 index 00000000000..2daefbafcfc --- /dev/null +++ b/tools/sort_scalers.sh @@ -0,0 +1,17 @@ +#!/bin/sh +set -euo pipefail + +LEAD='TRIGGERS-START' 
+TAIL='TRIGGERS-END' + +SCALERS_FILE="pkg/scaling/scale_handler.go" +CURRENT=$(cat "${SCALERS_FILE}" | awk "/${LEAD}/,/${TAIL}/" | grep "case") +SORTED=$(cat "${SCALERS_FILE}" | awk "/${LEAD}/,/${TAIL}/" | grep "case" | sort) + +if [[ "${CURRENT}" == "${SORTED}" ]]; then + echo "Scalers are sorted in ${SCALERS_FILE}" + exit 0 +else + echo "Scalers are not sorted alphabetically in ${SCALERS_FILE}" + exit 1 +fi
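The `tools/sort_scalers.sh` script above pulls every `case` line that sits between the `// TRIGGERS-START` and `// TRIGGERS-END` markers added to `buildScaler` in `pkg/scaling/scale_handler.go`, compares that list with its sorted form, and exits non-zero when the scalers are out of alphabetical order; pre-commit runs it via the `sort-scalers` hook (`entry: "bash tools/sort_scalers.sh"`). For illustration only, here is a rough Go sketch of the same check; it is not part of this patch, which keeps the check as the shell script:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"sort"
	"strings"
)

const scalersFile = "pkg/scaling/scale_handler.go"

// Sketch of the sort_scalers.sh check: collect the `case` lines between the
// TRIGGERS-START and TRIGGERS-END markers and verify they are already sorted.
func main() {
	f, err := os.Open(scalersFile)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	var cases []string
	inBlock := false
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		switch {
		case strings.Contains(line, "TRIGGERS-START"):
			inBlock = true
		case strings.Contains(line, "TRIGGERS-END"):
			inBlock = false
		case inBlock && strings.Contains(line, "case "):
			cases = append(cases, strings.TrimSpace(line))
		}
	}
	if err := scanner.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if sort.StringsAreSorted(cases) {
		fmt.Printf("Scalers are sorted in %s\n", scalersFile)
		return
	}
	fmt.Printf("Scalers are not sorted alphabetically in %s\n", scalersFile)
	os.Exit(1)
}
```

Keeping the canonical check in shell keeps the pre-commit hook dependency-free; the Go version is only meant to spell out what the `awk`/`grep`/`sort` pipeline does.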