[v2] Add pre-commit and CI static checks (#1025)

* Add pre-commit and CI static checks

Signed-off-by: Tomek Urbaszek <tomasz.urbaszek@polidea.com>

* Check if scalers are sorted and check for offensive language

Signed-off-by: Tomek Urbaszek <tomasz.urbaszek@polidea.com>

* Add autogenerated tables of content

Signed-off-by: Tomek Urbaszek <tomasz.urbaszek@polidea.com>

* Improve information about pre-commits in KEDA

Signed-off-by: Tomek Urbaszek <tomasz.urbaszek@polidea.com>

* Align casing

Signed-off-by: Tomek Urbaszek <tomasz.urbaszek@polidea.com>

* Fix merge conflict

Signed-off-by: Tomek Urbaszek <tomasz.urbaszek@polidea.com>

* fixup! Fix merge conflict

Signed-off-by: Tomek Urbaszek <tomasz.urbaszek@polidea.com>
This commit is contained in:
Tomek Urbaszek 2020-08-26 13:03:39 +02:00 committed by GitHub
parent a768340bcd
commit 02f6da7e1f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
52 changed files with 288 additions and 111 deletions

View File

@ -13,16 +13,16 @@
"-v", "keda-gomodcache:/go/pkg",
// Cache vscode extension installs and homedir
"-v", "keda-vscodecache:/root/.vscode-server",
// Mount docker socket for docker builds
"-v", "/var/run/docker.sock:/var/run/docker.sock",
"--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined"
],
// Use 'settings' to set *default* container specific settings.json values on container create.
// Use 'settings' to set *default* container specific settings.json values on container create.
// You can edit these settings after create using File > Preferences > Settings > Remote.
"settings": {
"settings": {
"terminal.integrated.shell.linux": "/bin/bash",
"go.gopath": "/go"
},
@ -31,4 +31,4 @@
"extensions": [
"ms-vscode.go"
]
}
}

2
.github/CODEOWNERS vendored
View File

@ -1,3 +1,3 @@
# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence
* @ahmelsayed @zroubalik
* @ahmelsayed @zroubalik

View File

@ -23,4 +23,4 @@ A clear and concise description of what the bug is.
- **KEDA Version:** *Please elaborate*
- **Platform & Version:** *Please elaborate*
- **Kubernetes Version:** *Please elaborate*
- **Scaler(s):** *Please elaborate*
- **Scaler(s):** *Please elaborate*

View File

@ -8,4 +8,4 @@ A clear and concise description of what scaler you'd like to use and how you'd w
- **Scaler Source:** *Please elaborate*
- **How do you want to scale:** *Please elaborate*
- **Authentication:** *Please elaborate*
- **Authentication:** *Please elaborate*

View File

@ -2,4 +2,4 @@ blank_issues_enabled: false
contact_links:
- name: Ask a question or get support
url: https://github.com/kedacore/keda/discussions/new
about: Ask a question or request support for using KEDA
about: Ask a question or request support for using KEDA

View File

@ -1,5 +1,5 @@
<!-- Thank you for contributing!
Read more about how you can contribute in our contribution guide:
https://github.com/kedacore/keda/blob/master/CONTRIBUTING.md
-->

25
.github/workflows/code-quality.yaml vendored Normal file
View File

@ -0,0 +1,25 @@
name: Code Quality
on:
push:
branches:
- master
- v2
pull_request:
branches:
- master
- v2
jobs:
statics:
name: Static checks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
- uses: actions/setup-go@v2-beta
with:
go-version: 1.15
- name: install go-lint
run: |
go get -u golang.org/x/lint/golint
export PATH=$PATH:$(go list -f {{.Target}} golang.org/x/lint/golint)
- uses: pre-commit/action@v1.0.1
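
To reproduce what this workflow does on a local machine, a rough equivalent (a sketch, assuming Go and pre-commit are already installed and the golint binary ends up on your PATH) is:
```bash
# Install golint, then run every configured pre-commit hook against the repo,
# which is roughly what pre-commit/action does in CI
go get -u golang.org/x/lint/golint
pre-commit run --all-files
```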

View File

@ -22,7 +22,7 @@ jobs:
- name: Verify Generated clientset is up to date
run: make clientset-verify
- name: Build
run: make build

2
.gitignore vendored
View File

@ -346,4 +346,4 @@ vendor
cover.out
# GO debug binary
cmd/manager/debug.test
cmd/manager/debug.test

77
.pre-commit-config.yaml Normal file
View File

@ -0,0 +1,77 @@
default_stages: [commit, push]
minimum_pre_commit_version: "1.20.0"
repos:
- repo: git://github.com/dnephin/pre-commit-golang
rev: master
hooks:
- id: go-fmt
- id: go-lint
exclude: |
(?x)(
.*zz_generated.*|
^api/v1alpha1/condition_types\.go$|
^api/v1alpha1/groupversion_info\.go$|
^api/v1alpha1/gvkr_types\.go$|
^api/v1alpha1/scaledjob_types\.go$|
^api/v1alpha1/scaledobject_types\.go$|
^api/v1alpha1/triggerauthentication_types\.go$|
^controllers/scaledjob_controller\.go$|
^controllers/scaledobject_controller\.go$|
^controllers/util/status\.go$|
^controllers/util/string_lists\.go$|
^hack/tools\.go$|
^pkg/scalers/artemis_scaler\.go$|
^pkg/scalers/azure/azure_aad_podidentity\.go$|
^pkg/scalers/azure/azure_eventhub\.go$|
^pkg/scalers/azure/azure_monitor\.go$|
^pkg/scalers/azure/azure_queue\.go$|
^pkg/scalers/azure/azure_storage\.go$|
^pkg/scalers/azure_eventhub_scaler\.go$|
^pkg/scalers/azure_queue_scaler\.go$|
^pkg/scalers/azure_servicebus_scaler\.go$|
^pkg/scalers/cron_scaler\.go$|
^pkg/scalers/external_scaler\.go$|
^pkg/scalers/kafka_scram_client\.go$|
^pkg/scalers/liiklus_scaler\.go$|
^pkg/scalers/postgresql_scaler\.go$|
^pkg/scalers/rabbitmq_scaler\.go$|
^pkg/scalers/rabbitmq_scaler_test\.go$|
^pkg/scalers/scaler\.go$|
^pkg/scaling/executor/scale_executor\.go$|
^pkg/scaling/resolver/hashicorpvault_handler\.go$|
^pkg/scaling/resolver/scale_resolvers\.go$|
^pkg/util/gvkr\.go$|
^pkg/util/k8sversion\.go$|
^pkg/util/normalize_string\.go$|
^version/version\.go$
)
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: trailing-whitespace
- id: detect-private-key
- id: end-of-file-fixer
- id: check-merge-conflict
- id: mixed-line-ending
- repo: https://github.com/thlorenz/doctoc.git
rev: v1.4.0
hooks:
- id: doctoc
name: Add TOC for md files
files: ^README\.md$|^CONTRIBUTING\.md$
args:
- "--maxlevel"
- "2"
- repo: local
hooks:
- id: language-matters
language: pygrep
name: Check for language that we do not accept as a community
description: Please use "deny_list" or "allow_list" instead.
entry: "(?i)(black|white)[_-]?(list|List)"
pass_filenames: true
- id: sort-scalers
name: Check if scalers are sorted in scaler_handler.go
language: system
entry: "bash tools/sort_scalers.sh"
files: .*scale_handler\.go$
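
For reference, the doctoc hook configured above can also be run by hand; a minimal sketch (assuming Node.js is available so `npx` can fetch doctoc) is:
```bash
# Regenerate the tables of content between the doctoc markers in both files,
# limiting entries to heading level 2 as configured in .pre-commit-config.yaml
npx doctoc --maxlevel 2 README.md CONTRIBUTING.md
```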

View File

@ -4,6 +4,19 @@ Thanks for helping make KEDA better 😍.
There are many areas we can use contributions - ranging from code, documentation, feature proposals, issue triage, samples, and content creation.
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of contents**
- [Getting Help](#getting-help)
- [Contributing Scalers](#contributing-scalers)
- [Including Documentation Changes](#including-documentation-changes)
- [Creating and building a local environment](#creating-and-building-a-local-environment)
- [Developer Certificate of Origin: Signing your work](#developer-certificate-of-origin-signing-your-work)
- [Code Quality](#code-quality)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Getting Help
If you have a question about KEDA or how best to contribute, the [#KEDA](https://kubernetes.slack.com/archives/CKZJ36A5D) channel on the Kubernetes Slack ([get an invite if you don't have one already](https://slack.k8s.io/)) is a good place to start. We also have regular [community stand-ups](https://github.com/kedacore/keda#community-standup) to track ongoing work and discuss areas of contribution. For any issues with the product, you can [create an issue](https://github.com/kedacore/keda/issues/new) in this repo.
@ -68,3 +81,29 @@ git add -A
git commit -sm "one commit on <branch-name>"
git push --force
```
## Code Quality
This project uses [pre-commit](https://pre-commit.com) to ensure the quality of the code.
We encourage you to use pre-commit, but it's not required to contribute. Every change is checked
on CI and if it does not pass the checks it cannot be accepted. If you want to run the checks locally,
install Python 3.6 or newer and run:
```bash
pip install pre-commit
# or
brew install pre-commit
```
For more installation options, visit the [pre-commit documentation](https://pre-commit.com).
To turn on pre-commit checks for commit operations in git, run:
```bash
pre-commit install
```
To run all checks on your staged files, run:
```bash
pre-commit run
```
To run all checks on all files, run:
```bash
pre-commit run --all-files
```
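You can also run a single hook by its id from `.pre-commit-config.yaml`; for example, to run only the Go formatting check:
```bash
# Run one hook (by id) against every file in the repository
pre-commit run go-fmt --all-files
```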

View File

@ -34,4 +34,4 @@ COPY --from=builder /workspace/bin/keda-adapter .
USER nonroot:nonroot
ENTRYPOINT ["/keda-adapter", "--secure-port=6443", "--logtostderr=true", "--v=0"]
ENTRYPOINT ["/keda-adapter", "--secure-port=6443", "--logtostderr=true", "--v=0"]

View File

@ -20,7 +20,7 @@ maintainers over the course of a one week voting period. At the end of the week,
votes are counted and a pull request is made on the repo adding the new
maintainer to the [MAINTAINERS](MAINTAINERS.md) file.
Individuals interested in becoming maintainers may submit an [issue](https://github.com/kedacore/keda/issues/new)
Individuals interested in becoming maintainers may submit an [issue](https://github.com/kedacore/keda/issues/new)
stating their interest. Existing maintainers can choose if they would
like to nominate these individuals to be a maintainer following the process
above.

View File

@ -201,4 +201,4 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
limitations under the License.

View File

@ -16,4 +16,4 @@
| -------------------- | --------------------------------------------- | ----------- |
| Aarthi Saravanakumar | [Aarthisk](https://github.com/Aarthisk) | Microsoft |
| Yaron Schneider | [yaron2](https://github.com/yaron2) | Microsoft |
| Ben Browning | [bbrowning](https://github.com/bbrowning) | Red Hat |
| Ben Browning | [bbrowning](https://github.com/bbrowning) | Red Hat |

View File

@ -17,16 +17,37 @@ Make sure to remove previous KEDA (including CRD) from the cluster. Switch to th
<a href="https://bestpractices.coreinfrastructure.org/projects/3791"><img src="https://bestpractices.coreinfrastructure.org/projects/3791/badge"></a>
<a href="https://twitter.com/kedaorg"><img src="https://img.shields.io/twitter/follow/kedaorg?style=social" alt="Twitter"></a></p>
KEDA allows for fine-grained autoscaling (including to/from zero) for event driven Kubernetes workloads. KEDA serves
as a Kubernetes Metrics Server and allows users to define autoscaling rules using a dedicated Kubernetes custom
KEDA allows for fine-grained autoscaling (including to/from zero) for event driven Kubernetes workloads. KEDA serves
as a Kubernetes Metrics Server and allows users to define autoscaling rules using a dedicated Kubernetes custom
resource definition.
KEDA can run on both the cloud and the edge, integrates natively with Kubernetes components such as the Horizontal
KEDA can run on both the cloud and the edge, integrates natively with Kubernetes components such as the Horizontal
Pod Autoscaler, and has no external dependencies.
We are a Cloud Native Computing Foundation (CNCF) sandbox project.
<img src="https://raw.githubusercontent.com/kedacore/keda/master/images/logo-cncf.svg" height="75px">
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of contents**
- [Getting started](#getting-started)
- [Deploying KEDA](#deploying-keda)
- [Documentation](#documentation)
- [FAQ](#faq)
- [Samples](#samples)
- [Releases](#releases)
- [Contributing](#contributing)
- [Community](#community)
- [Building: Quick start with Visual Studio Code Remote - Containers](#building-quick-start-with-visual-studio-code-remote---containers)
- [Building: Locally directly](#building-locally-directly)
- [Deploying: Custom KEDA locally outside cluster](#deploying-custom-keda-locally-outside-cluster)
- [Deploying: Custom KEDA as an image](#deploying-custom-keda-as-an-image)
- [Setting log levels](#setting-log-levels)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Getting started
* [QuickStart - RabbitMQ and Go](https://github.com/kedacore/sample-go-rabbitmq)
@ -62,19 +83,19 @@ You can find Contributing guide [here](./CONTRIBUTING.md)
If interested in contributing or participating in the direction of KEDA, you can join our community meetings.
* **Meeting time:** Bi-weekly Thurs 16:00 UTC (does follow US daylight savings).
* **Meeting time:** Bi-weekly Thurs 16:00 UTC (does follow US daylight savings).
([Subscribe to Google Agenda](https://calendar.google.com/calendar?cid=bjE0bjJtNWM0MHVmam1ob2ExcTgwdXVkOThAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ) |
[Convert to your timezone](https://www.thetimezoneconverter.com/?t=04%3A00%20pm&tz=UTC))
* **Zoom link:** [https://zoom.us/j/150360492 ](https://zoom.us/j/150360492 )
* **Meeting agenda:** [https://hackmd.io/s/r127ErYiN](https://hackmd.io/s/r127ErYiN)
Just want to learn or chat about KEDA? Feel free to join the conversation in
Just want to learn or chat about KEDA? Feel free to join the conversation in
**[#KEDA](https://kubernetes.slack.com/messages/CKZJ36A5D)** on the **[Kubernetes Slack](https://slack.k8s.io/)**!
## Building: Quick start with [Visual Studio Code Remote - Containers](https://code.visualstudio.com/docs/remote/containers)
This helps you pull and build quickly - dev containers launch the project inside a container with all the tooling
required for a consistent and seamless developer experience.
This helps you pull and build quickly - dev containers launch the project inside a container with all the tooling
required for a consistent and seamless developer experience.
This means you don't have to install and configure your dev environment as the container handles this for you.
@ -92,11 +113,11 @@ code .
Once VSCode launches run `CTRL+SHIFT+P -> Remote-Containers: Reopen in container` and then use the integrated
terminal to run:
```bash
```bash
make build
```
> Note: The first time you run the container it will take some time to build and install the tooling. The image
> Note: The first time you run the container it will take some time to build and install the tooling. The image
> will be cached so this is only required the first time.
## Building: Locally directly
@ -127,9 +148,11 @@ go env -w GOPROXY=https://proxy.golang.org,direct GOSUMDB=sum.golang.org
```
## Deploying: Custom KEDA locally outside cluster
The Operator SDK framework allows you to run the operator/controller locally outside the cluster without
the need to build an image. This should help during development/debugging of the KEDA Operator or Scalers.
> Note: This approach works only on Linux or macOS.
the need to build an image. This should help during development/debugging of the KEDA Operator or Scalers.
> Note: This approach works only on Linux or macOS.
To have fully operational KEDA we need to deploy Metrics Server first.
@ -145,11 +168,11 @@ To have fully operational KEDA we need to deploy Metrics Server first.
and change the operator log level via `--zap-log-level=` if needed
```bash
make run ARGS="--zap-log-level=debug"
```
```
## Deploying: Custom KEDA as an image
If you want to change KEDA's behaviour, or if you have created a new scaler (more docs on this to come) and you want
If you want to change KEDA's behaviour, or if you have created a new scaler (more docs on this to come) and you want
to deploy it as part of KEDA, do the following:
1. Make your change in the code.
@ -162,7 +185,7 @@ to deploy it as part of KEDA. Do the following:
```bash
IMAGE_REPO=johndoe make deploy
```
4. Once the KEDA pods are up, check the logs to verify everything is running OK, e.g.:
4. Once the KEDA pods are up, check the logs to verify everything is running OK, e.g.:
```bash
kubectl get pods --no-headers -n keda | awk '{print $1}' | grep keda-operator | xargs kubectl -n keda logs -f

View File

@ -1,6 +1,6 @@
# Release Process
The release process of a new version of KEDA involves the following:
The release process of a new version of KEDA involves the following:
## 0. Prerequisites
@ -10,13 +10,13 @@ The next version will thus be 1.2.0
## 1. Changelog
Provide a new section in `CHANGELOG.md` for the new version that is being released along with the new features, patches and deprecations it introduces.
Provide a new section in `CHANGELOG.md` for the new version that is being released along with the new features, patches and deprecations it introduces.
It should not include every single change but solely what matters to our customers; for example, a change to an issue template is not important.
## 2. Create KEDA release on GitHub
Creating a new release in the releases page (https://github.com/kedacore/keda/release) will trigger a GitHub workflow which will create a new image with the latest code and tagged with the next version (in this example 1.2.0).
Creating a new release in the releases page (https://github.com/kedacore/keda/release) will trigger a GitHub workflow which will create a new image with the latest code and tagged with the next version (in this example 1.2.0).
KEDA Deployment YAML file (eg. keda-1.2.0.yaml) is also automatically created and attached to the Release as part of the workflow.
@ -30,14 +30,14 @@ See [docs](https://github.com/kedacore/keda-docs#publishing-a-new-version).
## 4. Update Helm Charts
a). Update the version and appVersion here: https://github.com/kedacore/charts/blob/master/keda/Chart.yaml
a). Update the version and appVersion here: https://github.com/kedacore/charts/blob/master/keda/Chart.yaml
b). In the image section update the keda and metricsAdapter to point to the docker images from step 1 https://github.com/kedacore/charts/blob/master/keda/values.yaml
Then run the commands here: https://github.com/kedacore/charts
-- To deploy KEDA through Azure Functions Core Tools --
Update the following file:
Update the following file:
https://github.com/Azure/azure-functions-core-tools/blob/dev/src/Azure.Functions.Cli/StaticResources/keda.yaml
[Search for 1.1.0 etc. and replace it]

View File

@ -9,4 +9,4 @@
- op: add
path: /spec/validation/openAPIV3Schema/properties/spec/properties/jobTargetRef/properties/template/properties/spec/properties/initContainers/items/properties/ports/items/required/-
value: protocol
value: protocol

View File

@ -1,4 +1,3 @@
resources:
- namespace.yaml
- service_account.yaml

View File

@ -12,4 +12,4 @@ rules:
resources:
- '*'
verbs:
- '*'
- '*'

View File

@ -48,4 +48,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: horizontal-pod-autoscaler
namespace: kube-system
namespace: kube-system

View File

@ -17,4 +17,4 @@ spec:
port: 80
targetPort: 8080
selector:
app: keda-metrics-apiserver
app: keda-metrics-apiserver

View File

@ -6,4 +6,4 @@ commonLabels:
resources:
- role.yaml
- role_binding.yaml
- role_binding.yaml

View File

@ -4,10 +4,10 @@ metadata:
name: scaledjob-sample
spec:
jobTargetRef:
parallelism: 1
completions: 1
activeDeadlineSeconds: 600
backoffLimit: 6
parallelism: 1
completions: 1
activeDeadlineSeconds: 600
backoffLimit: 6
template:
## template
pollingInterval: 30
@ -17,4 +17,4 @@ spec:
triggers:
- type: example-trigger
metadata:
property: example-property
property: example-property

View File

@ -12,4 +12,4 @@ spec:
triggers:
- type: example-trigger
metadata:
property: example-property
property: example-property

View File

@ -6,4 +6,4 @@ spec:
secretTargetRef:
- parameter: example-secret-parameter
name: example-secret-name
key: example-role-key
key: example-role-key

View File

@ -136,4 +136,4 @@ message GetEndOffsetsRequest {
message GetEndOffsetsReply {
map<uint32, uint64> offsets = 1;
}
}

View File

@ -12,4 +12,4 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
*/

View File

@ -24,7 +24,7 @@ cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"
# Kubebuilder project layout has api under 'api/v1alpha1'
# client-go codegen expects group name in the path ie. 'api/keda/v1alpha'
# Because there's no way to modify any of these settings,
# Because there's no way to modify any of these settings,
# we need to hack things a little bit (replace the name of package)
find "${DIFFROOT}/generated" -type f -name "*.go" | xargs sed -i "s#github.com/kedacore/keda/api/keda/v1alpha1#github.com/kedacore/keda/api/v1alpha1#g"

View File

@ -34,4 +34,4 @@
</g>
</g>
</g>
</svg>
</svg>


View File

@ -9,4 +9,4 @@
<g id="Group" transform="translate(681.000000, 166.000000)"></g>
<path d="M746.854953,263.990234 L761,238.386728 L722.345133,238.386728 L734.424779,155.091533 L674.262032,263.990234 L661.003906,263.990234 L730.821289,132.996094 L800.638672,263.990234 L746.854953,263.990234 Z" id="Combined-Shape" fill="#326DE6" fill-rule="nonzero"></path>
</g>
</svg>
</svg>


View File

@ -103,12 +103,12 @@ func TestGetServiceBusLength(t *testing.T) {
t.Logf("\tQueue '%s' has 1 message\n", queueName)
t.Logf("\tTopic '%s' with subscription '%s' has 1 message\n", topicName, subscriptionName)
connection_string := os.Getenv("SERVICEBUS_CONNECTION_STRING")
connectionString := os.Getenv("SERVICEBUS_CONNECTION_STRING")
for _, scaler := range getServiceBusLengthTestScalers {
if connection_string != "" {
if connectionString != "" {
// Can actually test that numbers return
scaler.metadata.connection = connection_string
scaler.metadata.connection = connectionString
length, err := scaler.GetAzureServiceBusLength(context.TODO())
if err != nil {

View File

@ -129,8 +129,8 @@ func (s *prometheusScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec {
func (s *prometheusScaler) ExecutePromQuery() (float64, error) {
t := time.Now().UTC().Format(time.RFC3339)
query_escaped := url_pkg.QueryEscape(s.metadata.query)
url := fmt.Sprintf("%s/api/v1/query?query=%s&time=%s", s.metadata.serverAddress, query_escaped, t)
queryEscaped := url_pkg.QueryEscape(s.metadata.query)
url := fmt.Sprintf("%s/api/v1/query?query=%s&time=%s", s.metadata.serverAddress, queryEscaped, t)
r, err := http.Get(url)
if err != nil {
return -1, err

View File

@ -101,24 +101,24 @@ var vhost_pathes = []string{"/myhost", "", "/", "//", "/%2F"}
func TestGetQueueInfo(t *testing.T) {
for _, testData := range testQueueInfoTestData {
for _, vhost_path := range vhost_pathes {
expeced_vhost := "myhost"
for _, vhostPath := range vhost_pathes {
expecedVhost := "myhost"
if vhost_path != "/myhost" {
expeced_vhost = "%2F"
if vhostPath != "/myhost" {
expecedVhost = "%2F"
}
var apiStub = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
expeced_path := "/api/queues/" + expeced_vhost + "/evaluate_trials"
if r.RequestURI != expeced_path {
t.Error("Expect request path to =", expeced_path, "but it is", r.RequestURI)
expecedPath := "/api/queues/" + expecedVhost + "/evaluate_trials"
if r.RequestURI != expecedPath {
t.Error("Expect request path to =", expecedPath, "but it is", r.RequestURI)
}
w.WriteHeader(testData.responseStatus)
w.Write([]byte(testData.response))
}))
resolvedEnv := map[string]string{apiHost: fmt.Sprintf("%s%s", apiStub.URL, vhost_path)}
resolvedEnv := map[string]string{apiHost: fmt.Sprintf("%s%s", apiStub.URL, vhostPath)}
metadata := map[string]string{
"queueLength": "10",

View File

@ -23,7 +23,7 @@ const (
func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, scaleTo int64, maxScale int64) {
logger := e.logger.WithValues("scaledJob.Name", scaledJob.Name, "scaledJob.Namespace", scaledJob.Namespace)
runningJobCount := e.getRunningJobCount(scaledJob, maxScale)
logger.Info("Scaling Jobs", "Number of running Jobs", runningJobCount)

View File

@ -364,6 +364,7 @@ func (h *scaleHandler) getPods(scalableObject interface{}) (*corev1.PodTemplateS
}
func buildScaler(name, namespace, triggerType string, resolvedEnv, triggerMetadata, authParams map[string]string, podIdentity string) (scalers.Scaler, error) {
// TRIGGERS-START
switch triggerType {
case "artemis-queue":
return scalers.NewArtemisQueueScaler(resolvedEnv, triggerMetadata, authParams)
@ -414,6 +415,7 @@ func buildScaler(name, namespace, triggerType string, resolvedEnv, triggerMetada
default:
return nil, fmt.Errorf("no scaler found for type: %s", triggerType)
}
// TRIGGERS-END
}
func asDuckWithTriggers(scalableObject interface{}) (*kedav1alpha1.WithTriggers, error) {

View File

@ -3,4 +3,4 @@ AZURE_SP_KEY=
AZURE_SP_TENANT=
AZURE_SUBSCRIPTION=
AZURE_RESOURCE_GROUP=
TEST_STORAGE_CONNECTION_STRING=
TEST_STORAGE_CONNECTION_STRING=

View File

@ -113,6 +113,3 @@ test.after.always('remove redis and my deployment', t => {
```ts
test.serial.only('this will be the only test to run', t => { });
```

View File

@ -83,4 +83,4 @@ then
else
print_failed
exit 1
fi
fi

View File

@ -13,17 +13,17 @@ export class ArtemisHelper {
sh.exec(`kubectl -n ${artemisNamespace} apply -f ${tmpFile.name}`).code, 'creating artemis deployment should work.'
)
t.is(
0,
0,
sh.exec(`kubectl -n ${artemisNamespace} wait --for=condition=available --timeout=600s deployment/artemis-activemq`).code, 'Artemis should be available.'
)
}
static installArtemisSecret(t, testNamespace: string) {
const tmpFile = tmp.fileSync()
fs.writeFileSync(tmpFile.name, artemisSecretYaml)
sh.exec(`kubectl -n ${testNamespace} apply -f ${tmpFile.name}`).code, 'creating secrets should work.'
}
static publishMessages(t, testNamespace: string) {
@ -32,7 +32,7 @@ export class ArtemisHelper {
t.is(
0,
sh.exec(`kubectl -n ${testNamespace} apply -f ${tmpFile.name}`).code, 'creating artemis producer deployment should work.'
)
)
}
static installConsumer(t, testNamespace: string) {
@ -41,7 +41,7 @@ export class ArtemisHelper {
t.is(
0,
sh.exec(`kubectl -n ${testNamespace} apply -f ${tmpFile.name}`).code, 'creating artemis consumer deployment should work.'
)
)
}
static uninstallArtemis(t, artemisNamespace: string){
@ -56,7 +56,7 @@ export class ArtemisHelper {
fs.writeFileSync(tmpFile.name, consumerYaml)
sh.exec(`kubectl -n ${testNamespace} delete -f ${tmpFile.name}`)
fs.writeFileSync(tmpFile.name, producerYaml)
sh.exec(`kubectl -n ${testNamespace} delete -f ${tmpFile.name}`)
sh.exec(`kubectl -n ${testNamespace} delete -f ${tmpFile.name}`)
}
}
@ -279,4 +279,4 @@ spec:
value: "61616"
restartPolicy: Never
backoffLimit: 4
`
`

View File

@ -21,11 +21,11 @@ test.before(t => {
ArtemisHelper.installConsumer(t, testNamespace)
ArtemisHelper.publishMessages(t, testNamespace)
});
});
test.serial('Deployment should have 0 replicas on start', t => {
const replicaCount = sh.exec(`kubectl get deployment.apps/kedartemis-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`).stdout
t.log('replica count: %s', replicaCount);
t.is(replicaCount, '0', 'replica count should start out as 0')
})
@ -37,8 +37,8 @@ test.serial(`Deployment should scale to 5 with 1000 messages on the queue then b
t.is(
0,
sh.exec(`kubectl -n ${testNamespace} apply -f ${tmpFile.name}`).code, 'creating scaledObject should work.'
)
)
// with messages published, the consumer deployment should start receiving the messages
let replicaCount = '0'
for (let i = 0; i < 10 && replicaCount !== '5'; i++) {
@ -50,9 +50,9 @@ test.serial(`Deployment should scale to 5 with 1000 messages on the queue then b
sh.exec('sleep 5s')
}
}
t.is('5', replicaCount, 'Replica count should be 5 after 10 seconds')
for (let i = 0; i < 50 && replicaCount !== '0'; i++) {
replicaCount = sh.exec(
`kubectl get deployment.apps/kedartemis-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
@ -61,7 +61,7 @@ test.serial(`Deployment should scale to 5 with 1000 messages on the queue then b
sh.exec('sleep 5s')
}
}
t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes')
})
@ -110,4 +110,4 @@ spec:
brokerAddress: "test"
authenticationRef:
name: trigger-auth-kedartemis
`
`

View File

@ -139,4 +139,4 @@ spec:
metadata:
blobContainerName: container-name
blobPrefix: blobsubpath
connection: AzureWebJobsStorage`
connection: AzureWebJobsStorage`

View File

@ -13,10 +13,10 @@ metadata:
data:
alerting_rules.yml: |
{}
alerts: |
{}
prometheus.yml: |
global:
evaluation_interval: 1m
@ -262,13 +262,13 @@ data:
target_label: kubernetes_pod_name
scrape_interval: 5m
scrape_timeout: 30s
recording_rules.yml: |
{}
rules: |
{}
---
# Source: prometheus/templates/server-serviceaccount.yaml
@ -284,7 +284,7 @@ metadata:
name: prometheus-server
annotations:
{}
---
# Source: prometheus/templates/server-clusterrole.yaml
@ -411,7 +411,7 @@ spec:
- --webhook-url=http://127.0.0.1:9090/-/reload
resources:
{}
volumeMounts:
- name: config-volume
mountPath: /etc/config
@ -447,7 +447,7 @@ spec:
successThreshold: 1
resources:
{}
volumeMounts:
- name: config-volume
mountPath: /etc/config
@ -459,7 +459,7 @@ spec:
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
terminationGracePeriodSeconds: 300
volumes:
- name: config-volume
@ -595,5 +595,3 @@ spec:
---
# Source: prometheus/templates/server-vpa.yaml

View File

@ -24,7 +24,7 @@ test.before(t => {
sh.config.silent = true
// create deployments - there are two deployments - both using the same image but one deployment
// is directly tied to the KEDA HPA while the other is isolated that can be used for metrics
// is directly tied to the KEDA HPA while the other is isolated that can be used for metrics
// even when the KEDA deployment is at zero - the service points to both deployments
const tmpFile = tmp.fileSync()
fs.writeFileSync(tmpFile.name, deployYaml.replace('{{PROMETHEUS_NAMESPACE}}', prometheusNamespace))

View File

@ -96,9 +96,9 @@ spec:
- image: rabbitmq:3-management
name: rabbitmq
env:
- name: RABBITMQ_DEFAULT_USER
- name: RABBITMQ_DEFAULT_USER
value: "{{USERNAME}}"
- name: RABBITMQ_DEFAULT_PASS
- name: RABBITMQ_DEFAULT_PASS
value: "{{PASSWORD}}"
- name: RABBITMQ_DEFAULT_VHOST
value: "{{VHOST}}"

View File

@ -88,7 +88,7 @@ metadata:
labels:
app: test-deployment
spec:
replicas: 0
replicas: 0
selector:
matchLabels:
app: test-deployment

View File

@ -71,7 +71,7 @@ test.after.always.cb('clean up rabbitmq-queue deployment', t => {
sh.exec(`kubectl delete ${resource} --namespace ${testNamespace}`)
}
sh.exec(`kubectl delete namespace ${testNamespace}`)
// remove rabbitmq
// remove rabbitmq
RabbitMQHelper.uninstallRabbit(rabbitmqNamespace)
t.end()
})
@ -90,7 +90,7 @@ metadata:
labels:
app: test-deployment
spec:
replicas: 0
replicas: 0
selector:
matchLabels:
app: test-deployment

View File

@ -15,7 +15,7 @@ const connectionString = `amqp://${username}:${password}@rabbitmq.${rabbitmqName
const messageCount = 500
test.before(t => {
// install rabbitmq
// install rabbitmq
RabbitMQHelper.installRabbit(t, username, password, vhost, rabbitmqNamespace)
sh.config.silent = true
@ -72,7 +72,7 @@ test.after.always.cb('clean up rabbitmq-queue deployment', t => {
}
sh.exec(`kubectl delete namespace ${testNamespace}`)
// remove rabbitmq
// remove rabbitmq
RabbitMQHelper.uninstallRabbit(rabbitmqNamespace)
t.end()
})
@ -91,7 +91,7 @@ metadata:
labels:
app: test-deployment
spec:
replicas: 0
replicas: 0
selector:
matchLabels:
app: test-deployment

View File

@ -555,4 +555,4 @@ spec:
args: ["write"]
restartPolicy: Never
backoffLimit: 4
`
`

View File

@ -246,4 +246,4 @@ spec:
name: redis-password
key: password
restartPolicy: Never
`
`

View File

@ -70,4 +70,4 @@ ENV PATH=${PATH}:/usr/local/go/bin \
GOPATH=/go
# Install FOSSA tooling
RUN curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install.sh | bash
RUN curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install.sh | bash

17
tools/sort_scalers.sh Normal file
View File

@ -0,0 +1,17 @@
#!/bin/bash
set -euo pipefail
LEAD='TRIGGERS-START'
TAIL='TRIGGERS-END'
SCALERS_FILE="pkg/scaling/scale_handler.go"
CURRENT=$(cat "${SCALERS_FILE}" | awk "/${LEAD}/,/${TAIL}/" | grep "case")
SORTED=$(cat "${SCALERS_FILE}" | awk "/${LEAD}/,/${TAIL}/" | grep "case" | sort)
if [[ "${CURRENT}" == "${SORTED}" ]]; then
echo "Scalers are sorted in ${SCALERS_FILE}"
exit 0
else
echo "Scalers are not sorted alphabetical in ${SCALERS_FILE}"
exit 1
fi
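
The sort-scalers pre-commit hook invokes this script via `bash tools/sort_scalers.sh`; you can run the same check manually from the repository root:
```bash
# Exits 0 if the case entries between TRIGGERS-START and TRIGGERS-END
# in pkg/scaling/scale_handler.go are alphabetically sorted, 1 otherwise
bash tools/sort_scalers.sh
```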