Compare commits
No commits in common. "main" and "v0.1.0" have entirely different histories.
|
@ -1,94 +0,0 @@
|
|||
#-------------------------------------------------------------------------------------------------------------
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
|
||||
#-------------------------------------------------------------------------------------------------------------
|
||||
|
||||
FROM golang:1.24.3
|
||||
|
||||
# Avoid warnings by switching to noninteractive
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# This Dockerfile adds a non-root 'vscode' user with sudo access. However, for Linux,
|
||||
# this user's GID/UID must match your local user UID/GID to avoid permission issues
|
||||
# with bind mounts. Update USER_UID / USER_GID if yours is not 1000. See
|
||||
# https://aka.ms/vscode-remote/containers/non-root-user for details.
|
||||
ARG USERNAME=vscode
|
||||
ARG USER_UID=1000
|
||||
ARG USER_GID=$USER_UID
|
||||
|
||||
ENV GO111MODULE=auto
|
||||
|
||||
# Configure apt, install packages and tools
|
||||
RUN apt-get update \
|
||||
&& apt-get -y install --no-install-recommends apt-utils dialog unzip 2>&1 \
|
||||
#
|
||||
# Verify git, process tools, lsb-release (common in install instructions for CLIs) installed
|
||||
&& apt-get -y install git iproute2 procps lsb-release \
|
||||
#
|
||||
# Install gocode-gomod
|
||||
&& go get -x -d github.com/stamblerre/gocode 2>&1 \
|
||||
&& go build -o gocode-gomod github.com/stamblerre/gocode \
|
||||
&& mv gocode-gomod $GOPATH/bin/ \
|
||||
#
|
||||
# Install Go tools
|
||||
&& go get -u -v \
|
||||
github.com/mdempsky/gocode \
|
||||
github.com/uudashr/gopkgs/cmd/gopkgs \
|
||||
github.com/ramya-rao-a/go-outline \
|
||||
github.com/acroca/go-symbols \
|
||||
github.com/godoctor/godoctor \
|
||||
golang.org/x/tools/cmd/guru \
|
||||
golang.org/x/tools/cmd/gorename \
|
||||
github.com/rogpeppe/godef \
|
||||
github.com/zmb3/gogetdoc \
|
||||
github.com/haya14busa/goplay/cmd/goplay \
|
||||
github.com/sqs/goreturns \
|
||||
github.com/josharian/impl \
|
||||
github.com/davidrjenni/reftools/cmd/fillstruct \
|
||||
github.com/fatih/gomodifytags \
|
||||
github.com/cweill/gotests/... \
|
||||
golang.org/x/tools/cmd/goimports \
|
||||
golang.org/x/lint/golint \
|
||||
github.com/alecthomas/gometalinter 2>&1 \
|
||||
github.com/mgechev/revive \
|
||||
github.com/derekparker/delve/cmd/dlv 2>&1 \
|
||||
&& go install honnef.co/go/tools/cmd/staticcheck@latest \
|
||||
&& go install golang.org/x/tools/gopls@latest \
|
||||
# Install golangci-lint
|
||||
&& curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.0 \
|
||||
#
|
||||
# Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user.
|
||||
&& groupadd --gid $USER_GID $USERNAME \
|
||||
&& useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME \
|
||||
# [Optional] Add sudo support
|
||||
&& apt-get install -y sudo \
|
||||
&& echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
|
||||
&& chmod 0440 /etc/sudoers.d/$USERNAME \
|
||||
# Docker install
|
||||
&& apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common lsb-release \
|
||||
&& curl -fsSL https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]')/gpg | apt-key add - 2>/dev/null \
|
||||
&& add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) stable" \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y docker-ce-cli \
|
||||
#
|
||||
# Install pip & pre-commit
|
||||
&& apt-get -y install python3-pip \
|
||||
&& python3 -m pip install --no-cache-dir pre-commit \
|
||||
#
|
||||
# Clean up
|
||||
&& apt-get autoremove -y \
|
||||
&& apt-get clean -y \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Enable go modules
|
||||
ENV GO111MODULE=on
|
||||
|
||||
ENV OPERATOR_RELEASE_VERSION=v1.26.0
|
||||
RUN ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac) \
|
||||
&& OS=$(uname | awk '{print tolower($0)}') \
|
||||
&& OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/${OPERATOR_RELEASE_VERSION} \
|
||||
&& curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH} \
|
||||
&& chmod +x operator-sdk_${OS}_${ARCH} \
|
||||
&& mkdir -p /usr/local/bin/ \
|
||||
&& cp operator-sdk_${OS}_${ARCH} /usr/local/bin/operator-sdk \
|
||||
&& rm operator-sdk_${OS}_${ARCH}
|
|
@ -1,34 +0,0 @@
|
|||
// For format details, see https://aka.ms/vscode-remote/devcontainer.json or the definition README at
|
||||
// https://github.com/microsoft/vscode-dev-containers/tree/master/containers/go
|
||||
{
|
||||
"name": "Go",
|
||||
"dockerFile": "Dockerfile",
|
||||
"runArgs": [
|
||||
// Uncomment the next line to use a non-root user. On Linux, this will prevent
|
||||
// new files getting created as root, but you may need to update the USER_UID
|
||||
// and USER_GID in .devcontainer/Dockerfile to match your user if not 1000.
|
||||
// "-u", "vscode",
|
||||
|
||||
// Mount go mod cache
|
||||
"-v", "keda-gomodcache:/go/pkg",
|
||||
// Cache vscode exentsions installs and homedir
|
||||
"-v", "keda-vscodecache:/root/.vscode-server",
|
||||
|
||||
// Mount docker socket for docker builds
|
||||
"-v", "/var/run/docker.sock:/var/run/docker.sock",
|
||||
|
||||
"--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined"
|
||||
],
|
||||
|
||||
// Use 'settings' to set *default* container specific settings.json values on container create.
|
||||
// You can edit these settings after create using File > Preferences > Settings > Remote.
|
||||
"settings": {
|
||||
"terminal.integrated.shell.linux": "/bin/bash",
|
||||
"go.gopath": "/go"
|
||||
},
|
||||
|
||||
// Add the IDs of extensions you want installed when the container is created in the array below.
|
||||
"extensions": [
|
||||
"golang.go"
|
||||
]
|
||||
}
|
|
@ -1,14 +1,13 @@
|
|||
*.md
|
||||
*.yaml
|
||||
*.yml
|
||||
.*
|
||||
/LICENSE
|
||||
/bin
|
||||
/charts
|
||||
/cli
|
||||
/config
|
||||
/docs
|
||||
/examples
|
||||
/target
|
||||
/tests
|
||||
Dockerfile
|
||||
/bin
|
||||
.git
|
||||
.gitignore
|
||||
/charts
|
||||
/.vscode
|
||||
/bin
|
||||
/cli
|
||||
/examples
|
||||
/docs
|
||||
/.envrc
|
||||
CONTRIBUTING.md
|
||||
Makefile
|
|
@ -1,3 +1,3 @@
|
|||
# These owners will be the default owners for everything in
|
||||
# the repo. Unless a later match takes precedence
|
||||
* @kedacore/keda-http-contributors
|
||||
* @ahmelsayed @zroubalik @tomkerkhove @arschles @khaosdoctor
|
||||
|
|
|
@ -1,33 +0,0 @@
|
|||
name: Feature request 🧭
|
||||
description: Suggest an idea for this project
|
||||
labels: "needs-discussion,feature-request"
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Proposal
|
||||
description: "What would you like to have as a feature"
|
||||
placeholder: "A clear and concise description of what you want to happen."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Use-Case
|
||||
description: "How would this help you?"
|
||||
placeholder: "Tell us more what you'd like to achieve."
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
id: interested-in-implementing-the-feature
|
||||
attributes:
|
||||
label: Is this a feature you are interested in implementing yourself?
|
||||
options:
|
||||
- 'No'
|
||||
- 'Maybe'
|
||||
- 'Yes'
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: anything-else
|
||||
attributes:
|
||||
label: Anything else?
|
||||
description: "Let us know if you have anything else to share"
|
|
@ -1,93 +0,0 @@
|
|||
name: Report a bug 🐛
|
||||
description: Create a report to help us improve
|
||||
labels: ["bug"]
|
||||
body:
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Report
|
||||
description: "What bug have you encountered?"
|
||||
placeholder: "A clear and concise description of what the bug is."
|
||||
- type: textarea
|
||||
id: expected-behavior
|
||||
attributes:
|
||||
label: Expected Behavior
|
||||
description: What did you expect to happen?
|
||||
placeholder: What did you expect to happen?
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: actual-behavior
|
||||
attributes:
|
||||
label: Actual Behavior
|
||||
description: Also tell us, what did you see is happen?
|
||||
placeholder: Tell us what you see that is happening
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: repro-steps
|
||||
attributes:
|
||||
label: Steps to Reproduce the Problem
|
||||
description: "How can we reproduce this bug? Please walk us through it step by step."
|
||||
value: |
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: Logs from KEDA HTTP operator
|
||||
description: "Provide logs from the KEDA HTTP operator, if need be."
|
||||
value: |
|
||||
```
|
||||
example
|
||||
```
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
id: keda-http-version
|
||||
attributes:
|
||||
label: "HTTP Add-on Version"
|
||||
description: "What version of the KEDA HTTP Add-on are you running?"
|
||||
options:
|
||||
- "0.10.0"
|
||||
- "0.9.0"
|
||||
- "0.8.0"
|
||||
- "Other"
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
id: kubernetes-version
|
||||
attributes:
|
||||
label: Kubernetes Version
|
||||
description: What version of Kubernetes that are you running?
|
||||
options:
|
||||
- "1.32"
|
||||
- "1.31"
|
||||
- "1.30"
|
||||
- "< 1.30"
|
||||
- "Other"
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
id: cluster-type
|
||||
attributes:
|
||||
label: Platform
|
||||
description: Where is your cluster running?
|
||||
options:
|
||||
- Any
|
||||
- Alibaba Cloud
|
||||
- Amazon Web Services
|
||||
- Google Cloud
|
||||
- Microsoft Azure
|
||||
- Red Hat OpenShift
|
||||
- Other
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: anything-else
|
||||
attributes:
|
||||
label: Anything else?
|
||||
description: "Let us know if you have anything else to share"
|
|
@ -1,28 +0,0 @@
|
|||
---
|
||||
name: KEDA Release Tracker
|
||||
about: Template to keep track of the progress for a new KEDA HTTP add-on release.
|
||||
title: "Release: "
|
||||
labels: governance,release-management
|
||||
assignees: tomkerkhove,jorturfer
|
||||
---
|
||||
|
||||
This issue template is used to track the rollout of a new KEDA HTTP add-on version.
|
||||
|
||||
For the full release process, we recommend reading [this document]([https://github.com/kedacore/keda/blob/main/RELEASE-PROCESS.md](https://github.com/kedacore/http-add-on/blob/main/RELEASE-PROCESS.md)).
|
||||
|
||||
## Required items
|
||||
|
||||
- [ ] List items that are still open, but required for this release
|
||||
|
||||
# Timeline
|
||||
|
||||
We aim to release this release in the week of <week range, example March 27-31>.
|
||||
|
||||
## Progress
|
||||
|
||||
- [ ] Add the new version to GitHub Bug report template
|
||||
- [ ] Create KEDA release
|
||||
- [ ] Prepare & ship Helm chart
|
||||
- [ ] Publish on Artifact Hub ([repo](https://github.com/kedacore/external-scalers))
|
||||
- [ ] Provide update in Slack
|
||||
- [ ] Tweet about new release
|
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
name: Report a bug
|
||||
about: Create a report to help us improve
|
||||
labels: bug
|
||||
---
|
||||
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
## Expected Behavior
|
||||
|
||||
## Actual Behavior
|
||||
|
||||
## Steps to Reproduce the Problem
|
||||
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
## Specifications
|
||||
|
||||
- **KEDA Version:** *Please elaborate*
|
||||
- **KEDA HTTP Add-on Version:** *Please elaborate*
|
||||
- **Platform & Version:** *Please elaborate*
|
||||
- **Kubernetes Version:** *Please elaborate*
|
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
labels: needs-discussion,feature-request
|
||||
---
|
||||
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
### Use-Case
|
||||
|
||||
Tell us more what you'd like to achieve
|
||||
|
||||
### Specification
|
||||
|
||||
Tell us in detail how this feature should work
|
|
@ -1,4 +1,4 @@
|
|||
blank_issues_enabled: true
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Ask a question or get support
|
||||
url: https://github.com/kedacore/http-add-on/discussions/new
|
||||
|
|
|
@ -9,7 +9,6 @@ _Provide a description of what has been changed_
|
|||
### Checklist
|
||||
|
||||
- [ ] Commits are signed with Developer Certificate of Origin (DCO)
|
||||
- [ ] Changelog has been updated and is aligned with our [changelog requirements](https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog)
|
||||
- [ ] Any necessary documentation is added, such as:
|
||||
- [`README.md`](/README.md)
|
||||
- [The `docs/` directory](./docs)
|
||||
|
|
|
@ -8,10 +8,6 @@ updates:
|
|||
labels:
|
||||
- enhancement
|
||||
- dependency-management
|
||||
groups:
|
||||
all-updates:
|
||||
patterns:
|
||||
- "*"
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
|
@ -20,10 +16,6 @@ updates:
|
|||
labels:
|
||||
- enhancement
|
||||
- dependency-management
|
||||
groups:
|
||||
all-updates:
|
||||
patterns:
|
||||
- "*"
|
||||
- package-ecosystem: docker
|
||||
directory: "/"
|
||||
schedule:
|
||||
|
@ -32,7 +24,3 @@ updates:
|
|||
labels:
|
||||
- enhancement
|
||||
- dependency-management
|
||||
groups:
|
||||
all-updates:
|
||||
patterns:
|
||||
- "*"
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
# Number of days of inactivity before an issue becomes stale
|
||||
daysUntilStale: 60
|
||||
|
||||
# Number of days of inactivity before a stale issue is closed
|
||||
daysUntilClose: 7
|
||||
|
||||
# Issues with these labels will never be considered stale
|
||||
exemptLabels:
|
||||
- stale-bot-ignore
|
||||
- feature
|
||||
- security
|
||||
|
||||
# Label to use when marking an issue as stale
|
||||
staleLabel: stale
|
||||
|
||||
# Set to true to ignore issues in a project (defaults to false)
|
||||
exemptProjects: false
|
||||
|
||||
# Set to true to ignore issues in a milestone (defaults to false)
|
||||
exemptMilestones: false
|
||||
|
||||
# Set to true to ignore issues with an assignee (defaults to false)
|
||||
exemptAssignees: false
|
||||
|
||||
# Comment to post when marking an issue as stale. Set to `false` to disable
|
||||
markComment: >
|
||||
This issue has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed in 7 days if no further activity occurs. Thank you
|
||||
for your contributions.
|
||||
|
||||
# Comment to post when removing the stale label.
|
||||
# unmarkComment: >
|
||||
# Your comment here.
|
||||
|
||||
# Comment to post when closing a stale Issue or Pull Request.
|
||||
closeComment: >
|
||||
This issue has been automatically closed due to inactivity.
|
||||
|
||||
# Limit the number of actions per hour, from 1-30. Default is 30
|
||||
limitPerRun: 30
|
||||
|
||||
# Limit to only `issues` or `pulls`
|
||||
only: issues
|
|
@ -1,50 +0,0 @@
|
|||
name: Automatically add new issue to backlog
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- opened
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
track_issue:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Get project data
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GH_AUTOMATION_PAT }}
|
||||
ORGANIZATION: kedacore
|
||||
# This refers to our backlog project: https://github.com/orgs/kedacore/projects/6/views/1
|
||||
PROJECT_NUMBER: 6
|
||||
run: |
|
||||
gh api graphql -f query='
|
||||
query($org: String!, $number: Int!) {
|
||||
organization(login: $org){
|
||||
projectV2(number: $number) {
|
||||
id
|
||||
fields(first:20) {
|
||||
nodes {
|
||||
... on ProjectV2Field {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}' -f org=$ORGANIZATION -F number=$PROJECT_NUMBER > project_data.json
|
||||
|
||||
echo 'PROJECT_ID='$(jq '.data.organization.projectV2.id' project_data.json) >> $GITHUB_ENV
|
||||
- name: Add issue to project
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GH_AUTOMATION_PAT }}
|
||||
ISSUE_ID: ${{ github.event.issue.node_id }}
|
||||
run: |
|
||||
item_id="$( gh api graphql -f query='
|
||||
mutation($project:ID!, $issue:ID!) {
|
||||
addProjectV2ItemById(input: {projectId: $project, contentId: $issue}) {
|
||||
item {
|
||||
id
|
||||
}
|
||||
}
|
||||
}' -f project=$PROJECT_ID -f issue=$ISSUE_ID --jq '.data.addProjectV2ItemById.item.id')"
|
|
@ -5,26 +5,30 @@ on:
|
|||
branches: [ main ]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
env:
|
||||
IMAGE_OPERATOR_NAME: ghcr.io/${{ github.repository_owner }}/http-add-on-operator
|
||||
IMAGE_INTERCEPTOR_NAME: ghcr.io/${{ github.repository_owner }}/http-add-on-interceptor
|
||||
IMAGE_SCALER_NAME: ghcr.io/${{ github.repository_owner }}/http-add-on-scaler
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write # needed for signing the images with GitHub OIDC Token **not production ready**
|
||||
build_operator:
|
||||
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
- name: Set up tags and refs
|
||||
id: prep
|
||||
run: |
|
||||
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
echo ::set-output name=sha::$(git rev-parse --short HEAD)
|
||||
|
||||
- name: Set up Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
# Username used to log in to a Docker registry. If not set then no login will occur
|
||||
username: ${{ github.repository_owner }}
|
||||
|
@ -33,24 +37,93 @@ jobs:
|
|||
# Server address of Docker registry. If not set then will default to Docker Hub
|
||||
registry: ghcr.io
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
- name: Build and push operator image
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
# Docker repository to tag the image with
|
||||
tags: |
|
||||
${{ env.IMAGE_OPERATOR_NAME }}:canary,${{ env.IMAGE_OPERATOR_NAME }}:sha-${{ steps.prep.outputs.sha }}
|
||||
labels: |
|
||||
sh.keda.http.image.source=${{github.event.repository.html_url}}
|
||||
sh.keda.http.image.created=${{steps.prep.outputs.created}}
|
||||
sh.keda.http.image.revision=${{github.sha}}
|
||||
file: operator/Dockerfile
|
||||
context: .
|
||||
push: true
|
||||
|
||||
- name: Publish on GitHub Container Registry
|
||||
run: make publish-multiarch
|
||||
env:
|
||||
VERSION: canary
|
||||
build_interceptor:
|
||||
|
||||
# https://github.com/sigstore/cosign-installer
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
- name: Check Cosign install!
|
||||
run: cosign version
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Sign KEDA images published on GitHub Container Registry
|
||||
# This step uses the identity token to provision an ephemeral certificate
|
||||
# against the sigstore community Fulcio instance.
|
||||
run: make sign-images
|
||||
env:
|
||||
VERSION: canary
|
||||
- name: Set up tags and refs
|
||||
id: prep
|
||||
run: |
|
||||
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
echo ::set-output name=sha::$(git rev-parse --short HEAD)
|
||||
|
||||
- name: Set up Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
# Username used to log in to a Docker registry. If not set then no login will occur
|
||||
username: ${{ github.repository_owner }}
|
||||
# https://github.blog/changelog/2021-03-24-packages-container-registry-now-supports-github_token/
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
# Server address of Docker registry. If not set then will default to Docker Hub
|
||||
registry: ghcr.io
|
||||
|
||||
- name: Build and push interceptor image
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
# Docker repository to tag the image with
|
||||
tags: ${{ env.IMAGE_INTERCEPTOR_NAME }}:canary,${{ env.IMAGE_INTERCEPTOR_NAME }}:sha-${{steps.prep.outputs.sha}}
|
||||
labels: |
|
||||
sh.keda.http.image.source=${{github.event.repository.html_url}}
|
||||
sh.keda.http.image.created=${{steps.prep.outputs.created}}
|
||||
sh.keda.http.image.revision=${{github.sha}}
|
||||
file: interceptor/Dockerfile
|
||||
context: .
|
||||
push: true
|
||||
|
||||
build_scaler:
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up tags and refs
|
||||
id: prep
|
||||
run: |
|
||||
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
echo ::set-output name=sha::$(git rev-parse --short HEAD)
|
||||
|
||||
- name: Set up Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
# Username used to log in to a Docker registry. If not set then no login will occur
|
||||
username: ${{ github.repository_owner }}
|
||||
# Password or personal access token used to log in to a Docker registry. If not set then no login will occur
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
# Server address of Docker registry. If not set then will default to Docker Hub
|
||||
registry: ghcr.io
|
||||
|
||||
- name: Build and push scaler image
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
# Docker repository to tag the image with
|
||||
tags: ${{ env.IMAGE_SCALER_NAME }}:canary,${{ env.IMAGE_SCALER_NAME }}:sha-${{steps.prep.outputs.sha}}
|
||||
labels: |
|
||||
sh.keda.http.image.source=${{github.event.repository.html_url}}
|
||||
sh.keda.http.image.created=${{steps.prep.outputs.created}}
|
||||
sh.keda.http.image.revision=${{github.sha}}
|
||||
file: scaler/Dockerfile
|
||||
context: .
|
||||
push: true
|
||||
|
|
|
@ -2,35 +2,36 @@ name: Publish official image to GitHub Container Registry
|
|||
|
||||
on:
|
||||
push:
|
||||
tags: ["v[0-9].[0-9].[0-9]"]
|
||||
tags: [ "v[0-9].[0-9].[0-9]" ]
|
||||
|
||||
env:
|
||||
IMAGE_OPERATOR_NAME: ghcr.io/${{ github.repository_owner }}/http-add-on-operator
|
||||
IMAGE_INTERCEPTOR_NAME: ghcr.io/${{ github.repository_owner }}/http-add-on-interceptor
|
||||
IMAGE_SCALER_NAME: ghcr.io/${{ github.repository_owner }}/http-add-on-scaler
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
id-token: write # needed for signing the images with GitHub OIDC Token **not production ready**
|
||||
build_operator:
|
||||
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up tags and refs
|
||||
id: prep
|
||||
run: |
|
||||
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
|
||||
- name: Get the version
|
||||
id: get_version
|
||||
run: |
|
||||
echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/v}
|
||||
|
||||
- name: Release Deployment YAML file
|
||||
run: make release
|
||||
env:
|
||||
VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
- name: Set up Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
# Username used to log in to a Docker registry. If not set then no login will occur
|
||||
username: ${{ github.repository_owner }}
|
||||
|
@ -39,55 +40,93 @@ jobs:
|
|||
# Server address of Docker registry. If not set then will default to Docker Hub
|
||||
registry: ghcr.io
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
|
||||
- name: Publish on GitHub Container Registry
|
||||
run: make publish-multiarch
|
||||
env:
|
||||
VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
# https://github.com/sigstore/cosign-installer
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
|
||||
|
||||
- name: Check Cosign install!
|
||||
run: cosign version
|
||||
|
||||
- name: Sign KEDA images published on GitHub Container Registry
|
||||
# This step uses the identity token to provision an ephemeral certificate
|
||||
# against the sigstore community Fulcio instance.
|
||||
run: make sign-images
|
||||
env:
|
||||
VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
# Get release information to determine id of the current release
|
||||
- name: Get Release
|
||||
id: get-release-info
|
||||
uses: bruceadams/get-release@74c3d60f5a28f358ccf241a00c9021ea16f0569f # v1.3.2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# Upload deployment YAML file to GitHub release
|
||||
- name: Upload Deployment YAML file
|
||||
id: upload-deployment-yaml
|
||||
uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Build and push operator image
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
upload_url: https://uploads.github.com/repos/kedacore/http-add-on/releases/${{ steps.get-release-info.outputs.id }}/assets?name=keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
|
||||
asset_path: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
|
||||
asset_name: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
|
||||
asset_content_type: application/x-yaml
|
||||
# Docker repository to tag the image with
|
||||
tags: ${{ env.IMAGE_OPERATOR_NAME }}:latest,${{ env.IMAGE_OPERATOR_NAME }}:${{ steps.get_version.outputs.VERSION }}
|
||||
labels: |
|
||||
sh.keda.http.image.source=${{github.event.repository.html_url}}
|
||||
sh.keda.http.image.created=${{steps.prep.outputs.created}}
|
||||
sh.keda.http.image.revision=${{github.sha}}
|
||||
sh.keda.http.image.release=${{github.ref}}
|
||||
file: operator/Dockerfile
|
||||
context: .
|
||||
push: true
|
||||
|
||||
# Upload CRD deployment YAML file to GitHub release
|
||||
- name: Upload Deployment YAML file
|
||||
id: upload-crd-deployment-yaml
|
||||
uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
build_interceptor:
|
||||
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up tags and refs
|
||||
id: prep
|
||||
run: |
|
||||
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
|
||||
- name: Set up Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
upload_url: https://uploads.github.com/repos/kedacore/http-add-on/releases/${{ steps.get-release-info.outputs.id }}/assets?name=keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
|
||||
asset_path: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
|
||||
asset_name: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
|
||||
asset_content_type: application/x-yaml
|
||||
# Username used to log in to a Docker registry. If not set then no login will occur
|
||||
username: ${{ github.repository_owner }}
|
||||
# Password or personal access token used to log in to a Docker registry. If not set then no login will occur
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
# Server address of Docker registry. If not set then will default to Docker Hub
|
||||
registry: ghcr.io
|
||||
|
||||
- name: Build and push interceptor image
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
# Docker repository to tag the image with
|
||||
tags: ${{ env.IMAGE_INTERCEPTOR_NAME }}:latest,${{ env.IMAGE_INTERCEPTOR_NAME }}:${{ steps.get_version.outputs.VERSION }}
|
||||
labels: |
|
||||
sh.keda.http.image.source=${{github.event.repository.html_url}}
|
||||
sh.keda.http.image.created=${{steps.prep.outputs.created}}
|
||||
sh.keda.http.image.revision=${{github.sha}}
|
||||
sh.keda.http.image.release=${{github.ref}}
|
||||
file: interceptor/Dockerfile
|
||||
context: .
|
||||
push: true
|
||||
|
||||
build_scaler:
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up tags and refs
|
||||
id: prep
|
||||
run: |
|
||||
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
|
||||
- name: Set up Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
# Username used to log in to a Docker registry. If not set then no login will occur
|
||||
username: ${{ github.repository_owner }}
|
||||
# Password or personal access token used to log in to a Docker registry. If not set then no login will occur
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
# Server address of Docker registry. If not set then will default to Docker Hub
|
||||
registry: ghcr.io
|
||||
|
||||
- name: Build and push scaler image
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
# Docker repository to tag the image with
|
||||
tags: ${{ env.IMAGE_SCALER_NAME }}:latest,${{ env.IMAGE_SCALER_NAME }}:${{ steps.get_version.outputs.VERSION }}
|
||||
labels: |
|
||||
sh.keda.http.image.source=${{github.event.repository.html_url}}
|
||||
sh.keda.http.image.created=${{steps.prep.outputs.created}}
|
||||
sh.keda.http.image.revision=${{github.sha}}
|
||||
sh.keda.http.image.release=${{github.ref}}
|
||||
file: scaler/Dockerfile
|
||||
context: .
|
||||
push: true
|
||||
|
|
|
@ -1,139 +0,0 @@
|
|||
name: e2e tests
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
e2e_tests:
|
||||
runs-on: ubuntu-latest
|
||||
name: Execute e2e test on AMD64 ${{ matrix.kubernetesVersion }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kubernetesVersion: [v1.32, v1.31, v1.30]
|
||||
include:
|
||||
- kubernetesVersion: v1.32
|
||||
kindImage: kindest/node:v1.32.0@sha256:c48c62eac5da28cdadcf560d1d8616cfa6783b58f0d94cf63ad1bf49600cb027
|
||||
- kubernetesVersion: v1.31
|
||||
kindImage: kindest/node:v1.31.4@sha256:2cb39f7295fe7eafee0842b1052a599a4fb0f8bcf3f83d96c7f4864c357c6c30
|
||||
- kubernetesVersion: v1.30
|
||||
kindImage: kindest/node:v1.30.8@sha256:17cd608b3971338d9180b00776cb766c50d0a0b6b904ab4ff52fd3fc5c6369bf
|
||||
steps:
|
||||
- name: Install prerequisites
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install curl make ca-certificates gcc libc-dev -y
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: "1.24"
|
||||
|
||||
- name: Helm install
|
||||
uses: Azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
|
||||
|
||||
- name: Create k8s ${{ matrix.kubernetesVersion }} Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ matrix.kindImage }}
|
||||
cluster_name: cluster
|
||||
|
||||
- name: Generate images and push to the cluster
|
||||
run: |
|
||||
make docker-build
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-operator:${VERSION} --name cluster
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-interceptor:${VERSION} --name cluster
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-scaler:${VERSION} --name cluster
|
||||
env:
|
||||
VERSION: ${{ github.sha }}
|
||||
|
||||
- name: Show Kubernetes version
|
||||
run: |
|
||||
kubectl version
|
||||
- name: Run e2e test
|
||||
run: |
|
||||
make e2e-test
|
||||
env:
|
||||
VERSION: ${{ github.sha }}
|
||||
|
||||
arm_image_generation:
|
||||
runs-on: ARM64
|
||||
name: Generate ARM64 images for e2e tests
|
||||
steps:
|
||||
- name: Install prerequisites
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install curl make ca-certificates gcc libc-dev -y
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Generate images
|
||||
run: |
|
||||
make docker-build
|
||||
env:
|
||||
VERSION: ${{ github.sha }}
|
||||
|
||||
arm_e2e_tests:
|
||||
runs-on: http-add-on-e2e
|
||||
needs: arm_image_generation
|
||||
name: Execute e2e test on ARM64 ${{ matrix.kubernetesVersion }}
|
||||
env:
|
||||
KUBECONFIG: ${{ github.workspace }}/.kube/config
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kubernetesVersion: [v1.32, v1.31, v1.30]
|
||||
include:
|
||||
- kubernetesVersion: v1.32
|
||||
kindImage: kindest/node:v1.32.0@sha256:c48c62eac5da28cdadcf560d1d8616cfa6783b58f0d94cf63ad1bf49600cb027
|
||||
- kubernetesVersion: v1.31
|
||||
kindImage: kindest/node:v1.31.4@sha256:2cb39f7295fe7eafee0842b1052a599a4fb0f8bcf3f83d96c7f4864c357c6c30
|
||||
- kubernetesVersion: v1.30
|
||||
kindImage: kindest/node:v1.30.8@sha256:17cd608b3971338d9180b00776cb766c50d0a0b6b904ab4ff52fd3fc5c6369bf
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: "1.24"
|
||||
|
||||
- name: Helm install
|
||||
uses: Azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
|
||||
|
||||
- name: Create k8s ${{ matrix.kubernetesVersion }} Kind Cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ matrix.kindImage }}
|
||||
cluster_name: ${{ runner.name }}
|
||||
|
||||
- name: Push images to the cluster
|
||||
run: |
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-operator:${VERSION} --name ${{ runner.name }}
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-interceptor:${VERSION} --name ${{ runner.name }}
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-scaler:${VERSION} --name ${{ runner.name }}
|
||||
env:
|
||||
VERSION: ${{ github.sha }}
|
||||
|
||||
- name: Show Kubernetes version
|
||||
run: |
|
||||
kubectl version
|
||||
|
||||
- name: Run e2e test
|
||||
run: |
|
||||
make e2e-test
|
||||
env:
|
||||
VERSION: ${{ github.sha }}
|
|
@ -1,48 +1,37 @@
|
|||
name: Build Images
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build_scaler:
|
||||
runs-on: ubuntu-latest
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
- name: Build The Scaler
|
||||
run: |
|
||||
COMMIT=$(git rev-parse --short HEAD)
|
||||
VERSION=${COMMIT} make docker-build-scaler
|
||||
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
build_operator:
|
||||
runs-on: ubuntu-latest
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
- uses: actions/checkout@v2
|
||||
- name: Build The Scaler
|
||||
run:
|
||||
docker build -t scaler -f scaler/Dockerfile .
|
||||
|
||||
build_operator:
|
||||
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Build The Operator
|
||||
run: |
|
||||
COMMIT=$(git rev-parse --short=7 HEAD)
|
||||
VERSION=${COMMIT} make docker-build-operator
|
||||
run:
|
||||
docker build -t operator -f operator/Dockerfile .
|
||||
|
||||
build_interceptor:
|
||||
runs-on: ubuntu-latest
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
- uses: actions/checkout@v2
|
||||
- name: Build The Interceptor
|
||||
run: |
|
||||
COMMIT=$(git rev-parse --short=7 HEAD)
|
||||
VERSION=${COMMIT} make docker-build-interceptor
|
||||
run:
|
||||
docker build -t interceptor -f interceptor/Dockerfile .
|
||||
|
|
|
@ -5,22 +5,13 @@ on:
|
|||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
linkinator:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- uses: JustinBeckwith/linkinator-action@3d5ba091319fa7b0ac14703761eebb7d100e6f6d # v1
|
||||
- uses: actions/checkout@v2
|
||||
- uses: JustinBeckwith/linkinator-action@v1
|
||||
with:
|
||||
paths: "**/*.md"
|
||||
markdown: true
|
||||
concurrency: 1
|
||||
retry: true
|
||||
linksToSkip: "https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler,http://opentelemetry-collector.open-telemetry-system:4318,http://opentelemetry-collector.open-telemetry-system:4318/v1/traces, https://www.gnu.org/software/make/"
|
||||
|
|
|
@ -1,85 +1,20 @@
|
|||
name: Validate
|
||||
name: Run tests
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
validate:
|
||||
name: validate - ${{ matrix.name }}
|
||||
runs-on: ${{ matrix.runner }}
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- runner: ARM64
|
||||
name: arm64
|
||||
- runner: ubuntu-latest
|
||||
name: amd64
|
||||
run_tests:
|
||||
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
|
||||
- name: Check go version
|
||||
run: go version
|
||||
|
||||
- name: Set Go paths
|
||||
id: go-paths
|
||||
run: |
|
||||
echo ::set-output name=mod_cache::$(go env GOMODCACHE)
|
||||
echo ::set-output name=build_cache::$(go env GOCACHE)
|
||||
|
||||
- name: Go modules cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
path: ${{ steps.go-paths.outputs.mod_cache }}
|
||||
key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
with:
|
||||
path: ${{ steps.go-paths.outputs.build_cache }}
|
||||
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Download Go Modules
|
||||
run: go mod download
|
||||
|
||||
- name: Codegen
|
||||
run: make verify-codegen
|
||||
|
||||
- name: Manifests
|
||||
run: make verify-manifests
|
||||
|
||||
- name: Mockgen
|
||||
run: make verify-mockgen
|
||||
|
||||
- name: Build
|
||||
run: ARCH=${{ matrix.name }} make build
|
||||
|
||||
go-version: 1.16.x
|
||||
- name: Test
|
||||
run: ARCH=${{ matrix.name }} make test
|
||||
|
||||
statics:
|
||||
permissions:
|
||||
contents: read # for actions/checkout to fetch code
|
||||
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
|
||||
name: Static Checks
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
|
||||
with:
|
||||
go-version: "1.24"
|
||||
- uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
|
||||
with:
|
||||
version: v2.1.0
|
||||
run: go test ./...
|
||||
|
|
|
@ -354,11 +354,4 @@ admin/target
|
|||
admin/Cargo.lock
|
||||
|
||||
/target
|
||||
.envrc
|
||||
|
||||
# locally generated certs for testing TLS
|
||||
*.crt
|
||||
*.pem
|
||||
*.csr
|
||||
*.srl
|
||||
*.ext
|
||||
.envrc
|
|
@ -1,74 +0,0 @@
|
|||
version: "2"
|
||||
run:
|
||||
concurrency: 4
|
||||
build-tags:
|
||||
- e2e
|
||||
linters:
|
||||
default: none
|
||||
enable:
|
||||
- bodyclose
|
||||
- unconvert
|
||||
- ineffassign
|
||||
- staticcheck
|
||||
- copyloopvar
|
||||
#- depguard #https://github.com/kedacore/keda/issues/4980
|
||||
- dogsled
|
||||
- dupl
|
||||
- errcheck
|
||||
#- funlen
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- goprintffuncname
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- nolintlint
|
||||
- revive
|
||||
- staticcheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- whitespace
|
||||
settings:
|
||||
funlen:
|
||||
lines: 80
|
||||
statements: 40
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
- linters:
|
||||
- dupl
|
||||
- revive
|
||||
- unparam
|
||||
path: _test\.go
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- goimports
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- prefix(github.com/kedacore/http-add-on)
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
# Exclude gci check for //+kubebuilder:scaffold:imports comments. Waiting to
|
||||
# resolve https://github.com/kedacore/keda/issues/4379
|
||||
- operator/controllers/http/suite_test.go
|
||||
- operator/main.go
|
|
@ -1,45 +0,0 @@
|
|||
default_stages: [commit, push]
|
||||
minimum_pre_commit_version: "1.20.0"
|
||||
repos:
|
||||
- repo: https://github.com/dnephin/pre-commit-golang
|
||||
rev: v0.3.5
|
||||
hooks:
|
||||
- id: go-fmt
|
||||
name: Run go fmt against the code
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v3.4.0
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: detect-private-key
|
||||
- id: end-of-file-fixer
|
||||
- id: check-merge-conflict
|
||||
- id: mixed-line-ending
|
||||
- repo: https://github.com/thlorenz/doctoc.git
|
||||
rev: v2.0.0
|
||||
hooks:
|
||||
- id: doctoc
|
||||
name: Add TOC for md files
|
||||
files: ^README\.md$|^CONTRIBUTING\.md$
|
||||
args:
|
||||
- "--maxlevel"
|
||||
- "3"
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: language-matters
|
||||
language: pygrep
|
||||
name: Check for language that we do not accept as community
|
||||
description: Please use "deny_list" or "allow_list" instead.
|
||||
entry: "(?i)(black|white)[_-]?(list|List)"
|
||||
pass_filenames: true
|
||||
- id: golangci-lint
|
||||
language: golang
|
||||
name: Run golangci against the code
|
||||
entry: golangci-lint run
|
||||
types: [go]
|
||||
pass_filenames: false
|
||||
- id: validate-changelog
|
||||
name: Validate Changelog
|
||||
language: system
|
||||
entry: "bash hack/validate-changelog.sh"
|
||||
pass_filenames: false
|
||||
files: CHANGELOG\.md
|
12
.whitesource
12
.whitesource
|
@ -1,12 +0,0 @@
|
|||
{
|
||||
"scanSettings": {
|
||||
"baseBranches": []
|
||||
},
|
||||
"checkRunSettings": {
|
||||
"vulnerableCheckRunConclusionLevel": "failure",
|
||||
"displayMode": "diff"
|
||||
},
|
||||
"issueSettings": {
|
||||
"minSeverityLevel": "LOW"
|
||||
}
|
||||
}
|
19
ADOPTERS.md
19
ADOPTERS.md
|
@ -1,19 +0,0 @@
|
|||
# KEDA HTTP Add-on Adopters
|
||||
|
||||
This page contains a list of organizations who are using KEDA's HTTP Add-on in production or at stages of testing.
|
||||
|
||||
## Adopters
|
||||
|
||||
| Organization | Status | More Information (Blog post, etc.) |
|
||||
| ------------ | ---------| ---------------|
|
||||
| PropulsionAI ||[PropulsionAI](https://propulsionhq.com) allows you to add AI to your apps, without writing code.|
|
||||
| REWE Digital ||From delivery service to market — [REWE Digital](https://www.rewe-digital.com) strengthens leading technological position of REWE Group in food retail sector. |
|
||||
|
||||
## Become an adopter!
|
||||
|
||||
You can easily become an adopter by sending a pull request to this file.
|
||||
|
||||
These are the adoption statuses that you can use:
|
||||
|
||||
- 
|
||||
- 
|
214
CHANGELOG.md
214
CHANGELOG.md
|
@ -1,214 +0,0 @@
|
|||
# Changelog
|
||||
|
||||
<!--
|
||||
New changelog entries must be inline with our changelog guidelines.
|
||||
Please refer to https://github.com/kedacore/keda/blob/main/CONTRIBUTING.md#Changelog to learn more.
|
||||
-->
|
||||
|
||||
This changelog keeps track of work items that have been completed and are ready to be shipped in the next release.
|
||||
|
||||
## History
|
||||
|
||||
- [Unreleased](#unreleased)
|
||||
- [v0.10.0](#v0100)
|
||||
- [v0.9.0](#v090)
|
||||
- [v0.8.0](#v080)
|
||||
- [v0.7.0](#v070)
|
||||
- [v0.6.0](#v060)
|
||||
- [v0.5.0](#v050)
|
||||
|
||||
## Unreleased
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Add failover service on cold-start ([#1280](https://github.com/kedacore/http-add-on/pull/1280))
|
||||
- **General**: Add configurable tracing support to the interceptor proxy ([#1021](https://github.com/kedacore/http-add-on/pull/1021))
|
||||
- **General**: Allow using HSO and SO with different names ([#1293](https://github.com/kedacore/http-add-on/issues/1293))
|
||||
- **General**: Support profiling for KEDA components ([#4789](https://github.com/kedacore/keda/issues/4789))
|
||||
- **General**: Add possibility to skip TLS verification for upstreams in interceptor ([#1307](https://github.com/kedacore/http-add-on/pull/1307))
|
||||
### Improvements
|
||||
|
||||
- **Interceptor**: Support HTTPScaledObject scoped timeout ([#813](https://github.com/kedacore/http-add-on/issues/813))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))
|
||||
|
||||
### Deprecations
|
||||
|
||||
- **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))
|
||||
|
||||
### Other
|
||||
|
||||
- **Documentation**: Correct the service name used in the walkthrough documentation ([#1244](https://github.com/kedacore/http-add-on/pull/1244))
|
||||
|
||||
## v0.10.0
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Fix infrastructure crashes when deleting ScaledObject while scaling
|
||||
- **General**: Fix kubectl active printcolumn ([#1211](https://github.com/kedacore/http-add-on/issues/1211))
|
||||
- **General**: Support InitialCooldownPeriod for HTTPScaledObject [#1213](https://github.com/kedacore/http-add-on/issues/1213)
|
||||
|
||||
### Other
|
||||
|
||||
- **Documentation**: Correct the service name used in the walkthrough documentation ([#1244](https://github.com/kedacore/http-add-on/pull/1244))
|
||||
|
||||
## v0.9.0
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- **General**: Drop support for deprecated field `spec.scaleTargetRef.deployment` ([#1061](https://github.com/kedacore/http-add-on/issues/1061))
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Support portName in HTTPScaledObject service scaleTargetRef ([#1174](https://github.com/kedacore/http-add-on/issues/1174))
|
||||
- **General**: Support setting multiple TLS certs for different domains on the interceptor proxy ([#1116](https://github.com/kedacore/http-add-on/issues/1116))
|
||||
- **Interceptor**: Add support for for AWS ELB healthcheck probe ([#1198](https://github.com/kedacore/http-add-on/issues/1198))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: Align the interceptor metrics env var configuration with the OTEL spec ([#1031](https://github.com/kedacore/http-add-on/issues/1031))
|
||||
- **General**: Include trailing 0 window buckets in RPS calculation ([#1075](https://github.com/kedacore/http-add-on/issues/1075))
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Sign images with Cosign ([#1062](https://github.com/kedacore/http-add-on/issues/1062))
|
||||
|
||||
## v0.8.0
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Add configurable TLS on the wire support to the interceptor proxy ([#907](https://github.com/kedacore/http-add-on/issues/907))
|
||||
- **General**: Add support for collecting metrics using a Prometheus compatible endpoint or by sending metrics to an OpenTelemetry's HTTP endpoint ([#910](https://github.com/kedacore/http-add-on/issues/910))
|
||||
- **General**: Propagate HTTPScaledObject labels and annotations to ScaledObject ([#840](https://github.com/kedacore/http-add-on/issues/840))
|
||||
- **General**: Provide support for allowing HTTP scaler to work alongside other core KEDA scalers ([#489](https://github.com/kedacore/http-add-on/issues/489))
|
||||
- **General**: Support aggregation windows ([#882](https://github.com/kedacore/http-add-on/issues/882))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: Ensure operator is aware about changes on underlying ScaledObject ([#900](https://github.com/kedacore/http-add-on/issues/900))
|
||||
|
||||
### Deprecations
|
||||
|
||||
You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).
|
||||
|
||||
- **General**: Deprecated `targetPendingRequests` in favor of `spec.scalingMetric.*.targetValue` ([#959](https://github.com/kedacore/http-add-on/discussions/959))
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Align with the new format of Ingress in the example demo ([#979](https://github.com/kedacore/http-add-on/pull/979))
|
||||
- **General**: Unify loggers ([#958](https://github.com/kedacore/http-add-on/issues/958))
|
||||
|
||||
## v0.7.0
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- **General**: `host` field has been removed in favor of `hosts` in `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552)|[#888](https://github.com/kedacore/http-add-on/pull/888))
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Support any resource which implements `/scale` subresource ([#438](https://github.com/kedacore/http-add-on/issues/438))
|
||||
|
||||
### Improvements
|
||||
|
||||
- **General**: Improve Scaler reliability adding probes and 3 replicas ([#870](https://github.com/kedacore/http-add-on/issues/870))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: Add new user agent probe ([#862](https://github.com/kedacore/http-add-on/issues/862))
|
||||
- **General**: Fix external scaler getting into bad state when retrieving queue lengths fails. ([#870](https://github.com/kedacore/http-add-on/issues/870))
|
||||
- **General**: Increase ScaledObject polling interval to 15 seconds ([#799](https://github.com/kedacore/http-add-on/issues/799))
|
||||
- **General**: Set forward request RawPath to original request RawPath ([#864](https://github.com/kedacore/http-add-on/issues/864))
|
||||
|
||||
### Deprecations
|
||||
|
||||
You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).
|
||||
|
||||
New deprecation(s):
|
||||
|
||||
- **General**: Deprecated `KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS` in favor of `KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS` ([#438](https://github.com/kedacore/http-add-on/issues/438))
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Bump golang version ([#853](https://github.com/kedacore/http-add-on/pull/853))
|
||||
|
||||
## v0.6.0
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Add manifests to deploy the Add-on ([#716](https://github.com/kedacore/http-add-on/issues/716))
|
||||
|
||||
### Improvements
|
||||
|
||||
- **Scaler**: Decrease memory usage by allowing increasing stream interval configuration ([#745](https://github.com/kedacore/http-add-on/pull/745))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **Interceptor**: Add support for streaming responses ([#743](https://github.com/kedacore/http-add-on/issues/743))
|
||||
- **Interceptor**: Fatal error: concurrent map iteration and map write ([#726](https://github.com/kedacore/http-add-on/issues/726))
|
||||
- **Interceptor**: Keep original Host in the Host header ([#331](https://github.com/kedacore/http-add-on/issues/331))
|
||||
- **Interceptor**: Provide graceful shutdown for http servers on SIGINT and SIGTERM ([#731](https://github.com/kedacore/http-add-on/issues/731))
|
||||
- **Operator**: Remove ScaledObject `name` & `app` custom labels ([#717](https://github.com/kedacore/http-add-on/issues/717))
|
||||
- **Scaler**: Provide graceful shutdown for grpc server on SIGINT and SIGTERM ([#731](https://github.com/kedacore/http-add-on/issues/731))
|
||||
- **Scaler**: Reimplement custom interceptor metrics ([#718](https://github.com/kedacore/http-add-on/issues/718))
|
||||
|
||||
### Deprecations
|
||||
|
||||
You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).
|
||||
|
||||
New deprecation(s):
|
||||
|
||||
- **General**: `host` field deprecated in favor of `hosts` in `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552))
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Adding a changelog validating script to check for formatting and order ([#761](https://github.com/kedacore/http-add-on/pull/761))
|
||||
- **General**: Skip not required CI checks on PRs on new commits ([#801](https://github.com/kedacore/http-add-on/pull/801))
|
||||
|
||||
## v0.5.0
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
None.
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Log incoming requests using the Combined Log Format ([#669](https://github.com/kedacore/http-add-on/pull/669))
|
||||
- **Routing**: Add multi-host support to `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552))
|
||||
- **Routing**: Support path-based routing ([#338](https://github.com/kedacore/http-add-on/issues/338))
|
||||
|
||||
### Improvements
|
||||
|
||||
- **General**: Automatically tag Docker image with commit SHA ([#567](https://github.com/kedacore/http-add-on/issues/567))
|
||||
- **Operator**: Migrate project to Kubebuilder v3 ([#625](https://github.com/kedacore/http-add-on/issues/625))
|
||||
- **RBAC**: Introduce fine-grained permissions per component and reduce required permissions ([#612](https://github.com/kedacore/http-add-on/issues/612))
|
||||
- **Routing**: New routing table implementation that relies on the live state of HTTPScaledObjects on the K8s Cluster instead of a ConfigMap that is updated periodically ([#605](https://github.com/kedacore/http-add-on/issues/605))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: Changes to HTTPScaledObjects now take effect ([#605](https://github.com/kedacore/http-add-on/issues/605))
|
||||
- **General**: HTTPScaledObject is the owner of the underlying ScaledObject ([#703](https://github.com/kedacore/http-add-on/issues/703))
|
||||
- **Controller**: Use kedav1alpha1.ScaledObject default values ([#607](https://github.com/kedacore/http-add-on/issues/607))
|
||||
- **Routing**: Lookup host without port ([#608](https://github.com/kedacore/http-add-on/issues/608))
|
||||
|
||||
### Deprecations
|
||||
|
||||
You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).
|
||||
|
||||
New deprecation(s):
|
||||
|
||||
- **General**: `host` field deprecated in favor of `hosts` in `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552))
|
||||
|
||||
Previously announced deprecation(s):
|
||||
|
||||
- None.
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Use kubernetes e2e images for e2e test and samples ([#665](https://github.com/kedacore/http-add-on/issues/665))
|
||||
- **e2e tests**: Use the same e2e system as in core ([#686](https://github.com/kedacore/http-add-on/pull/686))
|
154
CONTRIBUTING.md
154
CONTRIBUTING.md
|
@ -1,154 +0,0 @@
|
|||
# Contributing to KEDA
|
||||
|
||||
Thanks for helping make KEDA better 😍.
|
||||
|
||||
There are many areas we can use contributions - ranging from code, documentation, feature proposals, issue triage, samples, and content creation.
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
## Table of contents
|
||||
|
||||
- [Project governance](#project-governance)
|
||||
- [Including Documentation Changes](#including-documentation-changes)
|
||||
- [Development Environment Setup](#development-environment-setup)
|
||||
- [Locally Build & Deploy KEDA HTTP Addon](#locally-build--deploy-keda-http-addon)
|
||||
- [Pre-requisite:](#pre-requisite)
|
||||
- [Building:](#building)
|
||||
- [Deploying:](#deploying)
|
||||
- [Load testing with k9s:](#load-testing-with-k9s)
|
||||
- [Developer Certificate of Origin: Signing your work](#developer-certificate-of-origin-signing-your-work)
|
||||
- [Every commit needs to be signed](#every-commit-needs-to-be-signed)
|
||||
- [I didn't sign my commit, now what?!](#i-didnt-sign-my-commit-now-what)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
## Project governance
|
||||
|
||||
You can learn about the governance of KEDA [here](https://github.com/kedacore/governance).
|
||||
|
||||
## Including Documentation Changes
|
||||
|
||||
For any contribution you make that impacts the behavior or experience of the KEDA HTTP Add-on, please make sure you include updates to the documentation in the same pull request, and if appropriate, changes to [the KEDA main documentation repository](https://github.com/kedacore/keda-docs). Contributions that do not include documentation or samples will be rejected.
|
||||
|
||||
## Development Environment Setup
|
||||
|
||||
We have a comprehensive how-to document that details the setup and configuration of the development environment for this project.
|
||||
|
||||
Please find it at [docs/developing.md](./docs/developing.md).
|
||||
|
||||
## Locally Build & Deploy KEDA HTTP Addon
|
||||
|
||||
### Pre-requisite:
|
||||
|
||||
- A running Kubernetes cluster with KEDA installed.
|
||||
- [Make](https://www.gnu.org/software/make/)
|
||||
- [Helm](https://helm.sh/)
|
||||
- [k9s](https://github.com/derailed/k9s) (_optional_)
|
||||
|
||||
### Building:
|
||||
|
||||
- Fork & clone the repo:
|
||||
```bash
|
||||
$ git clone https://github.com/<your-username>/http-add-on.git
|
||||
```
|
||||
- Change into the repo directory:
|
||||
```bash
|
||||
$ cd http-add-on
|
||||
```
|
||||
- Use Make to build with:
|
||||
```bash
|
||||
$ make build # build local binaries
|
||||
$ make docker-build # build docker images of the components
|
||||
```
|
||||
|
||||
### Deploying:
|
||||
|
||||
Custom HTTP Add-on as an image
|
||||
|
||||
- Make your changes in the code
|
||||
- Build and publish images with your changes, remembering to update the information for registry of your choice:
|
||||
|
||||
```bash
|
||||
IMAGE_REGISTRY=docker.io IMAGE_REPO=johndoe make docker-publish
|
||||
```
|
||||
|
||||
> Note: If you need to build images to other architecture from your machine, you can use multi-arch building with `IMAGE_REGISTRY=docker.io IMAGE_REPO=johndo make publish-multiarch`.
|
||||
|
||||
There are local clusters with local registries provided, in such cases make sure to use and push your images to its local registry. In the case of MicroK8s, the address is `localhost:32000` and the command would look like the following.
|
||||
|
||||
```bash
|
||||
IMAGE_REGISTRY=localhost:32000 IMAGE_REPO=johndo make deploy
|
||||
```
|
||||
### Load testing with k9s:
|
||||
|
||||
K9s integrates Hey, a CLI tool to benchmark HTTP endpoints similar to AB bench. This preliminary feature currently supports benchmarking port-forwards and services. You can use this feature in load testing as follows:
|
||||
|
||||
- Install an application to scale, we use the provided sample -
|
||||
```console
|
||||
$ helm install xkcd ./examples/xkcd -n ${NAMESPACE}
|
||||
```
|
||||
- You'll need to clone the repository to get access to this chart. If you have your own Deployment and Service installed, you can go right to creating an HTTPScaledObject. We use the provided sample HTTPScaledObject -
|
||||
```
|
||||
$ kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
|
||||
```
|
||||
- Testing Your Installation using k9s:
|
||||
```
|
||||
(a) Enter k9s dashboard using command: `k9s`
|
||||
|
||||
(b) search for services using - “:service”
|
||||
|
||||
(c) HTTP traffic needs to route through the Service that the add on has set up. Find interceptor proxy service i.e. ‘keda-add-ons-http-interceptor-proxy’ and port forward it using <SHIFT+F>
|
||||
|
||||
(d) Search for the same port-forward in the list you get by command - “:pf”
|
||||
|
||||
(e) Enter the port-forward and apply <CTRL+L> to start a benchmark
|
||||
|
||||
(f) You can enter the port-forward to see the run stat details and performance.
|
||||
```
|
||||
>You can customize the benchmark in k9s also. It's explained well in [here](https://k9scli.io/topics/bench/).
|
||||
|
||||
## Developer Certificate of Origin: Signing your work
|
||||
|
||||
### Every commit needs to be signed
|
||||
|
||||
The Developer Certificate of Origin (DCO) is a lightweight way for contributors to certify that they wrote or otherwise have the right to submit the code they are contributing to the project. Here is the full text of the DCO, reformatted for readability:
|
||||
|
||||
```
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Contributors sign-off that they adhere to these requirements by adding a `Signed-off-by` line to commit messages.
|
||||
|
||||
```
|
||||
This is my commit message
|
||||
|
||||
Signed-off-by: Random J Developer <random@developer.example.org>
|
||||
```
|
||||
|
||||
Git even has a `-s` command line option to append this automatically to your commit message:
|
||||
|
||||
```console
|
||||
$ git commit -s -m 'This is my commit message'
|
||||
```
|
||||
|
||||
Each Pull Request is checked whether or not commits in a Pull Request do contain a valid Signed-off-by line.
|
||||
|
||||
### I didn't sign my commit, now what?!
|
||||
|
||||
No worries - You can easily replay your changes, sign them and force push them!
|
||||
|
||||
```console
|
||||
$ git checkout <branch-name>
|
||||
$ git reset $(git merge-base main <branch-name>)
|
||||
$ git add -A
|
||||
$ git commit -sm "one commit on <branch-name>"
|
||||
$ git push --force
|
||||
```
|
294
Makefile
294
Makefile
|
@ -1,227 +1,121 @@
|
|||
##################################################
|
||||
# Variables #
|
||||
##################################################
|
||||
SHELL = /bin/bash
|
||||
GIT_TAG?=$(shell git rev-parse --short HEAD)
|
||||
SCALER_DOCKER_IMG?=ghcr.io/kedacore/http-add-on-scaler:sha-${GIT_SHA}
|
||||
INTERCEPTOR_DOCKER_IMG?=ghcr.io/kedacore/keda-http-interceptor:sha-${GIT_TAG}
|
||||
OPERATOR_DOCKER_IMG?=ghcr.io/kedacore/keda-http-operator:sha-${GIT_TAG}
|
||||
NAMESPACE?=kedahttp
|
||||
|
||||
IMAGE_REGISTRY ?= ghcr.io
|
||||
IMAGE_REPO ?= kedacore
|
||||
VERSION ?= main
|
||||
|
||||
IMAGE_OPERATOR ?= ${IMAGE_REGISTRY}/${IMAGE_REPO}/http-add-on-operator
|
||||
IMAGE_INTERCEPTOR ?= ${IMAGE_REGISTRY}/${IMAGE_REPO}/http-add-on-interceptor
|
||||
IMAGE_SCALER ?= ${IMAGE_REGISTRY}/${IMAGE_REPO}/http-add-on-scaler
|
||||
|
||||
IMAGE_OPERATOR_VERSIONED_TAG ?= ${IMAGE_OPERATOR}:$(VERSION)
|
||||
IMAGE_INTERCEPTOR_VERSIONED_TAG ?= ${IMAGE_INTERCEPTOR}:$(VERSION)
|
||||
IMAGE_SCALER_VERSIONED_TAG ?= ${IMAGE_SCALER}:$(VERSION)
|
||||
|
||||
IMAGE_OPERATOR_SHA_TAG ?= ${IMAGE_OPERATOR}:$(GIT_COMMIT_SHORT)
|
||||
IMAGE_INTERCEPTOR_SHA_TAG ?= ${IMAGE_INTERCEPTOR}:$(GIT_COMMIT_SHORT)
|
||||
IMAGE_SCALER_SHA_TAG ?= ${IMAGE_SCALER}:$(GIT_COMMIT_SHORT)
|
||||
|
||||
ARCH ?=amd64
|
||||
CGO ?=0
|
||||
TARGET_OS ?=linux
|
||||
|
||||
BUILD_PLATFORMS ?= linux/amd64,linux/arm64
|
||||
OUTPUT_TYPE ?= registry
|
||||
|
||||
GO_BUILD_VARS= GO111MODULE=on CGO_ENABLED=$(CGO) GOOS=$(TARGET_OS) GOARCH=$(ARCH)
|
||||
GO_LDFLAGS="-X github.com/kedacore/http-add-on/pkg/build.version=${VERSION} -X github.com/kedacore/http-add-on/pkg/build.gitCommit=${GIT_COMMIT}"
|
||||
|
||||
GIT_COMMIT ?= $(shell git rev-list -1 HEAD)
|
||||
GIT_COMMIT_SHORT ?= $(shell git rev-parse --short HEAD)
|
||||
|
||||
COSIGN_FLAGS ?= -y -a GIT_HASH=${GIT_COMMIT} -a GIT_VERSION=${VERSION} -a BUILD_DATE=${DATE}
|
||||
|
||||
define DOMAINS
|
||||
basicConstraints=CA:FALSE
|
||||
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = localhost
|
||||
DNS.2 = *.keda
|
||||
DNS.3 = *.interceptor-tls-test-ns
|
||||
endef
|
||||
export DOMAINS
|
||||
|
||||
define ABC_DOMAINS
|
||||
basicConstraints=CA:FALSE
|
||||
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = abc
|
||||
endef
|
||||
export ABC_DOMAINS
|
||||
|
||||
# Build targets
|
||||
|
||||
build-operator:
|
||||
${GO_BUILD_VARS} go build -ldflags $(GO_LDFLAGS) -trimpath -a -o bin/operator ./operator
|
||||
|
||||
build-interceptor:
|
||||
${GO_BUILD_VARS} go build -ldflags $(GO_LDFLAGS) -trimpath -a -o bin/interceptor ./interceptor
|
||||
#####
|
||||
# scaler targets
|
||||
#####
|
||||
|
||||
.PHONY: build-scaler
|
||||
build-scaler:
|
||||
${GO_BUILD_VARS} go build -ldflags $(GO_LDFLAGS) -trimpath -a -o bin/scaler ./scaler
|
||||
go build -o bin/scaler ./scaler
|
||||
|
||||
build: build-operator build-interceptor build-scaler
|
||||
|
||||
# generate certs for local unit and e2e tests
|
||||
rootca-test-certs:
|
||||
mkdir -p certs
|
||||
openssl req -x509 -nodes -new -sha256 -days 1024 -newkey rsa:2048 -keyout certs/RootCA.key -out certs/RootCA.pem -subj "/C=US/CN=Keda-Root-CA"
|
||||
openssl x509 -outform pem -in certs/RootCA.pem -out certs/RootCA.crt
|
||||
|
||||
test-certs: rootca-test-certs
|
||||
echo "$$DOMAINS" > certs/domains.ext
|
||||
openssl req -new -nodes -newkey rsa:2048 -keyout certs/tls.key -out certs/tls.csr -subj "/C=US/ST=KedaState/L=KedaCity/O=Keda-Certificates/CN=keda.local"
|
||||
openssl x509 -req -sha256 -days 1024 -in certs/tls.csr -CA certs/RootCA.pem -CAkey certs/RootCA.key -CAcreateserial -extfile certs/domains.ext -out certs/tls.crt
|
||||
echo "$$ABC_DOMAINS" > certs/abc_domains.ext
|
||||
openssl req -new -nodes -newkey rsa:2048 -keyout certs/abc.tls.key -out certs/abc.tls.csr -subj "/C=US/ST=KedaState/L=KedaCity/O=Keda-Certificates/CN=abc"
|
||||
openssl x509 -req -sha256 -days 1024 -in certs/abc.tls.csr -CA certs/RootCA.pem -CAkey certs/RootCA.key -CAcreateserial -extfile certs/abc_domains.ext -out certs/abc.tls.crt
|
||||
|
||||
clean-test-certs:
|
||||
rm -r certs || true
|
||||
|
||||
# Test targets
|
||||
test: fmt vet test-certs
|
||||
go test ./...
|
||||
|
||||
e2e-test:
|
||||
go run -tags e2e ./tests/run-all.go
|
||||
|
||||
e2e-test-setup:
|
||||
ONLY_SETUP=true go run -tags e2e ./tests/run-all.go
|
||||
|
||||
e2e-test-local:
|
||||
SKIP_SETUP=true go run -tags e2e ./tests/run-all.go
|
||||
|
||||
# Docker targets
|
||||
docker-build-operator:
|
||||
DOCKER_BUILDKIT=1 docker build . -t ${IMAGE_OPERATOR_VERSIONED_TAG} -t ${IMAGE_OPERATOR_SHA_TAG} -f operator/Dockerfile --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT}
|
||||
|
||||
docker-build-interceptor:
|
||||
DOCKER_BUILDKIT=1 docker build . -t ${IMAGE_INTERCEPTOR_VERSIONED_TAG} -t ${IMAGE_INTERCEPTOR_SHA_TAG} -f interceptor/Dockerfile --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT}
|
||||
.PHONY: test-scaler
|
||||
test-scaler:
|
||||
go test ./scaler/...
|
||||
|
||||
.PHONY: docker-build-scaler
|
||||
docker-build-scaler:
|
||||
DOCKER_BUILDKIT=1 docker build . -t ${IMAGE_SCALER_VERSIONED_TAG} -t ${IMAGE_SCALER_SHA_TAG} -f scaler/Dockerfile --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT}
|
||||
docker build -t ${SCALER_DOCKER_IMG} -f scaler/Dockerfile .
|
||||
|
||||
docker-build: docker-build-operator docker-build-interceptor docker-build-scaler
|
||||
.PHONY: docker-push-scaler
|
||||
docker-push-scaler: docker-build-scaler
|
||||
docker push ${SCALER_DOCKER_IMG}
|
||||
|
||||
docker-publish: docker-build ## Push images on to Container Registry (default: ghcr.io).
|
||||
docker push $(IMAGE_OPERATOR_VERSIONED_TAG)
|
||||
docker push $(IMAGE_OPERATOR_SHA_TAG)
|
||||
docker push $(IMAGE_INTERCEPTOR_VERSIONED_TAG)
|
||||
docker push $(IMAGE_INTERCEPTOR_SHA_TAG)
|
||||
docker push $(IMAGE_SCALER_VERSIONED_TAG)
|
||||
docker push $(IMAGE_SCALER_SHA_TAG)
|
||||
#####
|
||||
# Interceptor targets
|
||||
#####
|
||||
|
||||
publish-operator-multiarch:
|
||||
docker buildx build --output=type=${OUTPUT_TYPE} --platform=${BUILD_PLATFORMS} . -t ${IMAGE_OPERATOR_VERSIONED_TAG} -t ${IMAGE_OPERATOR_SHA_TAG} -f operator/Dockerfile --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT}
|
||||
.PHONY: build-interceptor
|
||||
build-interceptor:
|
||||
go build -o bin/interceptor ./interceptor
|
||||
|
||||
publish-interceptor-multiarch:
|
||||
docker buildx build --output=type=${OUTPUT_TYPE} --platform=${BUILD_PLATFORMS} . -t ${IMAGE_INTERCEPTOR_VERSIONED_TAG} -t ${IMAGE_INTERCEPTOR_SHA_TAG} -f interceptor/Dockerfile --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT}
|
||||
.PHONY: test-interceptor
|
||||
test-interceptor:
|
||||
go test ./interceptor/...
|
||||
|
||||
publish-scaler-multiarch:
|
||||
docker buildx build --output=type=${OUTPUT_TYPE} --platform=${BUILD_PLATFORMS} . -t ${IMAGE_SCALER_VERSIONED_TAG} -t ${IMAGE_SCALER_SHA_TAG} -f scaler/Dockerfile --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT}
|
||||
.PHONY: docker-build-interceptor
|
||||
docker-build-interceptor:
|
||||
docker build -t ${INTERCEPTOR_DOCKER_IMG} -f interceptor/Dockerfile .
|
||||
|
||||
publish-multiarch: publish-operator-multiarch publish-interceptor-multiarch publish-scaler-multiarch
|
||||
.PHONY: docker-push-interceptor
|
||||
docker-push-interceptor: docker-build-interceptor
|
||||
docker push ${INTERCEPTOR_DOCKER_IMG}
|
||||
|
||||
release: manifests kustomize ## Produce new KEDA Http Add-on release in keda-add-ons-http-$(VERSION).yaml file.
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-interceptor=${IMAGE_INTERCEPTOR_VERSIONED_TAG}
|
||||
cd config/scaler && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-scaler=${IMAGE_SCALER_VERSIONED_TAG}
|
||||
cd config/operator && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-operator=${IMAGE_OPERATOR_VERSIONED_TAG}
|
||||
$(KUSTOMIZE) build config/default > keda-add-ons-http-$(VERSION).yaml
|
||||
$(KUSTOMIZE) build config/crd > keda-add-ons-http-$(VERSION)-crds.yaml
|
||||
#####
|
||||
# operator targets
|
||||
#####
|
||||
|
||||
# Development
|
||||
.PHONY: build-operator
|
||||
build-operator:
|
||||
go build -o bin/operator ./operator
|
||||
|
||||
generate: codegen mockgen manifests ## Generate code, manifests, and mocks.
|
||||
.PHONY: test-operator
|
||||
test-operator:
|
||||
go test ./operator/...
|
||||
|
||||
verify: verify-codegen verify-mockgen verify-manifests ## Verify code, manifests, and mocks.
|
||||
.PHONY: docker-build-operator
|
||||
docker-build-operator:
|
||||
docker build -t ${OPERATOR_DOCKER_IMG} -f operator/Dockerfile .
|
||||
|
||||
codegen: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
||||
$(CONTROLLER_GEN) object:headerFile='hack/boilerplate.go.txt' paths='./...'
|
||||
./hack/update-codegen.sh
|
||||
.PHONY: docker-push-operator
|
||||
docker-push-operator: docker-build-operator
|
||||
docker push ${OPERATOR_DOCKER_IMG}
|
||||
|
||||
verify-codegen: ## Verify code is up to date.
|
||||
./hack/verify-codegen.sh
|
||||
.PHONY: helm-upgrade-operator
|
||||
helm-upgrade-operator:
|
||||
helm upgrade kedahttp ./charts/keda-http-operator \
|
||||
--install \
|
||||
--namespace ${NAMESPACE} \
|
||||
--create-namespace \
|
||||
--set images.operator=${OPERATOR_DOCKER_IMG} \
|
||||
--set images.scaler=${SCALER_DOCKER_IMG} \
|
||||
--set images.interceptor=${INTERCEPTOR_DOCKER_IMG}
|
||||
|
||||
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
|
||||
$(CONTROLLER_GEN) crd rbac:roleName='operator' webhook paths='./operator/...' output:crd:artifacts:config='config/crd/bases' output:rbac:artifacts:config='config/operator'
|
||||
$(CONTROLLER_GEN) crd rbac:roleName='scaler' webhook paths='./scaler/...' output:rbac:artifacts:config='config/scaler'
|
||||
$(CONTROLLER_GEN) crd rbac:roleName='interceptor' webhook paths='./interceptor/...' output:rbac:artifacts:config='config/interceptor'
|
||||
.PHONY: helm-delete-operator
|
||||
helm-delete-operator:
|
||||
helm delete -n ${NAMESPACE} kedahttp
|
||||
|
||||
.PHONY: generate-operator
|
||||
generate-operator:
|
||||
cd operator && \
|
||||
make manifests && \
|
||||
cp config/crd/bases/http.keda.sh_httpscaledobjects.yaml ../charts/keda-http-operator/crds/httpscaledobjects.http.keda.sh.yaml
|
||||
|
||||
verify-manifests: ## Verify manifests are up to date.
|
||||
./hack/verify-manifests.sh
|
||||
#####
|
||||
# universal targets
|
||||
#####
|
||||
|
||||
sign-images: ## Sign KEDA images published on GitHub Container Registry
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_OPERATOR_VERSIONED_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_OPERATOR_SHA_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_INTERCEPTOR_VERSIONED_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_INTERCEPTOR_SHA_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_SCALER_VERSIONED_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_SCALER_SHA_TAG)
|
||||
.PHONY: build-all
|
||||
build-all: build-scaler build-interceptor build-operator
|
||||
|
||||
mockgen: ## Generate mock implementations of Go interfaces.
|
||||
./hack/update-mockgen.sh
|
||||
.PHONY: test-all
|
||||
test-all: test-scaler test-interceptor test-operator
|
||||
|
||||
verify-mockgen: ## Verify mocks are up to date.
|
||||
./hack/verify-mockgen.sh
|
||||
.PHONY: docker-build-all
|
||||
docker-build-all: docker-build-scaler docker-build-interceptor docker-build-operator
|
||||
|
||||
fmt: ## Run go fmt against code.
|
||||
go fmt ./...
|
||||
.PHONY: docker-push-all
|
||||
docker-push-all: docker-push-scaler docker-push-interceptor docker-push-operator
|
||||
|
||||
vet: ## Run go vet against code.
|
||||
go vet ./...
|
||||
.PHONY: create-example
|
||||
create-example:
|
||||
kubectl create -f examples/httpscaledobject.yaml --namespace=${NAMESPACE}
|
||||
|
||||
lint: ## Run golangci-lint against code.
|
||||
golangci-lint run
|
||||
.PHONY: delete-example
|
||||
delete-example:
|
||||
kubectl delete --namespace=${NAMESPACE} httpscaledobject xkcd
|
||||
|
||||
pre-commit: ## Run static-checks.
|
||||
pre-commit run --all-files
|
||||
|
||||
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
|
||||
controller-gen: ## Download controller-gen locally if necessary.
|
||||
GOBIN=$(shell pwd)/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0
|
||||
|
||||
KUSTOMIZE = $(shell pwd)/bin/kustomize
|
||||
kustomize: ## Download kustomize locally if necessary.
|
||||
GOBIN=$(shell pwd)/bin go install sigs.k8s.io/kustomize/kustomize/v5
|
||||
|
||||
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
|
||||
$(KUSTOMIZE) build config/crd | kubectl apply -f -
|
||||
|
||||
deploy: manifests kustomize ## Deploy to the K8s cluster specified in ~/.kube/config.
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-interceptor=${IMAGE_INTERCEPTOR_VERSIONED_TAG}
|
||||
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/otel/deployment.yaml --group apps --kind Deployment --name interceptor --version v1
|
||||
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/otel/scaledobject.yaml --group keda.sh --kind ScaledObject --name interceptor --version v1alpha1
|
||||
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/tls/deployment.yaml --group apps --kind Deployment --name interceptor --version v1
|
||||
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/tls/proxy.service.yaml --kind Service --name interceptor-proxy --version v1
|
||||
|
||||
cd config/scaler && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-scaler=${IMAGE_SCALER_VERSIONED_TAG}
|
||||
|
||||
cd config/scaler && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/otel/deployment.yaml --group apps --kind Deployment --name scaler --version v1
|
||||
|
||||
cd config/operator && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-operator=${IMAGE_OPERATOR_VERSIONED_TAG}
|
||||
|
||||
$(KUSTOMIZE) build config/default | kubectl apply -f -
|
||||
|
||||
undeploy:
|
||||
$(KUSTOMIZE) build config/default | kubectl delete -f -
|
||||
.PHONY: helm-upgrade-keda
|
||||
helm-upgrade-keda:
|
||||
helm upgrade keda kedacore/keda \
|
||||
--install \
|
||||
--namespace ${NAMESPACE} \
|
||||
--create-namespace \
|
||||
--set watchNamespace=${NAMESPACE}
|
||||
|
||||
.PHONY: helm-delete-keda
|
||||
helm-delete-keda:
|
||||
helm delete -n ${NAMESPACE} keda
|
||||
|
|
40
README.md
40
README.md
|
@ -1,32 +1,17 @@
|
|||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
|
||||
|
||||
- [HTTP Autoscaling Made Simple](#http-autoscaling-made-simple)
|
||||
- [Adopters - Become a listed KEDA user!](#adopters---become-a-listed-keda-user)
|
||||
- [Walkthrough](#walkthrough)
|
||||
- [Design](#design)
|
||||
- [Installation](#installation)
|
||||
- [Roadmap](#roadmap)
|
||||
- [Contributing](#contributing)
|
||||
- [Code of Conduct](#code-of-conduct)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
<p align="center"><img src="https://github.com/kedacore/keda/raw/main/images/logos/keda-word-colour.png" width="300"/></p>
|
||||
|
||||
<p style="font-size: 25px" align="center"><b>Kubernetes-based Event Driven Autoscaling - HTTP Add-on</b></p>
|
||||
<p style="font-size: 25px" align="center"><b>Kubernetes-based Event Driven Autoscaling - HTTP Add-On</b></p>
|
||||
<p style="font-size: 25px" align="center">
|
||||
|
||||
The KEDA HTTP Add-on allows Kubernetes users to automatically scale their HTTP servers up and down (including to/from zero) based on incoming HTTP traffic. Please see our [use cases document](./docs/use_cases.md) to learn more about how and why you would use this project.
|
||||
The KEDA HTTP Add On allows Kubernetes users to automatically scale their HTTP servers up and down (including to/from zero) based on incoming HTTP traffic. Please see our [use cases document](./docs/use_cases.md) to learn more about how and why you would use this project.
|
||||
|
||||
| 🚧 **Project status: beta** 🚧|
|
||||
|---------------------------------------------|
|
||||
| :loudspeaker: **KEDA is actively relying on community contributions to help grow & maintain the add-on. The KEDA maintainers are assisting the community to evolve the add-on but not directly responsible for it.** Feel free to [open a new discussion](https://github.com/kedacore/http-add-on/discussions/new/choose) in case of questions.<br/><br/>⚠ The HTTP Add-on currently is in [beta](https://github.com/kedacore/http-add-on/releases/latest). We can't yet recommend it for production usage because we are still developing and testing it. It may have "rough edges" including missing documentation, bugs and other issues. It is currently provided as-is without support.<br/><br/>:bulb: For production-ready needs, you can consider using the [Kedify HTTP Scaler](https://kedify.io/scalers/http), a commercial alternative offering robust and reliable scaling for KEDA. |
|
||||
| ⚠ The HTTP add-on currently is in [beta](https://github.com/kedacore/http-add-on/releases/tag/0.1.0). We can't yet recommend it for production usage because we are still developing and testing it. It may have "rough edges" including missing documentation, bugs and other issues. It is currently provided as-is without support.
|
||||
|
||||
## HTTP Autoscaling Made Simple
|
||||
|
||||
[KEDA](https://github.com/kedacore/keda) provides a reliable and well tested solution to scaling your workloads based on external events. The project supports a wide variety of [scalers](https://keda.sh/docs/latest/scalers/) - sources of these events, in other words. These scalers are systems that produce precisely measurable events via an API.
|
||||
[KEDA](https://github.com/kedacore/keda) provides a reliable and well tested solution to scaling your workloads based on external events. The project supports a wide variety of [scalers](https://keda.sh/docs/2.2/scalers/) - sources of these events, in other words. These scalers are systems that produce precisely measurable events via an API.
|
||||
|
||||
KEDA does not, however, include an HTTP-based scaler out of the box for several reasons:
|
||||
|
||||
|
@ -38,21 +23,15 @@ For these reasons, the KEDA core project has purposely not built generic HTTP-ba
|
|||
|
||||
This project, often called KEDA-HTTP, exists to provide that scaling. It is composed of simple, isolated components and includes an opinionated way to put them together.
|
||||
|
||||
## Adopters - Become a listed KEDA user!
|
||||
|
||||
We are always happy to start list users who run KEDA's HTTP Add-on in production or are evaluating it, learn more about it [here](ADOPTERS.md).
|
||||
|
||||
We welcome pull requests to list new adopters.
|
||||
|
||||
## Walkthrough
|
||||
|
||||
Although this is currently a **beta release** project, we have prepared a walkthrough document with instructions on getting started for basic usage.
|
||||
Although this is currently a **beta release** project, we have prepared a walkthrough document that with instructions on getting started for basic usage.
|
||||
|
||||
See that document at [docs/walkthrough.md](./docs/walkthrough.md)
|
||||
|
||||
## Design
|
||||
|
||||
The HTTP Add-on is composed of multiple mostly independent components. This design was chosen to allow for highly
|
||||
The HTTP add-on is composed of multiple mostly independent components. This design was chosen to allow for highly
|
||||
customizable installations while allowing us to ship reasonable defaults.
|
||||
|
||||
- We have written a complete design document. Please see it at [docs/design.md](./docs/design.md).
|
||||
|
@ -63,11 +42,6 @@ customizable installations while allowing us to ship reasonable defaults.
|
|||
|
||||
Please see the [complete installation instructions](./docs/install.md).
|
||||
|
||||
## Roadmap
|
||||
We use GitHub issues to build our backlog, a complete overview of all open items and our planning.
|
||||
|
||||
Learn more about our [roadmap](ROADMAP.md).
|
||||
|
||||
## Contributing
|
||||
|
||||
This project follows the KEDA contributing guidelines, which are outlined in [CONTRIBUTING.md](https://github.com/kedacore/.github/blob/main/CONTRIBUTING.md).
|
||||
|
@ -75,7 +49,7 @@ This project follows the KEDA contributing guidelines, which are outlined in [CO
|
|||
If you would like to contribute code to this project, please see [docs/developing.md](./docs/developing.md).
|
||||
|
||||
---
|
||||
We are a Cloud Native Computing Foundation (CNCF) graduated project.
|
||||
We are a Cloud Native Computing Foundation (CNCF) sandbox project.
|
||||
<p align="center"><img src="https://raw.githubusercontent.com/kedacore/keda/main/images/logo-cncf.svg" height="75px"></p>
|
||||
|
||||
## Code of Conduct
|
||||
|
|
|
@ -1,87 +0,0 @@
|
|||
# Release Process
|
||||
|
||||
The process of releasing a new version of the KEDA HTTP Add-on involves a few steps, detailed below.
|
||||
|
||||
>The process herein is largely automated but we recognize that there may be more that we can automate. If you find something that _can_ and _should_ be automated, and you believe that you know how, please [submit an issue](https://github.com/kedacore/http-add-on/issues/new?assignees=&labels=needs-discussion%2Cfeature-request&template=Feature_request.md) explaining how.
|
||||
|
||||
## 1: Current and new versions
|
||||
|
||||
Please go to the [releases page](https://github.com/kedacore/http-add-on/releases) and observe what the most recent release is. Specifically, note what the _tag_ of the release is. For example, if [version 0.3.0](https://github.com/kedacore/http-add-on/releases/tag/v0.3.0) is the latest release (it is as the time of this writing), the tag for that is `v0.3.0`.
|
||||
|
||||
To determine the new version, follow [SemVer guidelines](https://semver.org). Most releases will increment the PATCH or MINOR version number.
|
||||
|
||||
## 2. Changelog
|
||||
|
||||
Add a new section in [CHANGELOG.md](CHANGELOG.md) for the new version that is being released along with the new features, patches and deprecations it introduces.
|
||||
|
||||
It should not include every single change but solely what matters to our customers, for example issue template that has changed is not important.
|
||||
|
||||
## 3. Add the new version to GitHub Bug report template
|
||||
|
||||
Add the new released version to the list in `KEDA Version` dropdown in [2_bug_report.yml](https://github.com/kedacore/http-add-on/blob/main/.github/ISSUE_TEMPLATE/2_bug_report.yml).
|
||||
|
||||
## 4. Update documentation references to current version
|
||||
|
||||
Update the links to current version within the file `walkthrough.md`
|
||||
|
||||
> ```console
|
||||
> kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
|
||||
> ```
|
||||
|
||||
> >If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](THE REFERENCE).
|
||||
|
||||
## 5: Create a new GitHub release
|
||||
|
||||
[Create a new release](https://github.com/kedacore/http-add-on/releases/new) on the GitHub releases page, using your new release number.
|
||||
|
||||
The title of the release should be "Version 1.2.3", substituting `1.2.3` with the new version number, and the Git tag should be `v1.2.3`, again substituting `1.2.3` with your new version number.
|
||||
|
||||
The release description should be a short to medium length summary of what has changed since the last release. The following link will give you a list of commits made since the `v0.3.0` tag: [github.com/kedacore/http-add-on/compare/v0.3.0...main](https://github.com/kedacore/http-add-on/compare/v0.3.0...main). Replace `v0.3.0` for your appropriate most recent last tag to get the commit list and base your release summary on that list.
|
||||
|
||||
After you create the new release, automation in a GitHub action will build and deploy new container images.
|
||||
|
||||
## 6: Submit a PR to the [Helm Charts Repository](https://github.com/kedacore/charts)
|
||||
|
||||
The scope of the changes you'll need to make to the Helm chart vary, but the below list is the minimum set of fields to change:
|
||||
|
||||
- The `appVersion` field in `Chart.yaml`. If you've chosen `1.2.3` as the version number, this field should read:
|
||||
|
||||
```yaml
|
||||
appVersion: 1.2.3
|
||||
```
|
||||
|
||||
- The `images.tag` field in `values.yaml`. If you've chosen `1.2.3` as the version number, this field should read:
|
||||
|
||||
```yaml
|
||||
images:
|
||||
# tag is the image tag to use for all images.
|
||||
# for example, if the operator image is "myoperator" and
|
||||
# tag is "mytag", the operator image used will be
|
||||
# "myoperator:mytag". `latest` is used to indicate the latest
|
||||
# stable release in the official images, `canary` is
|
||||
# the build for the latest commit to the `main` branch,
|
||||
# and you can target any other commit with `sha-<GIT_SHA[0:7]>`
|
||||
tag: 1.2.3
|
||||
```
|
||||
|
||||
>Note: The container images generated by CI/CD in step 2 will have the same tag as the tag you created in the release, minus the `v` prefix. You can always see what images created by going to the container registry page for the [interceptor](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor), [operator](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator) or [scaler](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler)
|
||||
|
||||
|
||||
Once you've made changes to the chart, here's how to do submit the change to the charts repository:
|
||||
|
||||
- Submit a Pull Request (PR) to the [github.com/kedacore/charts](https://github.com/kedacore/charts) repository with your changes. Also ensure that you follow the [Shipping a new version](https://github.com/kedacore/charts/blob/master/CONTRIBUTING.md#shipping-a-new-version) guidelines in the charts documentation to complete the chart release.
|
||||
- Your chart changes must go into the [http-add-on](https://github.com/kedacore/charts/tree/master/http-add-on) directory. The release artifact will go into the [docs](https://github.com/kedacore/charts/tree/master/docs) directory.
|
||||
- Ensure that you add a link to the HTTP Add-on repository and the new release number, so that PR reviewers are aware what the work relates to
|
||||
- Work with the maintainers of that repository to get the chart merged
|
||||
|
||||
After your PR is merged, you've completed the release. Congratulations! You can optionally write a blog post about it; see the next section if you're interested.
|
||||
|
||||
## 7: Publish release on Artifact Hub
|
||||
|
||||
Publish release on Artifact Hub by creating a new version in [kedacore/external-scalers](https://github.com/kedacore/external-scalers/tree/main/artifacthub/add-ons-http).
|
||||
|
||||
## 8: Write a blog post on the documentation site (_optional_)
|
||||
|
||||
If you believe that your release is large enough to warrant a blog post on the [keda.sh/blog](https://keda.sh/blog/) site, please go to [github.com/kedacore/keda-docs](https://github.com/kedacore/keda-docs) and submit a new PR with a blog article about the release.
|
||||
|
||||
Include in the article a longer summary of changes and any important information about the new functionality, bugfixes, or anything else appropriate. The post should go into the [content/blog](https://github.com/kedacore/keda-docs/tree/master/content/blog) directory.
|
27
ROADMAP.md
27
ROADMAP.md
|
@ -1,27 +0,0 @@
|
|||
# Roadmap
|
||||
|
||||
KEDA uses [GitHub issues](https://docs.github.com/en/issues/tracking-your-work-with-issues/about-issues) to track open work items with [GitHub Projects (beta)](https://docs.github.com/en/issues/trying-out-the-new-projects-experience/about-projects) to plan for upcoming releases.
|
||||
|
||||
This document provides insights to the community on how we use it and what to expect.
|
||||
|
||||
You can find our roadmap [here](https://github.com/orgs/kedacore/projects/6/views/1).
|
||||
|
||||
## Using our roadmap
|
||||
|
||||
Here is some guidance on how to use our roadmap.
|
||||
|
||||
### Upcoming Release
|
||||
|
||||
As we work towards our next release, we are planning and tracking work as part of the next release cycle.
|
||||
|
||||
You can find an overview of the items in our upcoming release:
|
||||
|
||||
- As list with the respective categories ([link](https://github.com/orgs/kedacore/projects/6/views/5))
|
||||
- As list with the respective priorities ([link](https://github.com/orgs/kedacore/projects/6/views/2))
|
||||
- As a board with the current status ([link](https://github.com/orgs/kedacore/projects/6/views/4))
|
||||
|
||||
### Triaging
|
||||
|
||||
All newly created issues are automatically added to the roadmap and waiting to be triaged by a maintainer.
|
||||
|
||||
You can find an overview of all issues pending to be triaged [here](https://github.com/orgs/kedacore/projects/6/views/6).
|
|
@ -1,259 +0,0 @@
|
|||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.15.0
|
||||
name: httpscaledobjects.http.keda.sh
|
||||
spec:
|
||||
group: http.keda.sh
|
||||
names:
|
||||
kind: HTTPScaledObject
|
||||
listKind: HTTPScaledObjectList
|
||||
plural: httpscaledobjects
|
||||
shortNames:
|
||||
- httpso
|
||||
singular: httpscaledobject
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .status.targetWorkload
|
||||
name: TargetWorkload
|
||||
type: string
|
||||
- jsonPath: .status.targetService
|
||||
name: TargetService
|
||||
type: string
|
||||
- jsonPath: .spec.replicas.min
|
||||
name: MinReplicas
|
||||
type: integer
|
||||
- jsonPath: .spec.replicas.max
|
||||
name: MaxReplicas
|
||||
type: integer
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
- jsonPath: .status.conditions[?(@.reason=="HTTPScaledObjectIsReady")].status
|
||||
name: Active
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: HTTPScaledObject is the Schema for the httpscaledobjects API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: HTTPScaledObjectSpec defines the desired state of HTTPScaledObject
|
||||
properties:
|
||||
coldStartTimeoutFailoverRef:
|
||||
description: (optional) The name of the failover service to route
|
||||
HTTP requests to when the target is not available
|
||||
properties:
|
||||
port:
|
||||
description: The port to route to
|
||||
format: int32
|
||||
type: integer
|
||||
portName:
|
||||
description: The port to route to referenced by name
|
||||
type: string
|
||||
service:
|
||||
description: The name of the service to route to
|
||||
type: string
|
||||
timeoutSeconds:
|
||||
default: 30
|
||||
description: The timeout in seconds to wait before routing to
|
||||
the failover service (Default 30)
|
||||
format: int32
|
||||
type: integer
|
||||
required:
|
||||
- service
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: must define either the 'portName' or the 'port'
|
||||
rule: has(self.portName) != has(self.port)
|
||||
hosts:
|
||||
description: |-
|
||||
The hosts to route. All requests which the "Host" header
|
||||
matches any .spec.hosts (and the Request Target matches any
|
||||
.spec.pathPrefixes) will be routed to the Service and Port specified in
|
||||
the scaleTargetRef.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
initialCooldownPeriod:
|
||||
description: (optional) Initial period before scaling
|
||||
format: int32
|
||||
type: integer
|
||||
pathPrefixes:
|
||||
description: |-
|
||||
The paths to route. All requests which the Request Target matches any
|
||||
.spec.pathPrefixes (and the "Host" header matches any .spec.hosts)
|
||||
will be routed to the Service and Port specified in
|
||||
the scaleTargetRef.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
replicas:
|
||||
description: (optional) Replica information
|
||||
properties:
|
||||
max:
|
||||
description: Maximum amount of replicas to have in the deployment
|
||||
(Default 100)
|
||||
format: int32
|
||||
type: integer
|
||||
min:
|
||||
description: Minimum amount of replicas to have in the deployment
|
||||
(Default 0)
|
||||
format: int32
|
||||
type: integer
|
||||
type: object
|
||||
scaleTargetRef:
|
||||
description: |-
|
||||
The name of the deployment to route HTTP requests to (and to autoscale).
|
||||
Including validation as a requirement to define either the PortName or the Port
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
port:
|
||||
description: The port to route to
|
||||
format: int32
|
||||
type: integer
|
||||
portName:
|
||||
description: The port to route to referenced by name
|
||||
type: string
|
||||
service:
|
||||
description: The name of the service to route to
|
||||
type: string
|
||||
required:
|
||||
- service
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: must define either the 'portName' or the 'port'
|
||||
rule: has(self.portName) != has(self.port)
|
||||
scaledownPeriod:
|
||||
description: (optional) Cooldown period value
|
||||
format: int32
|
||||
type: integer
|
||||
scalingMetric:
|
||||
description: (optional) Configuration for the metric used for scaling
|
||||
properties:
|
||||
concurrency:
|
||||
description: Scaling based on concurrent requests for a given
|
||||
target
|
||||
properties:
|
||||
targetValue:
|
||||
default: 100
|
||||
description: Target value for rate scaling
|
||||
type: integer
|
||||
type: object
|
||||
requestRate:
|
||||
description: Scaling based the average rate during an specific
|
||||
time window for a given target
|
||||
properties:
|
||||
granularity:
|
||||
default: 1s
|
||||
description: Time granularity for rate calculation
|
||||
type: string
|
||||
targetValue:
|
||||
default: 100
|
||||
description: Target value for rate scaling
|
||||
type: integer
|
||||
window:
|
||||
default: 1m
|
||||
description: Time window for rate calculation
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
targetPendingRequests:
|
||||
description: (optional) DEPRECATED (use ScalingMetric instead) Target
|
||||
metric value
|
||||
format: int32
|
||||
type: integer
|
||||
timeouts:
|
||||
description: (optional) Timeouts that override the global ones
|
||||
properties:
|
||||
conditionWait:
|
||||
description: How long to wait for the backing workload to have
|
||||
1 or more replicas before connecting and sending the HTTP request
|
||||
(Default is set by the KEDA_CONDITION_WAIT_TIMEOUT environment
|
||||
variable)
|
||||
type: string
|
||||
responseHeader:
|
||||
description: How long to wait between when the HTTP request is
|
||||
sent to the backing app and when response headers need to arrive
|
||||
(Default is set by the KEDA_RESPONSE_HEADER_TIMEOUT environment
|
||||
variable)
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- scaleTargetRef
|
||||
type: object
|
||||
status:
|
||||
description: HTTPScaledObjectStatus defines the observed state of HTTPScaledObject
|
||||
properties:
|
||||
conditions:
|
||||
description: Conditions of the operator
|
||||
items:
|
||||
description: HTTPScaledObjectCondition stores the condition state
|
||||
properties:
|
||||
message:
|
||||
description: Message indicating details about the transition.
|
||||
type: string
|
||||
reason:
|
||||
description: Reason for the condition's last transition.
|
||||
enum:
|
||||
- ErrorCreatingAppScaledObject
|
||||
- AppScaledObjectCreated
|
||||
- TerminatingResources
|
||||
- AppScaledObjectTerminated
|
||||
- AppScaledObjectTerminationError
|
||||
- PendingCreation
|
||||
- HTTPScaledObjectIsReady
|
||||
type: string
|
||||
status:
|
||||
description: Status of the condition, one of True, False, Unknown.
|
||||
type: string
|
||||
timestamp:
|
||||
description: Timestamp of the condition
|
||||
type: string
|
||||
type:
|
||||
description: Type of condition
|
||||
enum:
|
||||
- Ready
|
||||
type: string
|
||||
required:
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
targetService:
|
||||
description: TargetService reflects details about the scaled service.
|
||||
type: string
|
||||
targetWorkload:
|
||||
description: TargetWorkload reflects details about the scaled workload.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
|
@ -1,5 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- bases/http.keda.sh_httpscaledobjects.yaml
|
||||
#+kubebuilder:scaffold:crdkustomizeresource
|
|
@ -1,21 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- ../crd
|
||||
- ../interceptor
|
||||
- ../operator
|
||||
- ../scaler
|
||||
namespace: keda
|
||||
namePrefix: keda-add-ons-http-
|
||||
labels:
|
||||
- includeSelectors: true
|
||||
includeTemplates: true
|
||||
pairs:
|
||||
app.kubernetes.io/name: http
|
||||
app.kubernetes.io/component: add-on
|
||||
app.kubernetes.io/part-of: keda
|
||||
- includeSelectors: false
|
||||
includeTemplates: false
|
||||
pairs:
|
||||
app.kubernetes.io/version: HEAD
|
||||
app.kubernetes.io/managed-by: kustomize
|
|
@ -1,10 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: interceptor-admin
|
||||
spec:
|
||||
ports:
|
||||
- name: admin
|
||||
protocol: TCP
|
||||
port: 9090
|
||||
targetPort: admin
|
|
@ -1,91 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
- key: kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- amd64
|
||||
- arm64
|
||||
containers:
|
||||
- name: interceptor
|
||||
image: ghcr.io/kedacore/http-add-on-interceptor
|
||||
args:
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDA_HTTP_CURRENT_NAMESPACE
|
||||
value: "keda"
|
||||
- name: KEDA_HTTP_PROXY_PORT
|
||||
value: "8080"
|
||||
- name: KEDA_HTTP_ADMIN_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_CONNECT_TIMEOUT
|
||||
value: "500ms"
|
||||
- name: KEDA_HTTP_KEEP_ALIVE
|
||||
value: "1s"
|
||||
- name: KEDA_RESPONSE_HEADER_TIMEOUT
|
||||
value: "500ms"
|
||||
- name: KEDA_CONDITION_WAIT_TIMEOUT
|
||||
value: "20s"
|
||||
- name: KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS
|
||||
value: "1000"
|
||||
- name: KEDA_HTTP_FORCE_HTTP2
|
||||
value: "false"
|
||||
- name: KEDA_HTTP_MAX_IDLE_CONNS
|
||||
value: "100"
|
||||
- name: KEDA_HTTP_IDLE_CONN_TIMEOUT
|
||||
value: "90s"
|
||||
- name: KEDA_HTTP_TLS_HANDSHAKE_TIMEOUT
|
||||
value: "10s"
|
||||
- name: KEDA_HTTP_EXPECT_CONTINUE_TIMEOUT
|
||||
value: "1s"
|
||||
ports:
|
||||
- name: admin
|
||||
containerPort: 9090
|
||||
- name: proxy
|
||||
containerPort: 8080
|
||||
- name: metrics
|
||||
containerPort: 2223
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /livez
|
||||
port: proxy
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: proxy
|
||||
# TODO(pedrotorres): set better default values avoiding overcommitment
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
limits:
|
||||
cpu: 1000m
|
||||
memory: 1000Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
serviceAccountName: interceptor
|
||||
terminationGracePeriodSeconds: 10
|
|
@ -1,29 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: interceptor
|
||||
env:
|
||||
- name: OTEL_PROM_EXPORTER_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_PROM_EXPORTER_PORT
|
||||
value: "2223"
|
||||
- name: OTEL_EXPORTER_OTLP_METRICS_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_EXPORTER_OTLP_ENDPOINT
|
||||
value: "http://opentelemetry-collector.open-telemetry-system:4318"
|
||||
- name: OTEL_METRIC_EXPORT_INTERVAL
|
||||
value: "1"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL
|
||||
value: "http/protobuf"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
|
||||
value: "http://opentelemetry-collector.open-telemetry-system:4318/v1/traces"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_INSECURE
|
||||
value: "true"
|
|
@ -1,5 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- scaledobject.yaml
|
|
@ -1,6 +0,0 @@
|
|||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
minReplicaCount: 1
|
|
@ -1,38 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: interceptor
|
||||
ports:
|
||||
- name: proxy-tls
|
||||
containerPort: 8443
|
||||
env:
|
||||
- name: KEDA_HTTP_PROXY_TLS_ENABLED
|
||||
value: "true"
|
||||
- name: KEDA_HTTP_PROXY_TLS_CERT_PATH
|
||||
value: "/certs/tls.crt"
|
||||
- name: KEDA_HTTP_PROXY_TLS_KEY_PATH
|
||||
value: "/certs/tls.key"
|
||||
- name: KEDA_HTTP_PROXY_TLS_CERT_STORE_PATHS
|
||||
value: "/additional-certs"
|
||||
- name: KEDA_HTTP_PROXY_TLS_PORT
|
||||
value: "8443"
|
||||
volumeMounts:
|
||||
- readOnly: true
|
||||
mountPath: "/certs"
|
||||
name: certs
|
||||
- readOnly: true
|
||||
mountPath: "/additional-certs/abc-certs"
|
||||
name: abc-certs
|
||||
volumes:
|
||||
- name: certs
|
||||
secret:
|
||||
secretName: keda-tls
|
||||
- name: abc-certs
|
||||
secret:
|
||||
secretName: abc-certs
|
|
@ -1,5 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- proxy.service.yaml
|
|
@ -1,11 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: interceptor-proxy
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: proxy-tls
|
||||
protocol: TCP
|
||||
port: 8443
|
||||
targetPort: proxy-tls
|
|
@ -1,18 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- role.yaml
|
||||
- role_binding.yaml
|
||||
- admin.service.yaml
|
||||
- proxy.service.yaml
|
||||
- metrics.service.yaml
|
||||
- service_account.yaml
|
||||
- scaledobject.yaml
|
||||
configurations:
|
||||
- transformerconfig.yaml
|
||||
labels:
|
||||
- includeSelectors: true
|
||||
includeTemplates: true
|
||||
pairs:
|
||||
app.kubernetes.io/instance: interceptor
|
|
@ -1,11 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: interceptor-metrics
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 2223
|
||||
targetPort: metrics
|
|
@ -1,11 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: interceptor-proxy
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: proxy
|
||||
protocol: TCP
|
||||
port: 8080
|
||||
targetPort: proxy
|
|
@ -1,17 +0,0 @@
|
|||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
minReplicaCount: 3
|
||||
maxReplicaCount: 50
|
||||
pollingInterval: 1
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: interceptor
|
||||
triggers:
|
||||
- type: external
|
||||
metadata:
|
||||
scalerAddress: external-scaler:9090
|
||||
interceptorTargetPendingRequests: '200'
|
|
@ -1,4 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: interceptor
|
|
@ -1,5 +0,0 @@
|
|||
namePrefix:
|
||||
- kind: ScaledObject
|
||||
path: spec/scaleTargetRef/name
|
||||
- kind: ScaledObject
|
||||
path: spec/triggers/metadata/scalerAddress
|
|
@ -1,71 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: operator
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
- key: kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- amd64
|
||||
- arm64
|
||||
containers:
|
||||
- name: operator
|
||||
image: ghcr.io/kedacore/http-add-on-operator
|
||||
args:
|
||||
- --leader-elect
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDAHTTP_OPERATOR_EXTERNAL_SCALER_SERVICE
|
||||
value: "keda-add-ons-http-external-scaler"
|
||||
- name: KEDAHTTP_OPERATOR_EXTERNAL_SCALER_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_OPERATOR_NAMESPACE
|
||||
value: "keda"
|
||||
- name: KEDA_HTTP_OPERATOR_WATCH_NAMESPACE
|
||||
value: ""
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 8080
|
||||
- name: probes
|
||||
containerPort: 8081
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: probes
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: probes
|
||||
# TODO(pedrotorres): set better default values avoiding overcommitment
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
limits:
|
||||
cpu: 1000m
|
||||
memory: 1000Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
serviceAccountName: operator
|
||||
terminationGracePeriodSeconds: 10
|
|
@ -1,12 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- role.yaml
|
||||
- role_binding.yaml
|
||||
- service_account.yaml
|
||||
labels:
|
||||
- includeSelectors: true
|
||||
includeTemplates: true
|
||||
pairs:
|
||||
app.kubernetes.io/instance: operator
|
|
@ -1,24 +0,0 @@
|
|||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: operator
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: operator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: operator
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: operator
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: operator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: operator
|
|
@ -1,4 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: operator
|
|
@ -1,81 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: scaler
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
- key: kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- amd64
|
||||
- arm64
|
||||
containers:
|
||||
- name: scaler
|
||||
image: ghcr.io/kedacore/http-add-on-scaler
|
||||
args:
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_DEPLOYMENT
|
||||
value: "keda-add-ons-http-interceptor"
|
||||
- name: KEDA_HTTP_SCALER_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_NAMESPACE
|
||||
value: "keda"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_SERVICE
|
||||
value: "keda-add-ons-http-interceptor-admin"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_SCALER_STREAM_INTERVAL_MS
|
||||
value: "200"
|
||||
ports:
|
||||
- name: grpc
|
||||
containerPort: 9090
|
||||
livenessProbe:
|
||||
grpc:
|
||||
port: 9090
|
||||
service: liveness
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
grpc:
|
||||
port: 9090
|
||||
service: readiness
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
# TODO(pedrotorres): set better default values avoiding overcommitment
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
limits:
|
||||
cpu: 1000m
|
||||
memory: 1000Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
serviceAccountName: scaler
|
||||
terminationGracePeriodSeconds: 10
|
|
@ -1,6 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: scaler
|
||||
spec:
|
||||
replicas: 1
|
|
@ -1,4 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
|
@ -1,13 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- role.yaml
|
||||
- role_binding.yaml
|
||||
- service.yaml
|
||||
- service_account.yaml
|
||||
labels:
|
||||
- includeSelectors: true
|
||||
includeTemplates: true
|
||||
pairs:
|
||||
app.kubernetes.io/instance: external-scaler
|
|
@ -1,25 +0,0 @@
|
|||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: scaler
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: scaler
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: scaler
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: scaler
|
||||
namespace: keda
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: scaler
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: scaler
|
|
@ -1,10 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: external-scaler
|
||||
spec:
|
||||
ports:
|
||||
- name: grpc
|
||||
protocol: TCP
|
||||
port: 9090
|
||||
targetPort: grpc
|
|
@ -1,4 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: scaler
|
|
@ -1,5 +0,0 @@
|
|||
title: KEDA HTTP Add-On
|
||||
description: Documentation for the KEDA HTTP add-on
|
||||
remote_theme: pages-themes/architect@v0.2.0
|
||||
plugins:
|
||||
- jekyll-remote-theme # add this line to the plugins list if you already have one
|
|
@ -1 +0,0 @@
|
|||
<img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=bd8914ff-fcda-4c0c-ab57-6fc671ae6cff" style="display:none;" />
|
|
@ -1,4 +1,4 @@
|
|||
# The Design of HTTP Add-on
|
||||
# The Design of HTTP-Add-On
|
||||
|
||||
This project is primarily a composition of mostly independent components. We've chosen this design so that you can swap out components as you want/need to while still achieving (roughly) the same functionality.
|
||||
|
||||
|
@ -8,7 +8,7 @@ There are three major components in this system. You can find more detail and di
|
|||
|
||||
- [Operator](../operator) - This component listens for events related to `HTTPScaledObject`s and creates, updates or removes internal machinery as appropriate.
|
||||
- [Interceptor](../interceptor) - This component accepts and routes external HTTP traffic to the appropriate internal application, as appropriate.
|
||||
- [Scaler](../scaler) - This component tracks the size of the pending HTTP request queue for a given app and reports it to KEDA. It acts as an [external scaler](https://keda.sh/docs/latest/scalers/external-push/).
|
||||
- [Scaler](../scaler) - This component tracks the size of the pending HTTP request queue for a given app and reports it to KEDA. It acts as an [external scaler](https://keda.sh/docs/2.1/scalers/external-push/).
|
||||
- [KEDA](https://keda.sh) - KEDA acts as the scaler for the user's HTTP application.
|
||||
|
||||
## Functionality Areas
|
||||
|
@ -19,32 +19,18 @@ We've split this project into a few different major areas of functionality, whic
|
|||
|
||||
We've introduced a new [Custom Resource (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) called `HTTPScaledObject.http.keda.sh` - `HTTPScaledObject` for short. Fundamentally, this resource allows an application developer to submit their HTTP-based application name and container image to the system, and have the system deploy all the necessary internal machinery required to deploy their HTTP application and expose it to the public internet.
|
||||
|
||||
The [operator](../operator) runs inside the Kubernetes namespace to which they're deploying their application and watches for these `HTTPScaledObject` resources. When one is created, it does the following:
|
||||
The [operator](../operator) runs inside the Kubernetes namespace to which they're deploying their application and watches for these `HTTPScaledObject` resources. When one is created, it will create a `Deployment` and `Service` for the app, interceptor, and scaler, and a [`ScaledObject`](https://keda.sh/docs/2.1/concepts/scaling-deployments/) which KEDA then uses to scale the application.
|
||||
|
||||
- Update an internal routing table that maps incoming HTTP hostnames to internal applications.
|
||||
- Furnish this routing table information to interceptors so that they can properly route requests.
|
||||
- Create a [`ScaledObject`](https://keda.sh/docs/latest/concepts/scaling-deployments/#scaledobject-spec) for the `Deployment` specified in the `HTTPScaledObject` resource.
|
||||
|
||||
When the `HTTPScaledObject` is deleted, the operator reverses all of the aforementioned actions.
|
||||
When the `HTTPScaledObject` is deleted, the operator then removes all of the aforementioned resources.
|
||||
|
||||
### Autoscaling for HTTP Apps
|
||||
|
||||
After an `HTTPScaledObject` is created and the operator creates the appropriate resources, you must send HTTP requests through the interceptor so that the application is scaled. A Kubernetes `Service` called `keda-add-ons-http-interceptor-proxy` was created when you `helm install`ed the add-on. Send requests to that service.
|
||||
After an `HTTPScaledObject` is created and the operator creates the appropriate resources, there is a public IP address (and DNS entry, if configured) and the interceptor takes over. When HTTP traffic enters the system from the public internet, the interceptor accepts it and forwards it to the app's `Service` IP (it is most commonly configured as a `ClusterIP` service).
|
||||
|
||||
The interceptor keeps track of the number of pending HTTP requests - HTTP requests that it has forwarded but the app hasn't returned. The scaler periodically makes HTTP requests to the interceptor via an internal RPC endpoint - on a separate port from the public server - to get the size of the pending queue. Based on this queue size, it reports scaling metrics as appropriate to KEDA. As the queue size increases, the scaler instructs KEDA to scale up as appropriate. Similarly, as the queue size decreases, the scaler instructs KEDA to scale down.
|
||||
|
||||
#### The Horizontal Pod Autoscaler
|
||||
|
||||
The HTTP Add-on works with the Kubernetes [Horizonal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details) (HPA) -- via KEDA itself -- to execute scale-up and scale-down operations (except for scaling between zero and non-zero replicas). The add-on furnishes KEDA with two metrics - the current number of pending requests for a host, and the desired number (called `targetPendingRequests` in the [HTTPScaledObject](./ref/v0.3.0/http_scaled_object.md)). KEDA then sends these metrics to the HPA, which uses them as the `currentMetricValue` and `desiredMetricValue`, respectively, in the [HPA Algorithm](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details).
|
||||
|
||||
The net effect is that the add-on scales up when your app grows to more pending requests than the `targetPendingRequests` value, and scales down when it has fewer than that value.
|
||||
|
||||
>The aforementioned HPA algorithm is pasted here for convenience: `desiredReplicas = ceil[currentReplicas * ( currentMetricValue / desiredMetricValue )]`. The value of `targetPendingRequests` will be passed in where `desiredMetricValue` is expected, and the point-in-time metric for number of pending requests will be passed in where `currentMetricValue` is expected.
|
||||
At the same time, the interceptor keeps track of the size of the pending HTTP requests - HTTP requests that it has forwarded but the app hasn't returned. The scaler periodically makes HTTP requests to the interceptor via an internal HTTP endpoint - on a separate port from the public server - to get the size of the pending queue. Based on this queue size, it reports scaling metrics as appropriate to KEDA. As the queue size increases, the scaler instructs KEDA to scale up as appropriate. Similarly, as the queue size decreases, the scaler instructs KEDA to scale down.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
Although the HTTP Add-on is very configurable and supports multiple different deployments, the below diagram is the most common architecture that is shipped by default.
|
||||
Although the HTTP add on is very configurable and supports multiple different deployments, the below diagram is the most common architecture that is shipped by default.
|
||||
|
||||

|
||||
|
||||
[Go back to landing page](./)
|
||||
|
|
|
@ -9,7 +9,6 @@ to have the following tools installed:
|
|||
|
||||
- [Golang](http://golang.org/) for development
|
||||
- [Docker](https://docker.com) for building the images and testing it locally
|
||||
- [Pre-commit](https://pre-commit.com/) for static checks (_optional_)
|
||||
|
||||
## Prerequisites
|
||||
|
||||
|
@ -35,160 +34,42 @@ the service, you can host your own with a series of amazing tools like:
|
|||
|
||||
Follow the [install instructions](./install.md) to check out how to install and get this add-on up and running.
|
||||
|
||||
## Build scripts
|
||||
|
||||
This project uses [Mage](https://magefile.org) as opposed to Make because it's way faster to build and push images, as well as to run tests and other common tasks. Please install [version v1.11.0](https://github.com/magefile/mage/releases/tag/v1.11.0) or above to have access to the task runner.
|
||||
|
||||
### In the Root Directory
|
||||
|
||||
The Makefile located in the root directory has targets useful for the whole project.
|
||||
The Magefile located in the root directory has targets useful for the whole project. There is another magefile [in the operator directory](../operator/magefile.go), which has targets more specific to the operator module.
|
||||
|
||||
> All commands are case sensitive.
|
||||
The most useful and common commands from the root directory are listed below. Please see the "In the Operator Directory" section for the operator-specific targets. Whether you're in the root or the operator directory, you can always run the following general helper commands:
|
||||
|
||||
- `make build`: Builds all the binaries for local testing
|
||||
- `make test`: Run all unit tests
|
||||
- `make e2e-test`: Run all e2e tests
|
||||
- `make docker-build`: Builds all docker images
|
||||
- `make docker-publish`: Build and push all Docker images
|
||||
- `make publish-multiarch`: Build and push all Docker images for `linux/arm64` and `linux/amd64`
|
||||
- `make manifests`: Generate all the manifest files for Kubernetes, it's important to build after every change
|
||||
- `make deploy`: Deploys the HTTP Add-on to the cluster selected in `~/.kube/config` using `config` folder manifests
|
||||
- `make pre-commit`: Execute static checks
|
||||
- `mage -l`: shows a list of all available commands
|
||||
- `mage -h <command>`: shows command-specific details
|
||||
- `mage -h`: shows the general help
|
||||
|
||||
> All commands are case insensitive, so `buildAll` and `buildall` are the same.
|
||||
|
||||
- `mage build`: Builds all the binaries for local testing.
|
||||
- `mage test`: Tests the entire codebase
|
||||
- `mage dockerbuild`: Builds all docker images
|
||||
- Please see the below "Environment Variables" section for more information on this command
|
||||
- `mage dockerpush`: Pushes all docker images, without building them first
|
||||
- Please see the below "Environment Variables" section for more information on this command
|
||||
|
||||
### In the Operator Directory
|
||||
|
||||
- `mage Manifests`: Builds all the manifest files for Kubernetes, it's important to build after every change
|
||||
to a Kustomize annotation.
|
||||
- `mage All`: Generates the operator.
|
||||
|
||||
### Required Environment Variables
|
||||
|
||||
Some of the above commands support changes in the default values:
|
||||
Some of the above commands require several environment variables to be set. You should set them once in your environment to ensure that you can run these targets. We recommend using [direnv](https://direnv.net) to set these environment variables once, so that you don't need to remember to do it.
|
||||
|
||||
- `IMAGE_REGISTRY`: Image registry to be used for docker images
|
||||
- `IMAGE_REPO`: Repository to be used for docker images
|
||||
- `VERSION`: Tag to be used for docker images
|
||||
- `BUILD_PLATFORMS`: Build platforms targeted for multi-arch docker images
|
||||
- `KEDAHTTP_SCALER_IMAGE`: the fully qualified name of the [scaler](../scaler) image. This is used to build, push, and install the scaler into a Kubernetes cluster (required)
|
||||
- `KEDAHTTP_INTERCEPTOR_IMAGE`: the fully qualified name of the [interceptor](../interceptor) image. This is used to build, push, and install the interceptor into a Kubernetes cluster (required)
|
||||
- `KEDAHTTP_OPERATOR_IMAGE`: the fully qualified name of the [operator](../operator) image. This is used to build, push, and install the operator into a Kubernetes cluster (required)
|
||||
- `KEDAHTTP_NAMESPACE`: the Kubernetes namespace to which to install the add on and other required components (optional, defaults to `kedahttp`)
|
||||
|
||||
## Debugging and Observing Components
|
||||
|
||||
The below tips assist with debugging, introspecting, or observing the current state of a running HTTP add-on installation. They involve making network requests to cluster-internal (i.e. `ClusterIP` `Service`s).
|
||||
|
||||
There are generally two ways to communicate with these services. In the following sections, we'll assume you are using the `kubectl proxy` method, but most instructions will be simple enough to adapt to other methods.
|
||||
|
||||
We'll also assume that you have set the `$NAMESPACE` environment variable in your environment to the namespace in which the HTTP add-on is installed.
|
||||
|
||||
### Use `kubectl proxy`
|
||||
|
||||
`kubectl proxy` establishes an authenticated connection to the Kubernetes API server, runs a local web server, and lets you execute REST API requests against `localhost` as if you were executing them against the Kubernetes API server.
|
||||
|
||||
To establish one, run the following command in a separate terminal window:
|
||||
|
||||
```console
|
||||
kubectl proxy -p 9898
|
||||
```
|
||||
|
||||
>You'll keep this proxy running throughout all of your testing, so make sure you keep this terminal window open.
|
||||
|
||||
### Use a dedicated running pod
|
||||
|
||||
The second way to communicate with these services is almost the opposite of the previous. Instead of bringing the API server to you with `kubectl proxy`, you'll be creating an execution environment closer to the API server.
|
||||
|
||||
First, launch a container with an interactive console in Kubernetes with the following command (substituting your namespace in for `$NAMESPACE`):
|
||||
|
||||
```console
|
||||
kubectl run -it alpine --image=alpine -n $NAMESPACE
|
||||
```
|
||||
|
||||
Then, when you see a `curl` command below, replace the entire path up to and including the `/proxy/` segment with just the name of the service and its port. For example, `curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-interceptor-admin:9090/proxy/routing_ping` would just become `curl -L keda-add-ons-http-interceptor-admin:9090/routing_ping`
|
||||
|
||||
### Interceptor
|
||||
|
||||
Any interceptor pod has both a _proxy_ and _admin_ server running inside it. The proxy server is where users send HTTP requests to, and the admin server is for internal use. The admin server runs on a separate port, fronted by a separate `Service`.
|
||||
|
||||
The admin server also performs the following tasks:
|
||||
|
||||
1. Prompt the interceptor to re-fetch the routing table, or
|
||||
2. Print out the interceptor's current routing table (useful for debugging)
|
||||
|
||||
#### Configuration
|
||||
|
||||
Run the following `curl` command to get the running configuration of the interceptor:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-interceptor-admin:9090/proxy/config
|
||||
```
|
||||
|
||||
#### Routing Table
|
||||
|
||||
To prompt the interceptor to fetch the routing table, then print it out:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-interceptor-admin:9090/proxy/routing_ping
|
||||
```
|
||||
|
||||
Or, to just ask the interceptor to print out its routing table:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-interceptor-admin:9090/proxy/routing_table
|
||||
```
|
||||
|
||||
#### Queue Counts
|
||||
|
||||
To fetch the state of an individual interceptor's pending HTTP request queue:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-interceptor-admin:9090/proxy/queue
|
||||
```
|
||||
|
||||
#### Deployment Cache
|
||||
|
||||
To fetch the current state of an individual interceptor's deployment queue:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-interceptor-admin:9090/proxy/deployments
|
||||
```
|
||||
|
||||
The output of this command is a JSON map where the keys are the deployment name and the values are the latest known number of replicas for that deployment.
|
||||
|
||||
### Operator
|
||||
|
||||
Like the interceptor, the operator has an admin server that has HTTP endpoints against which you can run `curl` commands.
|
||||
|
||||
#### Configuration
|
||||
|
||||
Run the following `curl` command to get the running configuration of the operator:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-operator-admin:9090/proxy/config
|
||||
```
|
||||
|
||||
#### Routing Table
|
||||
|
||||
The operator has a similar `/routing_table` endpoint as the interceptor. The data returned from this endpoint, however, is the source of truth. Interceptors fetch their copies of the routing table from this endpoint. Accessing data from this endpoint is similar.
|
||||
|
||||
Fetch the operator's routing table with the following command:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-operator-admin:9090/proxy/routing_table
|
||||
```
|
||||
|
||||
### Scaler
|
||||
|
||||
Like the interceptor, the scaler has an HTTP admin interface against which you can run `curl` commands.
|
||||
|
||||
#### Configuration
|
||||
|
||||
Run the following `curl` command to get the running configuration of the scaler:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-external-scaler:9091/proxy/config
|
||||
```
|
||||
|
||||
#### Queue Counts
|
||||
|
||||
The external scaler fetches pending queue counts from each interceptor in the system, aggregates and stores them, and then returns them to KEDA when requested. KEDA fetches these data via the [standard gRPC external scaler interface](https://keda.sh/docs/2.3/concepts/external-scalers/#external-scaler-grpc-interface).
|
||||
|
||||
For convenience, the scaler also provides a plain HTTP server from which you can also fetch these metrics. Fetch the queue counts from this HTTP server with the following command:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-external-scaler:9091/proxy/queue
|
||||
```
|
||||
|
||||
Alternatively, you can prompt the scaler to fetch counts from all interceptors, aggregate, store, and return counts:
|
||||
|
||||
```console
|
||||
curl -L localhost:9898/api/v1/namespaces/$NAMESPACE/services/keda-add-ons-http-external-scaler:9091/proxy/queue_ping
|
||||
```
|
||||
|
||||
[Go back to landing page](./)
|
||||
>Suffix any `*_IMAGE` variable with `<keda-git-sha>` and the build system will automatically replace it with `sha-$(git rev-parse --short HEAD)`
|
||||
|
|
38
docs/faq.md
38
docs/faq.md
|
@ -1,10 +1,10 @@
|
|||
# FAQ
|
||||
# KEDA-HTTP Frequently Asked Questions
|
||||
|
||||
## Why does this project route HTTP requests?
|
||||
## Why Does This Project Route HTTP Requests?
|
||||
|
||||
In order to autoscale a workload, KEDA-HTTP needs to be involved with routing HTTP requests. However, the project is minimally involved with routing and we're working on ways to get out of the "critical path" of an HTTP request as much as possible. For more information, please see our [scope](./scope.md) document.
|
||||
In order to autoscale a `Deployment`, KEDA-HTTP needs to be involved with routing HTTP requests. However, the project is minimally involved with routing and we're working on ways to get out of the "critical path" of an HTTP request as much as possible. For more information, please see our [scope](./scope.md) document.
|
||||
|
||||
## How is this project similar or different from [Osiris](https://github.com/deislabs/osiris)?
|
||||
## How is this Project Similar or Different from [Osiris](https://github.com/deislabs/osiris)?
|
||||
|
||||
Osiris and KEDA-HTTP have similar features:
|
||||
|
||||
|
@ -13,30 +13,28 @@ Osiris and KEDA-HTTP have similar features:
|
|||
|
||||
However, Osiris and KEDA-HTTP differ in several ways:
|
||||
|
||||
- Autoscaling concerns are implemented separately from the application resources - `Service`, `Ingress`, `Deployment`, `StatefulSet`, `/scale` and more in KEDA-HTTP. With Osiris, those concerns are baked into each app resource.
|
||||
- Autoscaling concerns are implemented separately from the application resources - `Service`, `Ingress`, `Deployment` and more in KEDA-HTTP. With Osiris, those concerns are baked into each app resource.
|
||||
- The KEDA-HTTP operator can automatically deploy and configure networking and compute resources necessary for an HTTP application to autoscale. Osiris does not have this functionality.
|
||||
- Osiris is currently archived in GitHub.
|
||||
- Osiris is currently archived in GitHub
|
||||
|
||||
## How is this project similar or different from [Knative](https://knative.dev/)?
|
||||
## How is this Project Similar or Different from [KNative](https://knative.dev/)?
|
||||
|
||||
Knative Serving and KEDA-HTTP both have core support for autoscaling, including scale-to-zero of compute workloads. KEDA-HTTP is focused solely on deploying production-grade autoscaling HTTP applications, while Knative builds in additional functionality:
|
||||
KNative serving and KEDA-HTTP both have core support for autoscaling, including scale-to-zero of compute workloads. KEDA-HTTP is focused solely on deploying production-grade autoscaling HTTP applications, while KNative builds in additional functionality:
|
||||
|
||||
- Pure [event-based workloads](https://knative.dev/docs/eventing/). [KEDA core](https://github.com/kedacore/keda), without KEDA-HTTP, can support such workloads natively.
|
||||
- Complex deployment strategies like [blue-green](https://knative.dev/docs/serving/samples/blue-green-deployment/).
|
||||
- Supporting other autoscaling mechanisms beyond the built-in [HPA](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), such as the [Knative Pod Autoscaler (KPA)](https://knative.dev/docs/serving/autoscaling/autoscaling-concepts/#knative-pod-autoscaler-kpa).
|
||||
- Complex deployment strategies like [blue-green](https://knative.dev/docs/serving/samples/blue-green-deployment/)
|
||||
- Supporting other autoscaling mechanisms beyond the built-in [HPA](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), such as the [KNative Pod Autoscaler (KPA)](https://knative.dev/docs/serving/autoscaling/autoscaling-concepts/#knative-pod-autoscaler-kpa)
|
||||
|
||||
Additionally, Knative supports a service mesh, while KEDA-HTTP does not out of the box (support for that is forthcoming).
|
||||
Additionally, KNative supports a service mesh, while KEDA-HTTP does not out of the box (support for that is forthcoming).
|
||||
|
||||
## How is this project similar or different from [OpenFaaS](https://www.openfaas.com/)
|
||||
## How is this Project Similar or Different from [OpenFaaS](https://www.openfaas.com/)
|
||||
|
||||
OpenFaaS and KEDA-HTTP both can deploy and autoscale HTTP workloads onto Kubernetes, but they have several important differences that make them suitable for different use cases:
|
||||
|
||||
- OpenFaaS requires the use of a CLI to deploy code to production.
|
||||
- OpenFaaS requires the use of a CLI to deploy code to production
|
||||
- OpenFaaS primarily supports the event-based "functions as a service" pattern. This means:
|
||||
- You deploy code, in a supported language, to handle an HTTP request and OpenFaaS takes care of serving and invoking your code for you.
|
||||
- You deploy complete containers with your HTTP server process in them to KEDA-HTTP.
|
||||
- You don't need to build a container image to deploy code to OpenFaaS, while you do to deploy to KEDA-HTTP.
|
||||
- OpenFaaS can run either on or off Kubernetes, while KEDA-HTTP is Kubernetes-only.
|
||||
- OpenFaaS requires several additional components when running in Kubernetes, like Prometheus. The documentation refers to this as the [PLONK stack](https://docs.openfaas.com/deployment/#plonk-stack).
|
||||
|
||||
[Go back to landing page](./)
|
||||
- You deploy code, in a supported language, to handle an HTTP request and OpenFaaS takes care of serving and invoking your code for you
|
||||
- You deploy complete containers with your HTTP server process in them to KEDA-HTTP
|
||||
- You don't need to build a container image to deploy code to OpenFaaS, while you do to deploy to KEDA-HTTP
|
||||
- OpenFaaS can run either on or off Kubernetes, while KEDA-HTTP is Kubernetes-only
|
||||
- OpenFaaS requires several additional components when running in Kubernetes, like Prometheus. The documentation refers to this as the [PLONK stack](https://docs.openfaas.com/deployment/#plonk-stack)
|
||||
|
|
|
@ -1450,4 +1450,4 @@
|
|||
"gridSize": null,
|
||||
"viewBackgroundColor": "#ffffff"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,91 +1,71 @@
|
|||
# Installing the KEDA HTTP Add-on
|
||||
# Installing the KEDA HTTP Add On
|
||||
|
||||
The HTTP Add-on is highly modular and, as expected, builds on top of KEDA core. Below are some additional components:
|
||||
The HTTP Add On is highly modular and, as expected, builds on top of KEDA core. Below are some additional components:
|
||||
|
||||
- **Operator** - watches for `HTTPScaledObject` CRD resources and creates necessary backing Kubernetes resources (e.g. `Deployment`s, `Service`s, `ScaledObject`s, and so forth)
|
||||
- **Operator** - watches for `ScaledHTTPObject` CRD resources and creates necessary backing Kubernetes resources (e.g. `Deployment`s, `Service`s, `ScaledObject`s, and so forth)
|
||||
- **Scaler** - communicates scaling-related metrics to KEDA. By default, the operator will install this for you as necessary.
|
||||
- **Interceptor** - a cluster-internal proxy that proxies incoming HTTP requests, communicating HTTP queue size metrics to the scaler, and holding requests in a temporary request queue when there are not yet any available app `Pod`s ready to serve. By default, the operator will install this for you as necessary.
|
||||
>There is [pending work](https://github.com/kedacore/http-add-on/issues/354) that may eventually make this component optional.
|
||||
|
||||
## Before You Start: Cluster-global vs. Namespaced Installation
|
||||
>There is [pending work in KEDA](https://github.com/kedacore/keda/issues/615) that will eventually make this component optional. See [issue #6 in this repository](https://github.com/kedacore/http-add-on/issues/6) for even more background
|
||||
|
||||
Both KEDA and the HTTP Add-on can be installed in either cluster-global or namespaced mode. In the former case, your `ScaledObject`s and `HTTPScaledObject`s (respectively) can be installed in any namespace, and one installation will detect and process it. In the latter case, you must install your `ScaledObject`s and `HTTPScaledObject`s in a specific namespace.
|
||||
|
||||
You have the option of installing KEDA and the HTTP Add-on in either mode, but if you install one as cluster-global, the other must also be cluster-global. Similarly, if you install one as namespaced, the other must also be namespaced in the same namespace.
|
||||
## Installing KEDA
|
||||
|
||||
Before you install any of these components, you need to install KEDA. Below are simplified instructions for doing so with [Helm](https://helm.sh), but if you need anything more customized, please see the [official KEDA deployment documentation](https://keda.sh/docs/2.0/deploy/). If you need to install Helm, refer to the [installation guide](https://helm.sh/docs/intro/install/).
|
||||
|
||||
>This document will rely on environment variables such as `${NAMESPACE}` to indicate a value you should customize and provide to the relevant command. In the below `helm install` command, `${NAMESPACE}` should be the namespace you'd like to install KEDA into.
|
||||
>This document will rely on environment variables such as `${NAMESPACE}` to indicate a value you should customize and provide to the relevant command. In the above `helm install` command, `${NAMESPACE}` should be the namespace you'd like to install KEDA into. KEDA and the HTTP Addon provide scaling functionality to only one namespace per installation.
|
||||
|
||||
```console
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
helm repo update
|
||||
helm install keda kedacore/keda --namespace ${NAMESPACE} --create-namespace
|
||||
helm install keda kedacore/keda --namespace ${NAMESPACE} --set watchNamespace=${NAMESPACE}
|
||||
```
|
||||
|
||||
>The above command installs KEDA in cluster-global mode. Add `--set watchNamespace=<target namespace>` to install KEDA in namespaced mode.
|
||||
## Install via Helm Chart
|
||||
|
||||
## Install the Add-on via Helm Chart
|
||||
|
||||
The Helm chart for this project is within KEDA's default helm repository at [kedacore/charts](http://github.com/kedacore/charts), you can install it by running:
|
||||
This repository is within KEDA's default helm repository on [kedacore/charts](http://github.com/kedacore/charts), you can install it by running:
|
||||
|
||||
```console
|
||||
helm install http-add-on kedacore/keda-add-ons-http --namespace ${NAMESPACE}
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
helm repo update
|
||||
helm install http-add-on kedacore/keda-add-ons-http --create-namespace --namespace ${NAMESPACE}
|
||||
```
|
||||
>The above command installs the HTTP Add-on in cluster-global mode. Add `--set operator.watchNamespace=<target namespace>` to install the HTTP Add-on in namespaced mode. If you do this, you must also install KEDA in namespaced mode and use the same target namespace.
|
||||
|
||||
>Installing the HTTP Add-on won't affect any running workloads in your cluster. You'll need to install an `HTTPScaledObject` for each individual `Deployment` you want to scale. For more on how to do that, please see the [walkthrough](./walkthrough.md).
|
||||
>Installing the HTTP add on won't affect any running workloads in your cluster. You'll need to install an `HTTPScaledObject` for each individual `Deployment` you want to scale. For more on how to do that, please see the [walkthrough](./walkthrough.md).
|
||||
|
||||
### Customizing the Installation
|
||||
|
||||
There are a few values that you can pass to the above `helm install` command by including `--set NAME=VALUE` on the command line.
|
||||
|
||||
- `images.operator` - the name of the operator's Docker image, not including the tag. Defaults to [`ghcr.io/kedacore/http-add-on-operator`](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator).
|
||||
- `images.scaler` - the name of the scaler's Docker image, not including the tag. Defaults to [`ghcr.io/kedacore/http-add-on-scaler`](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler).
|
||||
- `images.interceptor` - the name of the interceptor's Docker image, not including the tag. Defaults to [`ghcr.io/kedacore/http-add-on-interceptor`](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor).
|
||||
- `images.operator` - the name of the operator's Docker image, not including the tag. Defaults to [`ghcr.io/kedacore/http-add-on-operator`](https://github.com/orgs/kedacore/packages/container/package/http-add-on-operator).
|
||||
- `images.scaler` - the name of the scaler's Docker image, not including the tag. Defaults to [`ghcr.io/kedacore/http-add-on-scaler`](https://github.com/orgs/kedacore/packages/container/package/http-add-on-scaler).
|
||||
- `images.interceptor` - the name of the interceptor's Docker image, not including the tag. Defaults to [`ghcr.io/kedacore/http-add-on-interceptor`](https://github.com/orgs/kedacore/packages/container/package/http-add-on-interceptor).
|
||||
- `images.tag` - the tag to use for all the above docker images. Defaults to the [latest stable release](https://github.com/kedacore/http-add-on/releases).
|
||||
|
||||
>If you want to install the latest build of the HTTP Add-on, set `version` to `canary`:
|
||||
>If you want to install the latest build of the HTTP Add on, set `version` to `canary`:
|
||||
|
||||
```console
|
||||
helm install http-add-on kedacore/keda-add-ons-http --create-namespace --namespace ${NAMESPACE} --set images.tag=canary
|
||||
```
|
||||
|
||||
For an exhaustive list of configuration options, see the official HTTP Add-on chart [values.yaml file](https://github.com/kedacore/charts/blob/master/http-add-on/values.yaml).
|
||||
For an exhaustive list of configuration options, see the official HTTP Addon chart [values.yaml file](https://github.com/kedacore/charts/blob/master/http-add-on/values.yaml).
|
||||
|
||||
### A Note for Developers and Local Cluster Users
|
||||
|
||||
Local clusters like [Microk8s](https://microk8s.io/) offer in-cluster image registries. These are popular tools to speed up and ease local development. If you use such a tool for local development, we recommend that you use and push your images to its local registry. When you do, you'll want to set your `images.*` variables to the address of the local registry. In the case of MicroK8s, that address is `localhost:32000` and the `helm install` command would look like the following:
|
||||
|
||||
```console
|
||||
```shell
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
helm repo update
|
||||
helm pull kedacore/keda-add-ons-http --untar --untardir ./charts
|
||||
helm upgrade kedahttp ./charts/keda-add-ons-http \
|
||||
--install \
|
||||
--namespace ${NAMESPACE} \
|
||||
--create-namespace \
|
||||
--set image=localhost:32000/keda-http-operator \
|
||||
--set images.scaler=localhost:32000/keda-http-scaler \
|
||||
--set images.interceptor=localhost:32000/keda-http-interceptor
|
||||
--install \
|
||||
--namespace ${NAMESPACE} \
|
||||
--create-namespace \
|
||||
--set image=localhost:32000/keda-http-operator \
|
||||
--set images.scaler=localhost:32000/keda-http-scaler \
|
||||
--set images.interceptor=localhost:32000/keda-http-interceptor
|
||||
```
|
||||
|
||||
## Compatibility Table
|
||||
|
||||
| HTTP Add-On version | KEDA version | Kubernetes version |
|
||||
|---------------------|-------------------|--------------------|
|
||||
| main | v2.16 | v1.30 - v1.32 |
|
||||
| 0.10.0 | v2.16 | v1.30 - v1.32 |
|
||||
| 0.9.0 | v2.16 | v1.29 - v1.31 |
|
||||
| 0.8.0 | v2.14 | v1.27 - v1.29 |
|
||||
| 0.7.0 | v2.13 | v1.27 - v1.29 |
|
||||
| 0.6.0 | v2.12 | v1.26 - v1.28 |
|
||||
| 0.5.1 | v2.10 | v1.24 - v1.26 |
|
||||
| 0.5.0 | v2.9 | v1.23 - v1.25 |
|
||||
|
||||
## Next Steps
|
||||
|
||||
Now that you're finished installing KEDA and the HTTP Add-on, please proceed to the [walkthrough](./walkthrough.md) to test out your new installation.
|
||||
|
||||
[Go back to landing page](./)
|
||||
Now that you're finished installing KEDA and the HTTP Add On, please proceed to the [walkthrough](./walkthrough.md) to test out your new installation.
|
||||
|
|
|
@ -1,94 +0,0 @@
|
|||
# Integrations
|
||||
|
||||
## Istio
|
||||
|
||||
### Configuration Steps
|
||||
|
||||
1. **Proxy Service in Virtual Service:**
|
||||
|
||||
- Within the Istio virtual service definition, add a proxy service as a route destination.
|
||||
   - Set the host of this proxy service to `keda-add-ons-http-interceptor-proxy` (the KEDA HTTP Addon interceptor service).
|
||||
   - Set the port to `8080` (the default interceptor port).
|
||||
|
||||
**Example yaml**
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: example
|
||||
namespace: default
|
||||
spec:
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: keda-add-ons-http-interceptor-proxy
|
||||
port: 8080
|
||||
```
|
||||
|
||||
2. **Namespace Alignment:**
|
||||
|
||||
- Ensure that both the KEDA HTTP Addon and the Istio virtual service are deployed within the same Kubernetes namespace. This ensures proper communication between the components.
|
||||
|
||||
### Behavior
|
||||
|
||||
- When a user makes a request, the Istio virtual service routes it to the KEDA HTTP Addon interceptor service.
|
||||
- The interceptor service captures request metrics and relays them to the KEDA scaler component.
|
||||
- Based on these metrics and scaling rules defined in the KEDA configuration, the KEDA scaler automatically scales the target workload (e.g., a deployment) up or down (including scaling to zero).
|
||||
|
||||
### Troubleshooting Tips
|
||||
|
||||
1. **Error: `context marked done while waiting for workload reach > 0 replicas`**
|
||||
|
||||
- This error indicates that the `KEDA_CONDITION_WAIT_TIMEOUT` value (default: 20 seconds) might be too low. The workload scaling process may not be complete within this timeframe.
|
||||
- To increase the timeout:
|
||||
     - If using Helm, adjust the `interceptor.replicas.waitTimeout` parameter (see reference below).
|
||||
- Reference: [https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L139](https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L139)
|
||||
|
||||
2. **502 Errors with POST Requests:**
|
||||
|
||||
- You might encounter 502 errors during POST requests when the request is routed through the interceptor service. This could be due to insufficient timeout settings.
|
||||
- To adjust timeout parameters:
|
||||
- If using Helm, modify the following parameters (see reference below):
|
||||
- `KEDA_HTTP_CONNECT_TIMEOUT`
|
||||
- `KEDA_RESPONSE_HEADER_TIMEOUT`
|
||||
- `KEDA_HTTP_EXPECT_CONTINUE_TIMEOUT`
|
||||
- Reference: [https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L152](https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L152)
|
||||
|
||||
3. **Immediate Scaling Down to Zero:**
|
||||
- If `minReplica` is set to 0 in the HTTPScaledObject, the application will immediately scale down to 0.
|
||||
- There's currently no built-in mechanism to delay this initial scaling.
|
||||
- A PR is in progress to add this support: [https://github.com/kedacore/keda/pull/5478](https://github.com/kedacore/keda/pull/5478)
|
||||
- As a workaround, keep `minReplica` initially as 1 and update it to 0 after the desired delay.
|
||||
|
||||
---
|
||||
|
||||
## Azure Front Door
|
||||
|
||||
### Configuration Steps
|
||||
|
||||
1. **Service Setup in Front Door:**
|
||||
- Set up Azure Front Door to route traffic to your AKS cluster.
|
||||
- Ensure that the `Origin Host` header matches the actual AKS host. Front Door enforces case-sensitive routing, so configure the `Origin Host` exactly as the AKS host name.
|
||||
|
||||
2. **KEDA HTTP Add-on Integration:**
|
||||
- Use an `HTTPScaledObject` to manage scaling based on incoming traffic.
|
||||
- Front Door should route traffic to the KEDA HTTP Add-on interceptor service in your AKS cluster.
|
||||
|
||||
3. **Case-Sensitive Hostnames:**
|
||||
- Be mindful that Azure Front Door treats the `Origin Host` header in a case-sensitive manner.
|
||||
- Ensure consistency between the AKS ingress hostname (e.g., `foo.bar.com`) and Front Door configuration.
|
||||
|
||||
### Troubleshooting Tips
|
||||
|
||||
- **404 Error for Hostnames with Different Case:**
|
||||
- Requests routed with inconsistent casing (e.g., `foo.Bar.com` vs. `foo.bar.com`) will trigger 404 errors. Make sure the `Origin Host` header matches the AKS ingress host exactly.
|
||||
- If you encounter errors like `PANIC=value method k8s.io/apimachinery/pkg/types.NamespacedName.MarshalLog called using nil *NamespacedName pointer`, verify the `Origin Host` header configuration.
|
||||
|
||||
### Expected Behavior
|
||||
|
||||
- Azure Front Door routes traffic to AKS based on a case-sensitive host header.
|
||||
- The KEDA HTTP Add-on scales the workload in response to traffic, based on predefined scaling rules.
|
||||
|
||||
|
||||
---
|
|
@ -1,62 +0,0 @@
|
|||
# Configuring metrics for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
### Exportable metrics:
|
||||
* **Pending request count** - the number of pending requests for a given host.
|
||||
* **Total request count** - the total number of requests for a given host with method, path and response code attributes.
|
||||
|
||||
There are currently 2 supported methods for exposing metrics from the interceptor proxy service - via a Prometheus compatible metrics endpoint or by pushing metrics to an OTEL HTTP collector.
|
||||
|
||||
### Configuring the Prometheus compatible metrics endpoint
|
||||
When configured, the interceptor proxy can expose metrics on a Prometheus compatible endpoint.
|
||||
|
||||
This endpoint can be enabled by setting the `OTEL_PROM_EXPORTER_ENABLED` environment variable to `true` on the interceptor deployment (`true` by default) and by setting `OTEL_PROM_EXPORTER_PORT` to an unused port for the endpoint to be made available on (`2223` by default).
|
||||
|
||||
### Configuring the OTEL HTTP exporter
|
||||
When configured, the interceptor proxy can export metrics to an OTEL HTTP collector.
|
||||
|
||||
The OTEL exporter can be enabled by setting the `OTEL_EXPORTER_OTLP_METRICS_ENABLED` environment variable to `true` on the interceptor deployment (`false` by default). When enabled the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable must also be configured so the exporter knows what collector to send the metrics to (e.g. http://opentelemetry-collector.open-telemetry-system:4318).
|
||||
|
||||
If you need to provide any headers such as authentication details in order to utilise your OTEL collector you can add them into the `OTEL_EXPORTER_OTLP_HEADERS` environment variable. The frequency at which the metrics are exported can be configured by setting `OTEL_METRIC_EXPORT_INTERVAL` to the number of seconds you require between each export interval (`30` by default).
|
||||
|
||||
# Configuring TLS for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
The interceptor proxy has the ability to run both a HTTP and HTTPS server simultaneously to allow you to scale workloads that use either protocol. By default, the interceptor proxy will only serve over HTTP, but this behavior can be changed by configuring the appropriate environment variables on the deployment.
|
||||
|
||||
The TLS server can be enabled by setting the environment variable `KEDA_HTTP_PROXY_TLS_ENABLED` to `true` on the interceptor deployment (`false` by default). The TLS server will start on port `8443` by default, but this can be configured by setting `KEDA_HTTP_PROXY_TLS_PORT` to your desired port number. The TLS server will require valid TLS certificates to start, the path to the certificates can be configured via the `KEDA_HTTP_PROXY_TLS_CERT_PATH` and `KEDA_HTTP_PROXY_TLS_KEY_PATH` environment variables (`/certs/tls.crt` and `/certs/tls.key` by default).
|
||||
|
||||
For setting multiple TLS certs, set `KEDA_HTTP_PROXY_TLS_CERT_STORE_PATHS` with comma-separated list of directories that will be recursively searched for any valid cert/key pairs. Currently, two naming patterns are supported
|
||||
* `XYZ.crt` + `XYZ.key` - this is a convention when using Kubernetes Secrets of type tls
|
||||
* `XYZ.pem` + `XYZ-key.pem`
|
||||
|
||||
To disable certificate chain verification, set `KEDA_HTTP_PROXY_TLS_SKIP_VERIFY` to `true` (`false` by default)
|
||||
|
||||
The matching between certs and requests is performed during the TLS ClientHello message, where the SNI service name is compared to the SANs provided in each cert, and the first matching cert will be used for the rest of the TLS handshake.
|
||||
# Configuring tracing for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
### Supported Exporters:
|
||||
* **console** - The console exporter is useful for development and debugging tasks, and is the simplest to set up.
|
||||
* **http/protobuf** - To send trace data to an OTLP endpoint (like the collector or Jaeger >= v1.35.0) you’ll want to configure an OTLP exporter that sends to your endpoint.
|
||||
* **grpc** - To configure the exporter to send trace data over a gRPC connection to an OTLP endpoint (like the collector or Jaeger >= v1.35.0) you’ll want to configure an OTLP exporter that sends to your endpoint.
|
||||
|
||||
### Configuring tracing with console exporter
|
||||
|
||||
To enable tracing with the console exporter, the `OTEL_EXPORTER_OTLP_TRACES_ENABLED` environment variable should be set to `true` on the interceptor deployment. (`false` by default).
|
||||
Secondly set `OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` to `console` (`console` by default). Other protocols include (`http/protobuf` and `grpc`).
|
||||
Finally set `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` to `"http://localhost:4318/v1/traces"` (`"http://localhost:4318/v1/traces"` by default).
|
||||
|
||||
|
||||
### Configuring tracing with OTLP exporter
|
||||
When configured, the interceptor proxy can export traces to an OTEL HTTP collector.
|
||||
|
||||
To enable tracing with otlp exporter, the `OTEL_EXPORTER_OTLP_TRACES_ENABLED` environment variable should be set to `true` on the interceptor deployment. (`false` by default).
|
||||
Secondly set `OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` to `otlphttp` (`console` by default). Other protocols include (`http/protobuf` and `grpc`)
|
||||
Finally set `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` to the collector to send the traces to (e.g. http://opentelemetry-collector.open-telemetry-system:4318/v1/traces) (`"http://localhost:4318/v1/traces"` by default).
|
||||
NOTE: the full path is required to be set, including the scheme, host, port and path (e.g. `http://<host>:<port>/v1/traces`)
|
||||
|
||||
|
||||
Optional variables
|
||||
`OTEL_EXPORTER_OTLP_HEADERS` - To pass any extra headers to the spans to utilise your OTEL collector e.g. authentication details (`"key1=value1,key2=value2"`)
|
||||
`OTEL_EXPORTER_OTLP_TRACES_INSECURE` - To send traces to the tracing endpoint via HTTP rather than HTTPS (`false` by default)
|
||||
`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - The batcher timeout in seconds to send batch of data points (`5` by default)
|
||||
|
||||
### Configuring Service Failover
|
|
@ -1,13 +0,0 @@
|
|||
# KEDA HTTP Add-On
|
||||
|
||||
Here is an overview of detailed documentation:
|
||||
|
||||
- [Why build an HTTP add-on?](scope.md)
|
||||
- [Install](install.md)
|
||||
- [Design](design.md)
|
||||
- [Use-Cases](use_cases.md)
|
||||
- [Walkthrough](walkthrough.md)
|
||||
- [Operate](operate.md)
|
||||
- [Developing](developing.md)
|
||||
- [Integrations](integrations.md)
|
||||
- [FAQ](faq.md)
|
|
@ -1,6 +1,6 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.1.0` version.
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the latest version
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
|
@ -24,11 +24,11 @@ This is the primary and most important part of the `spec` because it describes (
|
|||
|
||||
### `deployment`
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP add on will manage a `ScaledObject` internally.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
This is the name of the service to route traffic to. The add on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
|
@ -1,136 +0,0 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.10.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation will disable the ScaledObject generation and management while keeping the routing and metrics available. This is done by removing the current ScaledObject if it has already been created, allowing the use of user-managed ScaledObjects that point to the add-on scaler directly (supporting all the ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with few and random requests could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive between themselves.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Requests information is stored in memory, so aggregating long periods (longer than 5 minutes) or too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although updating `window` and/or `granularity` is doable, the process just replaces all the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -1,53 +0,0 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.2.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
host: "myhost.com"
|
||||
targetPendingRequests: 100
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`, and we'll focus on the `spec` field.
|
||||
|
||||
## `host`
|
||||
|
||||
This is the host to apply this scaling rule to. All incoming requests with this value in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s `Deployment` will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What `Deployment` to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment`
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it. Conversely, if your application has below this number of pending requests, the HTTP add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
|
@ -1,53 +0,0 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.3.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
host: "myhost.com"
|
||||
targetPendingRequests: 100
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`, and we'll focus on the `spec` field.
|
||||
|
||||
## `host`
|
||||
|
||||
This is the host to apply this scaling rule to. All incoming requests with this value in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s `Deployment` will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What `Deployment` to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment`
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it. Conversely, if your application has below this number of pending requests, the HTTP add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
|
@ -1,73 +0,0 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.6.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`, and we'll focus on the `spec` field.
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment`
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it. Conversely, if your application has below this number of pending requests, the HTTP add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with few and random requests could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
|
@ -1,87 +0,0 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.7.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`, and we'll focus on the `spec` field.
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it. Conversely, if your application has below this number of pending requests, the HTTP add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with few and random requests could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
|
@ -1,144 +0,0 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.8.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation will disable the ScaledObject generation and management while keeping the routing and metrics available. This is done by removing the current ScaledObject if it has already been created, allowing the use of user-managed ScaledObjects that point to the add-on scaler directly (supporting all the ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it. Conversely, if your application has below this number of pending requests, the HTTP add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with few and random requests could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive between themselves.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Requests information is stored in memory, so aggregating long periods (longer than 5 minutes) or too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although updating `window` and/or `granularity` is doable, the process just replaces all the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -1,136 +0,0 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.9.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation disables the ScaledObject generation and management while keeping the routing and metrics available. This is done by removing the current ScaledObject if it has already been created, allowing the use of user-managed ScaledObjects that point to the add-on scaler directly (supporting all the ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same workload as you entered in the `name` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on KEDA side based on in-flight requests, so workloads with little and random traffic could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive between themselves.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Requests information is stored in memory, so aggregating long periods (longer than 5 minutes) or using too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although updating `window` and/or `granularity` is doable, the process just replaces all the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -1,136 +0,0 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `vX.X.X` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation disables the ScaledObject generation and management while keeping the routing and metrics available. This is done by removing the current ScaledObject if it has already been created, allowing the use of user-managed ScaledObjects that point to the add-on scaler directly (supporting all the ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same workload as you entered in the `name` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on KEDA side based on in-flight requests, so workloads with little and random traffic could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive between themselves.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Requests information is stored in memory, so aggregating long periods (longer than 5 minutes) or using too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although updating `window` and/or `granularity` is doable, the process just replaces all the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -1,20 +1,18 @@
|
|||
# Why build an HTTP add-on?
|
||||
# The Scope of the KEDA HTTP Add On
|
||||
|
||||
Running production HTTP servers in Kubernetes is complicated and involves many pieces of infrastructure. The HTTP Add-on (called the "add-on" hereafter) aims to autoscale these HTTP servers, but does not aim to extend beyond that scope. Generally, this project only aims to do two things:
|
||||
Running production HTTP servers in Kubernetes is complicated and involves many pieces of infrastructure. The HTTP Add On (called the "add on" hereafter) aims to autoscale these HTTP servers, but does not aim to extend beyond that scope. Generally, this project only aims to do two things:
|
||||
|
||||
1. Autoscale arbitrary HTTP servers based on the volume of traffic incoming to it, including to zero.
|
||||
2. Route HTTP traffic from a given source to an arbitrary HTTP server, as far as we need to efficiently accomplish (1).
|
||||
1. Autoscale arbitrary HTTP servers based on the volume of traffic incoming to it, including to zero
|
||||
2. Route HTTP traffic from a given source to an arbitrary HTTP server, as far as we need to efficiently accomplish (1)
|
||||
|
||||
The add-on only provides this functionality to workloads that _opt in_ to it. We provide more detail below.
|
||||
The add on only provides this functionality to workloads that _opt in_ to it. We provide more detail below.
|
||||
|
||||
### Autoscaling HTTP
|
||||
## Autoscaling HTTP
|
||||
|
||||
To autoscale HTTP servers, the HTTP Add-on needs access to metrics that it can report to KEDA, so that KEDA itself can scale the target HTTP server. The mechanism by which the add-on does this is to use an [interceptor](../interceptor) and [external scaler](../scaler). An operator watches for a `HTTPScaledObject` resource and creates these components as necessary.
|
||||
To autoscale HTTP servers, the HTTP Add On needs access to metrics that it can report to KEDA, so that KEDA itself can scale the target HTTP server. The mechanism by which the add on does this is to use an [interceptor](../interceptor) and [external scaler](../scaler). An operator watches for a `HTTPScaledObject` resource and creates these components as necessary.
|
||||
|
||||
The HTTP Add-on only includes the necessary infrastructure to respond to new, modified, or deleted `HTTPScaledObject`s, and when one is created, the add-on only creates the infrastructure needed specifically to accomplish autoscaling.
|
||||
The HTTP Add On only includes the necessary infrastructure to respond to new, modified, or deleted `HTTPScaledObject`s, and when one is created, the add on only creates the infrastructure needed specifically to accomplish autoscaling.
|
||||
|
||||
>As stated above, the current architecture requires an "interceptor", which needs to proxy incoming HTTP requests in order to provide autoscaling metrics. That means the scope of the HTTP Add-on currently needs to include the app's network traffic routing system.
|
||||
>As stated above, the current architecture requires an "interceptor", which needs to proxy incoming HTTP requests in order to provide autoscaling metrics. That means the scope of the HTTP add-on currently needs to include the app's network traffic routing system.
|
||||
|
||||
To learn more, we recommend reading about our [design](design.md) or go through our [FAQ](faq.md).
|
||||
|
||||
[Go back to landing page](./)
|
||||
|
|
|
@ -19,23 +19,21 @@ Moving this application to Kubernetes may make sense for several reasons, but th
|
|||
|
||||
If the application _is_ being moved to Kubernetes, you would follow these steps to get it autoscaling and routing with KEDA-HTTP:
|
||||
|
||||
- Create a workload and `Service`
|
||||
- [Install](./install.md) the HTTP Add-on
|
||||
- Create a single `HTTPScaledObject` in the same namespace as the workload and `Service` you created
|
||||
- Create a `Deployment` and `Service`
|
||||
- [Install](./install.md) the HTTP Add On
|
||||
- Create a single `HTTPScaledObject` in the same namespace as the `Deployment` and `Service` you created
|
||||
|
||||
At that point, the operator will create the proper autoscaling and routing infrastructure behind the scenes and the application will be ready to scale. Any request received by the interceptor with the proper host will be routed to the proper backend.
|
||||
At that point, the operator will create the proper autoscaling and routing infrastructure behind the scenes and the application will be ready to scale.
|
||||
|
||||
## Current HTTP Server in Kubernetes
|
||||
|
||||
In this use case, an HTTP application is already running in Kubernetes, possibly (but not necessarily) already serving in production to the public internet.
|
||||
|
||||
In this case, the reasoning for adding the HTTP Add-on would be clear - adding autoscaling based on incoming HTTP traffic.
|
||||
In this case, the reasoning for adding the HTTP Add On would be clear - adding autoscaling based on incoming HTTP traffic.
|
||||
|
||||
### How You'd Move This Application to KEDA-HTTP
|
||||
|
||||
Getting the HTTP Add-on working can be done transparently and without downtime to the application:
|
||||
Getting the HTTP add on working can be done transparently and without downtime to the application:
|
||||
|
||||
- [Install](./install.md) the add-on. This step will have no effect on the running application.
|
||||
- Create a new `HTTPScaledObject`. This step activates autoscaling for the workload that you specify and the application will immediately start scaling up and down based on incoming traffic through the interceptor that was created.
|
||||
|
||||
[Go back to landing page](./)
|
||||
- [Install](./install.md) the add on. This step will have no effect on the running application.
|
||||
- Create a new `HTTPScaledObject`. This step activates autoscaling for the `Deployment` that you specify and the application will immediately start scaling up and down based on incoming traffic through the interceptor that was created.
|
||||
|
|
|
@ -1,58 +1,49 @@
|
|||
# Getting Started With The HTTP Add-on
|
||||
# Getting Started With The HTTP Add On
|
||||
|
||||
After you've installed KEDA and the HTTP Add-on (this project, we'll call it the "add-on" for short), this document will show you how to get started with an example app.
|
||||
After you've installed KEDA and the HTTP Add On (this project, we'll call it the "add on" for short), this document will show you how to get started with an example app.
|
||||
|
||||
If you haven't installed KEDA and the HTTP Add-on (this project), please do so first. Follow instructions [install.md](./install.md) to complete your installation.
|
||||
|
||||
>Before you continue, make sure that you have your `NAMESPACE` environment variable set to the same value as it was when you installed.
|
||||
If you haven't installed KEDA and the HTTP Add On (this project), please do so first. Follow instructions [install.md](./install.md) to complete your installation. Before you continue, make sure that you have your `NAMESPACE` environment variable set to the same value as it was when you installed.
|
||||
|
||||
## Creating An Application
|
||||
|
||||
You'll need to install a `Deployment` and `Service` first. You'll tell the add-on to begin scaling it up and down after this step. We've provided a [Helm](https://helm.sh) chart in this repository that you can use to try it out. Use this command to create the resources you need.
|
||||
You'll need to install a `Deployment` and `Service` first. You'll tell the add on to begin scaling it up and down after this step. Use the below [Helm](https://helm.sh) command to create the resources you need.
|
||||
|
||||
```console
|
||||
```shell
|
||||
helm install xkcd ./examples/xkcd -n ${NAMESPACE}
|
||||
```
|
||||
|
||||
#### xkcd exposed with GatewayAPI
|
||||
Alternatively, if you'd like to try the add-on along with GatewayAPI, you can first install the GatewayAPI CRDs and a GatewayAPI implementation, for example as described in a [section below](#installing-and-using-the-eg-gatewayapi), and install the application with `httproute=true`, which will deploy a properly configured `HTTPRoute` too.
|
||||
|
||||
```console
|
||||
helm install xkcd ./examples/xkcd -n ${NAMESPACE} --set httproute=true
|
||||
```
|
||||
|
||||
You'll need to clone the repository to get access to this chart. If you have your own workload and `Service` installed, you can go right to creating an `HTTPScaledObject` in the next section.
|
||||
|
||||
>If you are running KEDA and the HTTP Add-on in cluster-global mode, you can install the XKCD chart in any namespace you choose. If you do so, make sure you add `--set ingressNamespace=${NAMESPACE}` to the above installation command.
|
||||
You'll need to clone the repository to get access to this chart. If you have your own `Deployment` and `Service` installed, you can go right to creating an `HTTPScaledObject` in the next section.
|
||||
|
||||
>To remove the app, run `helm delete xkcd -n ${NAMESPACE}`
|
||||
|
||||
## Creating an `HTTPScaledObject`
|
||||
|
||||
You interact with the operator via a CRD called `HTTPScaledObject`. This CRD object instructs interceptors to forward requests for a given host to your app's backing `Service`. To get an example app up and running, read the notes below and then run the subsequent command from the root of this repository.
|
||||
You interact with the operator via a CRD called `HTTPScaledObject`. This CRD object points the interceptor to your app's backing `Service`. To get an example app up and running, read the notes below and then run the subsequent command from the root of this repository.
|
||||
|
||||
```console
|
||||
kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
|
||||
```shell
|
||||
kubectl create -f -n $NAMESPACE examples/v0.0.2/httpscaledobject.yaml
|
||||
```
|
||||
|
||||
>If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](./ref/v0.10.0/http_scaled_object.md).
|
||||
>If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](./ref/http_scaled_object.md).
|
||||
|
||||
## Testing Your Installation
|
||||
|
||||
You've now installed a web application and activated autoscaling by creating an `HTTPScaledObject` for it. For autoscaling to work properly, HTTP traffic needs to route through the `Service` that the add-on has set up. You can use `kubectl port-forward` to quickly test things out:
|
||||
You've now installed a web application and activated autoscaling by creating an `HTTPScaledObject` for it. For autoscaling to work properly, HTTP traffic needs to route through the `Service` that the add on has set up. You can use `kubectl port-forward` to quickly test things out:
|
||||
|
||||
```console
|
||||
kubectl port-forward svc/keda-add-ons-http-interceptor-proxy -n ${NAMESPACE} 8080:8080
|
||||
```shell
|
||||
k port-forward svc/xkcd-interceptor-proxy -n ${NAMESPACE} 8080:80
|
||||
```
|
||||
|
||||
### Routing to the Right `Service`
|
||||
|
||||
As said above, you need to route your HTTP traffic to the `Service` that the add-on has created during the installation. If you have existing systems - like an ingress controller - you'll need to anticipate the name of these created `Service`s. Each one will be named consistently like so, in the same namespace as the `HTTPScaledObject` and your application (i.e. `$NAMESPACE`):
|
||||
As said above, you need to route your HTTP traffic to the `Service` that the add on has created. If you have existing systems - like an ingress controller - you'll need to anticipate the name of these created `Service`s. Each one will be named consistently like so, in the same namespace as the `HTTPScaledObject` and your application (i.e. `$NAMESPACE`):
|
||||
|
||||
```console
|
||||
keda-add-ons-http-interceptor-proxy
|
||||
```shell
|
||||
<deployment name>-interceptor-proxy
|
||||
```
|
||||
|
||||
>The service will always be a `ClusterIP` type and will be created in the same namespace as the `HTTPScaledObject` you created.
|
||||
|
||||
#### Installing and Using the [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/#using-helm) Ingress Controller
|
||||
|
||||
As mentioned above, the `Service` that the add-on creates will be inaccessible over the network from outside of your Kubernetes cluster.
|
||||
|
@ -61,7 +52,7 @@ While you can access it via the `kubectl port-forward` command above, we recomme
|
|||
|
||||
First, install the controller using the commands below. These commands use Helm v3. For other installation methods, see the [installation page](https://kubernetes.github.io/ingress-nginx/deploy/).
|
||||
|
||||
```console
|
||||
```shell
|
||||
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
|
||||
helm repo update
|
||||
helm install ingress-nginx ingress-nginx/ingress-nginx -n ${NAMESPACE}
|
||||
|
@ -69,118 +60,6 @@ helm install ingress-nginx ingress-nginx/ingress-nginx -n ${NAMESPACE}
|
|||
|
||||
An [`Ingress`](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource was already created as part of the [xkcd chart](../examples/xkcd/templates/ingress.yaml), so the installed NGINX ingress controller will initialize, detect the `Ingress`, and begin routing to the xkcd interceptor `Service`.
|
||||
|
||||
>NOTE: You may have to create an external service `type: ExternalName` pointing to the interceptor namespace and use it from `Ingress` manifest.
|
||||
|
||||
When you're ready, please run `kubectl get svc -n ${NAMESPACE}`, find the `ingress-nginx-controller` service, and copy and paste its `EXTERNAL-IP`. This is the IP address that your application will be running at on the public internet.
|
||||
|
||||
>Note: you should go further and set your DNS records appropriately and set up a TLS certificate for this IP address. Instructions to do that are out of scope of this document, though.
|
||||
|
||||
#### Installing and Using the [eg](https://gateway.envoyproxy.io/latest/install/install-helm/) GatewayAPI
|
||||
|
||||
Similarly to exposing your service with `Ingress`, you can expose your service with `HTTPRoute` as part of [GatewayAPI](https://github.com/kubernetes-sigs/gateway-api). The following steps describe how to install one of many GatewayAPI implementations - Envoy Gateway.
|
||||
You should install the `xkcd` helm chart with `--set httproute=true` as [explained above](#xkcd-exposed-with-gatewayapi).
|
||||
|
||||
The Helm chart is publicly available and hosted on Docker Hub
|
||||
```console
|
||||
helm install eg oci://docker.io/envoyproxy/gateway-helm --version v1.0.2 -n envoy-gateway-system --create-namespace
|
||||
```
|
||||
Before creating new `Gateway`, wait for Envoy Gateway to become available
|
||||
```console
|
||||
kubectl wait --timeout=5m -n envoy-gateway-system deployment/envoy-gateway --for=condition=Available
|
||||
```
|
||||
Create `GatewayClass` and `Gateway`
|
||||
```console
|
||||
cat << 'EOF' | kubectl apply -f -
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: GatewayClass
|
||||
metadata:
|
||||
name: eg
|
||||
spec:
|
||||
controllerName: gateway.envoyproxy.io/gatewayclass-controller
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: eg
|
||||
namespace: envoy-gateway-system
|
||||
spec:
|
||||
gatewayClassName: eg
|
||||
listeners:
|
||||
- name: http
|
||||
protocol: HTTP
|
||||
port: 80
|
||||
allowedRoutes:
|
||||
namespaces:
|
||||
from: All
|
||||
EOF
|
||||
```
|
||||
> 💡 Note the `ExternalName` type `Service` used to route traffic from `Ingress` defined in one namespace to the interceptor `Service` defined in another is not necessary with GatewayAPI.
|
||||
> The GatewayAPI defines [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/api-types/referencegrant/) to allow `HTTPRoutes` referencing `Services` and other types of backend from different `Namespaces`.
|
||||
|
||||
You can see the IP address for following rest of the document with
|
||||
```console
|
||||
kubectl get gateway -n envoy-gateway-system
|
||||
```
|
||||
For example (your IP will likely differ)
|
||||
```
|
||||
NAME CLASS ADDRESS PROGRAMMED AGE
|
||||
eg eg 172.24.255.201 True 16s
|
||||
```
|
||||
|
||||
### Making an HTTP Request to your App
|
||||
|
||||
Now that you have your application running and your ingress configured, you can issue an HTTP request. To do so, you'll need to know the IP address to request. If you're using an ingress controller, that is the IP of the ingress controller's `Service`. If you're using a "raw" `Service` with `type: LoadBalancer`, that is the IP address of the `Service` itself.
|
||||
|
||||
Regardless, you can use the below `curl` command to make a request to your application:
|
||||
|
||||
```console
|
||||
curl -H "Host: myhost.com" <Your IP>/test
|
||||
```
|
||||
|
||||
>Note the `-H` flag above to specify the `Host` header. This is needed to tell the interceptor how to route the request. If you have a DNS name set up for the IP, you don't need this header.
|
||||
|
||||
You can also use port-forward to interceptor service for making the request:
|
||||
|
||||
```console
|
||||
kubectl port-forward svc/keda-add-ons-http-interceptor-proxy -n ${NAMESPACE} 8080:8080
|
||||
curl -H "Host: myhost.com" localhost:8080/test
|
||||
```
|
||||
|
||||
### Integrating HTTP Add-On Scaler with other KEDA scalers
|
||||
|
||||
For scenarios where you want to integrate the HTTP Add-On scaler with other KEDA scalers, you can set the `"httpscaledobject.keda.sh/skip-scaledobject-creation"` annotation to true on your `HTTPScaledObject`. The reconciler will then skip the KEDA core ScaledObject creation, which will allow you to create your own `ScaledObject` and add the HTTP scaler as one of your triggers.
|
||||
|
||||
> 💡 Ensure that your ScaledObject is created with a different name than the `HTTPScaledObject` to ensure your ScaledObject is not removed by the reconciler.
|
||||
|
||||
If you don't know how to set the external scaler in the ScaledObject, you can deploy first your HTTPScaledObject with no annotation set in order to obtain the latest trigger spec to use on your own managed ScaledObject.
|
||||
|
||||
1. Deploy your `HTTPScaledObject` with annotation set to false
|
||||
|
||||
```console
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
```
|
||||
|
||||
2. Take a copy of the current external-push trigger spec from the generated ScaledObject.
|
||||
|
||||
For example:
|
||||
|
||||
```console
|
||||
triggers:
|
||||
- type: external-push
|
||||
metadata:
|
||||
httpScaledObject: YOUR_HTTPSCALEDOBJECT_NAME
|
||||
scalerAddress: keda-add-ons-http-external-scaler.keda:9090
|
||||
```
|
||||
|
||||
3. Apply the `"httpscaledobject.keda.sh/skip-scaledobject-creation"` annotation with `true` and apply the change. This will remove the originally created `ScaledObject` allowing you to create your own.
|
||||
|
||||
```console
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "true"
|
||||
```
|
||||
|
||||
4. Add the `external-push` trigger taken from step 2 to your own ScaledObject and apply this.
|
||||
|
||||
|
||||
[Go back to landing page](./)
|
||||
|
|
|
@ -1,12 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -1,24 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -1,13 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
host: myhost.com
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -1,13 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
host: myhost.com
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -1,13 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
host: myhost.com
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -1,14 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -1,16 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -1,18 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -1,24 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -1,24 +0,0 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -30,12 +30,6 @@ spec:
|
|||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.args }}
|
||||
args:
|
||||
{{- range $arg := .Values.args }}
|
||||
- {{ $arg }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}-proxy
|
||||
labels:
|
||||
{{- include "xkcd.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: ExternalName
|
||||
externalName: keda-add-ons-http-interceptor-proxy.keda
|
|
@ -1,39 +0,0 @@
|
|||
{{- if .Values.httproute }}
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: HTTPRoute
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
spec:
|
||||
parentRefs:
|
||||
- name: eg
|
||||
namespace: envoy-gateway-system
|
||||
hostnames:
|
||||
{{- range .Values.hosts }}
|
||||
- {{ . | toString }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- backendRefs:
|
||||
- kind: Service
|
||||
name: keda-add-ons-http-interceptor-proxy
|
||||
namespace: keda
|
||||
port: 8080
|
||||
matches:
|
||||
- path:
|
||||
type: PathPrefix
|
||||
value: /
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
kind: ReferenceGrant
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
namespace: keda
|
||||
spec:
|
||||
from:
|
||||
- group: gateway.networking.k8s.io
|
||||
kind: HTTPRoute
|
||||
namespace: {{ .Release.Namespace }}
|
||||
to:
|
||||
- group: ""
|
||||
kind: Service
|
||||
name: keda-add-ons-http-interceptor-proxy
|
||||
{{- end }}
|
|
@ -3,21 +3,8 @@ apiVersion: http.keda.sh/v1alpha1
|
|||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
spec:
|
||||
{{- with .Values.hosts }}
|
||||
hosts:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.pathPrefixes }}
|
||||
pathPrefixes:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
scalingMetric:
|
||||
concurrency:
|
||||
targetValue: {{ .Values.targetPendingRequests }}
|
||||
scaleTargetRef:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
deployment: {{ include "xkcd.fullname" . }}
|
||||
service: {{ include "xkcd.fullname" . }}
|
||||
port: 8080
|
||||
replicas:
|
||||
|
|
|
@ -5,17 +5,13 @@ metadata:
|
|||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
{{- range .Values.hosts }}
|
||||
- host: {{ . | toString }}
|
||||
http:
|
||||
- http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: {{ include "xkcd.fullname" $ }}-proxy
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
port:
|
||||
number: 8080
|
||||
{{- end }}
|
||||
number: 80
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: "{{ include "xkcd.fullname" . }}-test-connection"
|
||||
labels:
|
||||
{{- include "xkcd.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
containers:
|
||||
- name: wget
|
||||
image: busybox
|
||||
command: ['wget']
|
||||
args: ['{{ include "xkcd.fullname" . }}:{{ .Values.service.port }}']
|
||||
restartPolicy: Never
|
|
@ -1,24 +1,10 @@
|
|||
replicaCount: 1
|
||||
hosts:
|
||||
- "myhost.com"
|
||||
- "myhost2.com"
|
||||
pathPrefixes:
|
||||
- "/path1"
|
||||
- "/path2"
|
||||
targetPendingRequests: 200
|
||||
# This is the namespace that the ingress should be installed
|
||||
# into. It should be set to the same namespace as the
|
||||
# KEDA HTTP componentry is installed in. Defaults to the Helm
|
||||
# chart release namespace
|
||||
ingressNamespace:
|
||||
|
||||
image:
|
||||
repository: registry.k8s.io/e2e-test-images/agnhost
|
||||
repository: arschles/xkcd
|
||||
pullPolicy: Always
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: "2.45"
|
||||
|
||||
args:
|
||||
- netexec
|
||||
tag: "latest"
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
|
@ -52,5 +38,8 @@ service:
|
|||
|
||||
autoscaling:
|
||||
http:
|
||||
minReplicas: 0
|
||||
minReplicas: 5
|
||||
maxReplicas: 10
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
|
|
159
go.mod
159
go.mod
|
@ -1,149 +1,22 @@
|
|||
module github.com/kedacore/http-add-on
|
||||
|
||||
go 1.24.3
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.4.3
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0
|
||||
github.com/kedacore/keda/v2 v2.17.1
|
||||
github.com/go-logr/logr v0.4.0
|
||||
github.com/golang/protobuf v1.5.2
|
||||
github.com/kelseyhightower/envconfig v1.4.0
|
||||
github.com/onsi/ginkgo/v2 v2.23.4
|
||||
github.com/onsi/gomega v1.37.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.36.0
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.uber.org/mock v0.5.2
|
||||
golang.org/x/sync v0.14.0
|
||||
google.golang.org/grpc v1.72.2
|
||||
google.golang.org/protobuf v1.36.6
|
||||
k8s.io/api v0.32.2
|
||||
k8s.io/apimachinery v0.32.2
|
||||
k8s.io/client-go v1.5.2
|
||||
k8s.io/code-generator v0.32.2
|
||||
k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979
|
||||
sigs.k8s.io/controller-runtime v0.19.7
|
||||
sigs.k8s.io/gateway-api v1.2.1
|
||||
sigs.k8s.io/kustomize/kustomize/v5 v5.6.0
|
||||
)
|
||||
|
||||
replace (
|
||||
// pin k8s.io to v0.31.7 & sigs.k8s.io/controller-runtime to v0.19.7
|
||||
github.com/google/cel-go => github.com/google/cel-go v0.20.1
|
||||
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.21.1
|
||||
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/common => github.com/prometheus/common v0.63.0
|
||||
k8s.io/api => k8s.io/api v0.31.7
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.31.7
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.31.7
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.31.7
|
||||
k8s.io/client-go => k8s.io/client-go v0.31.7
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.31.7
|
||||
k8s.io/component-base => k8s.io/component-base v0.31.7
|
||||
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
|
||||
k8s.io/metrics => k8s.io/metrics v0.31.6
|
||||
k8s.io/utils => k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.19.6
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.uber.org/automaxprocs v1.6.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/expr-lang/expr v1.17.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 // indirect
|
||||
github.com/go-errors/errors v1.5.1 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.1 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/prometheus/common v0.64.0
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/spf13/cobra v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.57.0
|
||||
go.opentelemetry.io/otel/metric v1.36.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.33.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.32.1 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
knative.dev/pkg v0.0.0-20250602175424-3c3a920206ea // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/cmd/config v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
github.com/labstack/echo/v4 v4.3.0
|
||||
github.com/magefile/mage v1.11.0
|
||||
github.com/onsi/ginkgo v1.16.4
|
||||
github.com/onsi/gomega v1.13.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
google.golang.org/grpc v1.33.2
|
||||
google.golang.org/protobuf v1.26.0
|
||||
k8s.io/api v0.20.4
|
||||
k8s.io/apimachinery v0.20.4
|
||||
k8s.io/client-go v0.20.2
|
||||
sigs.k8s.io/controller-runtime v0.8.3
|
||||
)
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue