Compare commits

162 Commits:

d032b8b4f1  9afe55b5fc  bf355649c6  29a6c2b509  dc863c6fcd  6a6adfb7ac
2317f75346  fe41713ec3  17b2af021d  d2bed33270  d891e6e5bd  30e1694baf
8f84195862  46884e237d  c34fc522fa  ee764f97ea  a99deeeb8b  30e3ecc2ea
850678c13a  616cab02d1  6fa0b98608  d15f4428cf  8a6ae99921  20478e7f2d
d7ef9d7b89  9b76f367e1  d159468a02  28974318d0  61d52ca769  d0e7840e9c
6d76063e54  ca6a4d6c02  8e95401e96  ddbb17ce0b  504b37ed69  54d0ec6247
b7d0a3e035  fbb0bc9bca  f8cde8c47e  a5f3349b17  317d47406e  87e713c8e7
4bfcd9fc6d  880eeb08eb  c92a3e2c56  55b029b409  6a30f1842d  dd40161cb0
15a1dae2d2  45f645f19e  ab140c5198  ade3bb090b  993b7bf242  fb3e48b94f
f5ab058701  5d2e0add55  48a1881a76  65b1d73d80  052ffce0a6  6abd03203b
08c811fe20  8a1d49050a  2601d92888  30b956b5c8  70aa9be886  459122382e
aa8ca2e481  ec5109d986  846ee80394  54671bddeb  81f7469ad1  272cd2d4fc
98770083ee  d64a750808  1e426128da  a86b13497e  cf2c4a98eb  84a9560331
1e27b395bd  44ac04e4e0  8ede929e2f  c96c106c72  166ce15b04  cc832ed264
e184bc5437  9f82ff979d  30e7571b19  b283195843  e04c24286e  f1f2707c63
0793ecec5a  5557cd8741  7d1740ab82  1c9f7348b8  38f50bfb0a  e3d2e81220
7feda00373  c5de19c52b  9366827350  0237611b6a  c7ccd3384e  f48b7d813c
3ee0445005  473c42c637  f7bb9f56f6  d0a564a9c1  a4f9f39ac5  a8c1258267
b37046ea47  d52daad348  e6896c2ea4  dbac89dc37  5b3063e388  a71b194ec5
849c62af1b  652c47f5da  5839732134  14839c3558  8b44922940  793a48d235
448a3deb10  513ecb5d74  5e7af24783  2b037b804b  adc868ff6a  55086474c9
8935f27f4f  8608805972  2c0a177f60  d337c8d4e4  6453b18638  d9277ab7fb
98ad59dd13  a59daf2452  b8f3972522  855547a7c6  584b890935  1aea9bf659
e658582052  6b0edeefb2  3bf119b3de  ec12921821  7908755121  15718d165a
f06fcb9c2a  900da11265  1e1ddb2934  a842764b96  814f33330d  214431250a
1bbbd17c06  60a76153e6  bd556c794f  1f2f54c560  2b7641fb9e  8ee27fdf6e
7f0572987b  c0b7baac56  2fa0be3f25  b3519ab615  56d295d47d  6021290239

---
@@ -3,7 +3,7 @@
 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
 #-------------------------------------------------------------------------------------------------------------

-FROM golang:1.20.8
+FROM golang:1.24.3

 # Avoid warnings by switching to noninteractive
 ENV DEBIAN_FRONTEND=noninteractive

@@ -55,7 +55,7 @@ RUN apt-get update \
     && go install honnef.co/go/tools/cmd/staticcheck@latest \
     && go install golang.org/x/tools/gopls@latest \
     # Install golangci-lint
-    && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.54.2 \
+    && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.0 \
     #
     # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user.
     && groupadd --gid $USER_GID $USERNAME \

---
@@ -52,10 +52,9 @@ body:
       label: "HTTP Add-on Version"
       description: "What version of the KEDA HTTP Add-on are you running?"
       options:
-        - "0.6.0"
-        - "0.5.0"
-        - "0.4.0"
-        - "0.3.0"
+        - "0.10.0"
+        - "0.9.0"
+        - "0.8.0"
         - "Other"
     validations:
       required: false

@@ -65,10 +64,10 @@ body:
       label: Kubernetes Version
       description: What version of Kubernetes that are you running?
       options:
-        - "1.28"
-        - "1.27"
-        - "1.26"
-        - "< 1.26"
+        - "1.32"
+        - "1.31"
+        - "1.30"
+        - "< 1.30"
         - "Other"
     validations:
       required: false

---
@@ -8,6 +8,10 @@ updates:
     labels:
       - enhancement
       - dependency-management
+    groups:
+      all-updates:
+        patterns:
+          - "*"
   - package-ecosystem: gomod
     directory: "/"
     schedule:

@@ -16,6 +20,10 @@ updates:
     labels:
      - enhancement
      - dependency-management
+    groups:
+      all-updates:
+        patterns:
+          - "*"
   - package-ecosystem: docker
     directory: "/"
     schedule:

@@ -24,3 +32,7 @@ updates:
     labels:
       - enhancement
       - dependency-management
+    groups:
+      all-updates:
+        patterns:
+          - "*"

---
@@ -1,8 +0,0 @@
-kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-kubeadmConfigPatches:
-- |
-  kind: ClusterConfiguration
-  etcd:
-    local:
-      dataDir: /tmp/etcd # /tmp is mapped to tmpfs in kind's nodes

---
@@ -3,6 +3,9 @@ on:
   issues:
     types:
       - opened
+permissions:
+  contents: read
+
 jobs:
   track_issue:
     runs-on: ubuntu-latest

---
@@ -5,18 +5,26 @@ on:
     branches: [ main ]
   workflow_dispatch:

+permissions:
+  contents: read
+
 jobs:
   build:
-    runs-on: ubuntu-20.04
-    container: ghcr.io/kedacore/keda-tools:1.20.8
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+      id-token: write # needed for signing the images with GitHub OIDC Token **not production ready**
+
+    container: ghcr.io/kedacore/keda-tools:1.24.3
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

       - name: Register workspace path
         run: git config --global --add safe.directory "$GITHUB_WORKSPACE"

       - name: Login to GHCR
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           # Username used to log in to a Docker registry. If not set then no login will occur
           username: ${{ github.repository_owner }}

@@ -26,9 +34,23 @@ jobs:
           registry: ghcr.io

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0

       - name: Publish on GitHub Container Registry
         run: make publish-multiarch
         env:
           VERSION: canary
+
+      # https://github.com/sigstore/cosign-installer
+      - name: Install Cosign
+        uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
+
+      - name: Check Cosign install!
+        run: cosign version
+
+      - name: Sign KEDA images published on GitHub Container Registry
+        # This step uses the identity token to provision an ephemeral certificate
+        # against the sigstore community Fulcio instance.
+        run: make sign-images
+        env:
+          VERSION: canary

---
@@ -6,10 +6,15 @@ on:

 jobs:
   build:
-    runs-on: ubuntu-20.04
-    container: ghcr.io/kedacore/keda-tools:1.20.8
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      packages: write
+      id-token: write # needed for signing the images with GitHub OIDC Token **not production ready**
+
+    container: ghcr.io/kedacore/keda-tools:1.24.3
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

       - name: Register workspace path
         run: git config --global --add safe.directory "$GITHUB_WORKSPACE"

@@ -25,7 +30,7 @@ jobs:
           VERSION: ${{ steps.get_version.outputs.VERSION }}

       - name: Login to GHCR
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           # Username used to log in to a Docker registry. If not set then no login will occur
           username: ${{ github.repository_owner }}

@@ -35,13 +40,27 @@ jobs:
           registry: ghcr.io

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0

       - name: Publish on GitHub Container Registry
         run: make publish-multiarch
         env:
           VERSION: ${{ steps.get_version.outputs.VERSION }}
+
+      # https://github.com/sigstore/cosign-installer
+      - name: Install Cosign
+        uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
+
+      - name: Check Cosign install!
+        run: cosign version
+
+      - name: Sign KEDA images published on GitHub Container Registry
+        # This step uses the identity token to provision an ephemeral certificate
+        # against the sigstore community Fulcio instance.
+        run: make sign-images
+        env:
+          VERSION: ${{ steps.get_version.outputs.VERSION }}

       # Get release information to determine id of the current release
       - name: Get Release
         id: get-release-info

@@ -56,9 +75,9 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          upload_url: https://uploads.github.com/repos/kedacore/http-add-on/releases/${{ steps.get-release-info.outputs.id }}/assets?name=keda-http-add-on-${{ steps.get_version.outputs.VERSION }}.yaml
-          asset_path: keda-http-add-on-${{ steps.get_version.outputs.VERSION }}.yaml
-          asset_name: keda-http-add-on-${{ steps.get_version.outputs.VERSION }}.yaml
+          upload_url: https://uploads.github.com/repos/kedacore/http-add-on/releases/${{ steps.get-release-info.outputs.id }}/assets?name=keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
+          asset_path: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
+          asset_name: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
           asset_content_type: application/x-yaml

       # Upload CRD deployment YAML file to GitHub release

@@ -68,7 +87,7 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          upload_url: https://uploads.github.com/repos/kedacore/http-add-on/releases/${{ steps.get-release-info.outputs.id }}/assets?name=keda-http-add-on-${{ steps.get_version.outputs.VERSION }}-crds.yaml
-          asset_path: keda-http-add-on-${{ steps.get_version.outputs.VERSION }}-crds.yaml
-          asset_name: keda-http-add-on-${{ steps.get_version.outputs.VERSION }}-crds.yaml
+          upload_url: https://uploads.github.com/repos/kedacore/http-add-on/releases/${{ steps.get-release-info.outputs.id }}/assets?name=keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
+          asset_path: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
+          asset_name: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
           asset_content_type: application/x-yaml
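Both publish workflows now sign every pushed image keylessly, using the job's GitHub OIDC token (the `id-token: write` permission plus the `make sign-images` step). A consumer-side check with cosign might look like this; a sketch only, since the identity regexp is an assumption about the signing identity rather than something recorded in this diff:

```
cosign verify ghcr.io/kedacore/http-add-on-operator:canary \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  --certificate-identity-regexp 'https://github.com/kedacore/http-add-on/.*'
```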
---

@@ -9,6 +9,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true

+permissions:
+  contents: read
+
 jobs:
   e2e_tests:
     runs-on: ubuntu-latest

@@ -16,14 +19,14 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetesVersion: [v1.28, v1.27, v1.26]
+        kubernetesVersion: [v1.32, v1.31, v1.30]
         include:
-          - kubernetesVersion: v1.28
-            kindImage: kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31
-          - kubernetesVersion: v1.27
-            kindImage: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
-          - kubernetesVersion: v1.26
-            kindImage: kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
+          - kubernetesVersion: v1.32
+            kindImage: kindest/node:v1.32.0@sha256:c48c62eac5da28cdadcf560d1d8616cfa6783b58f0d94cf63ad1bf49600cb027
+          - kubernetesVersion: v1.31
+            kindImage: kindest/node:v1.31.4@sha256:2cb39f7295fe7eafee0842b1052a599a4fb0f8bcf3f83d96c7f4864c357c6c30
+          - kubernetesVersion: v1.30
+            kindImage: kindest/node:v1.30.8@sha256:17cd608b3971338d9180b00776cb766c50d0a0b6b904ab4ff52fd3fc5c6369bf
     steps:
       - name: Install prerequisites
         run: |

@@ -32,17 +35,20 @@ jobs:
         env:
           DEBIAN_FRONTEND: noninteractive

-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+
+      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        with:
+          go-version: "1.24"

       - name: Helm install
-        uses: Azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
+        uses: Azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0

       - name: Create k8s ${{ matrix.kubernetesVersion }} Kind Cluster
-        uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0
+        uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
         with:
           node_image: ${{ matrix.kindImage }}
           cluster_name: cluster
           config: .github/kind.yaml

       - name: Generate images and push to the cluster
         run: |

@@ -73,7 +79,7 @@ jobs:
         env:
           DEBIAN_FRONTEND: noninteractive

-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

       - name: Generate images
         run: |

@@ -82,7 +88,7 @@ jobs:
           VERSION: ${{ github.sha }}

   arm_e2e_tests:
-    runs-on: ARM64
+    runs-on: http-add-on-e2e
     needs: arm_image_generation
     name: Execute e2e test on ARM64 ${{ matrix.kubernetesVersion }}
     env:

@@ -90,30 +96,29 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        kubernetesVersion: [v1.28, v1.27, v1.26]
+        kubernetesVersion: [v1.32, v1.31, v1.30]
         include:
-          - kubernetesVersion: v1.28
-            kindImage: kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31
-          - kubernetesVersion: v1.27
-            kindImage: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
-          - kubernetesVersion: v1.26
-            kindImage: kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
+          - kubernetesVersion: v1.32
+            kindImage: kindest/node:v1.32.0@sha256:c48c62eac5da28cdadcf560d1d8616cfa6783b58f0d94cf63ad1bf49600cb027
+          - kubernetesVersion: v1.31
+            kindImage: kindest/node:v1.31.4@sha256:2cb39f7295fe7eafee0842b1052a599a4fb0f8bcf3f83d96c7f4864c357c6c30
+          - kubernetesVersion: v1.30
+            kindImage: kindest/node:v1.30.8@sha256:17cd608b3971338d9180b00776cb766c50d0a0b6b904ab4ff52fd3fc5c6369bf
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

-      - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1
+      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
         with:
-          go-version: "1.20"
+          go-version: "1.24"

       - name: Helm install
-        uses: Azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
+        uses: Azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0

       - name: Create k8s ${{ matrix.kubernetesVersion }} Kind Cluster
-        uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0
+        uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
         with:
           node_image: ${{ matrix.kindImage }}
           cluster_name: ${{ runner.name }}
           config: .github/kind.yaml

       - name: Push images to the cluster
         run: |

---
@@ -7,12 +7,15 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true

+permissions:
+  contents: read
+
 jobs:
   build_scaler:
     runs-on: ubuntu-latest
-    container: ghcr.io/kedacore/keda-tools:1.20.8
+    container: ghcr.io/kedacore/keda-tools:1.24.3
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
       - name: Register workspace path
         run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
       - name: Build The Scaler

@@ -22,9 +25,9 @@ jobs:

   build_operator:
     runs-on: ubuntu-latest
-    container: ghcr.io/kedacore/keda-tools:1.20.8
+    container: ghcr.io/kedacore/keda-tools:1.24.3
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
       - name: Register workspace path
         run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
       - name: Build The Operator

@@ -34,9 +37,9 @@ jobs:

   build_interceptor:
     runs-on: ubuntu-latest
-    container: ghcr.io/kedacore/keda-tools:1.20.8
+    container: ghcr.io/kedacore/keda-tools:1.24.3
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
       - name: Register workspace path
         run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
       - name: Build The Interceptor

---
@@ -9,14 +9,18 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true

+permissions:
+  contents: read
+
 jobs:
   linkinator:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
-      - uses: JustinBeckwith/linkinator-action@39e601e8efc0c24b7228ec87e43f4b9abebf3b25 # v1
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: JustinBeckwith/linkinator-action@3d5ba091319fa7b0ac14703761eebb7d100e6f6d # v1
         with:
           paths: "**/*.md"
           markdown: true
           concurrency: 1
           retry: true
-          linksToSkip: "https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler"
+          linksToSkip: "https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler,http://opentelemetry-collector.open-telemetry-system:4318,http://opentelemetry-collector.open-telemetry-system:4318/v1/traces, https://www.gnu.org/software/make/"

---
@@ -9,11 +9,14 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true

+permissions:
+  contents: read
+
 jobs:
   validate:
     name: validate - ${{ matrix.name }}
     runs-on: ${{ matrix.runner }}
-    container: ghcr.io/kedacore/keda-tools:1.20.8
+    container: ghcr.io/kedacore/keda-tools:1.24.3
     strategy:
       matrix:
         include:

@@ -22,7 +25,7 @@ jobs:
           - runner: ubuntu-latest
             name: amd64
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

       - name: Register workspace path
         run: git config --global --add safe.directory "$GITHUB_WORKSPACE"

@@ -37,13 +40,13 @@ jobs:
           echo ::set-output name=build_cache::$(go env GOCACHE)

       - name: Go modules cache
-        uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
+        uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
           path: ${{ steps.go-paths.outputs.mod_cache }}
           key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}

       - name: Go build cache
-        uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
+        uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
           path: ${{ steps.go-paths.outputs.build_cache }}
           key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}

@@ -67,16 +70,16 @@ jobs:
         run: ARCH=${{ matrix.name }} make test

   statics:
+    permissions:
+      contents: read # for actions/checkout to fetch code
+      pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
     name: Static Checks
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4
-      - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1
-        with:
-          python-version: 3.x
-      - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1
-        with:
-          go-version: "1.20"
-      - name: Get golangci
-        run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.54.2
-      - uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # v3.0.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
+        with:
+          go-version: "1.24"
+      - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+        with:
+          version: v2.1.0

---
@@ -355,3 +355,10 @@ admin/Cargo.lock

 /target
 .envrc
+
+# locally generated certs for testing TLS
+*.crt
+*.pem
+*.csr
+*.srl
+*.ext

---
.golangci.yml

@@ -1,71 +1,74 @@
-# options for analysis running
+version: "2"
 run:
-  # default concurrency is a available CPU number
   concurrency: 4
-  # add the build tags to include e2e tests files
   build-tags:
-    - e2e
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 10m
+    - e2e
 linters:
-  # please, do not use `enable-all`: it's deprecated and will be removed soon.
-  # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
-  disable-all: true
+  default: none
   enable:
-    - typecheck
-    - dupl
-    - goprintffuncname
-    - govet
-    - nolintlint
-    #- rowserrcheck
-    - gofmt
-    - revive
-    - goimports
-    - misspell
     - bodyclose
-    - unconvert
-    - ineffassign
-    - staticcheck
-    - exportloopref
+    - copyloopvar
     #- depguard #https://github.com/kedacore/keda/issues/4980
     - dogsled
+    - dupl
     - errcheck
     #- funlen
-    - gci
     - goconst
     - gocritic
     - gocyclo
-    - gosimple
-    - stylecheck
-    - unused
-    - unparam
+    - goprintffuncname
+    - govet
+    - ineffassign
+    - misspell
+    - nolintlint
+    - revive
+    - staticcheck
+    - unconvert
+    - unparam
+    - unused
     - whitespace
-
-issues:
-  include:
-    - EXC0002 # disable excluding of issues about comments from golint
-  # Excluding configuration per-path, per-linter, per-text and per-source
-  exclude-rules:
-    - path: _test\.go
-      linters:
-        - gomnd
-        - dupl
-        - unparam
-    # Exclude gci check for //+kubebuilder:scaffold:imports comments. Waiting to
-    # resolve https://github.com/kedacore/keda/issues/4379
-    - path: operator/controllers/http/suite_test.go
-      linters:
-        - gci
-    - path: operator/main.go
-      linters:
-        - gci
-linters-settings:
-  funlen:
-    lines: 80
-    statements: 40
-  gci:
-    sections:
-      - standard
-      - default
-      - prefix(github.com/kedacore/http-add-on)
+  settings:
+    funlen:
+      lines: 80
+      statements: 40
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    rules:
+      - linters:
+          - dupl
+          - revive
+          - unparam
+        path: _test\.go
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gci
+    - gofmt
+    - goimports
+  settings:
+    gci:
+      sections:
+        - standard
+        - default
+        - prefix(github.com/kedacore/http-add-on)
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+      # Exclude gci check for //+kubebuilder:scaffold:imports comments. Waiting to
+      # resolve https://github.com/kedacore/keda/issues/4379
+      - operator/controllers/http/suite_test.go
+      - operator/main.go
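The rewritten config is the golangci-lint v2 layout (separate `linters`/`formatters` sections, `exclusions` replacing `exclude-rules`), matching the v2.1.0 pin in CI and in the devcontainer. A quick local sanity check, assuming golangci-lint v2 is on PATH:

```
golangci-lint config verify   # validate the version: "2" schema
golangci-lint run ./...
```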
---

@@ -6,7 +6,8 @@ This page contains a list of organizations who are using KEDA's HTTP Add-on in p

 | Organization | Status | More Information (Blog post, etc.) |
 | ------------ | ---------| ---------------|
-|N/A|| N/A|
+| PropulsionAI ||[PropulsionAI](https://propulsionhq.com) allows you to add AI to your apps, without writing code.|
+| REWE Digital ||From delivery service to market — [REWE Digital](https://www.rewe-digital.com) strengthens leading technological position of REWE Group in food retail sector. |

 ## Become an adopter!

---
CHANGELOG.md

@@ -10,6 +10,10 @@ This changelog keeps track of work items that have been completed and are ready
 ## History

 - [Unreleased](#unreleased)
+- [v0.10.0](#v0100)
+- [v0.9.0](#v090)
+- [v0.8.0](#v080)
+- [v0.7.0](#v070)
 - [v0.6.0](#v060)
 - [v0.5.0](#v050)

@@ -21,11 +25,14 @@ This changelog keeps track of work items that have been completed and are ready

 ### New

-- **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))
+- **General**: Add failover service on cold-start ([#1280](https://github.com/kedacore/http-add-on/pull/1280))
+- **General**: Add configurable tracing support to the interceptor proxy ([#1021](https://github.com/kedacore/http-add-on/pull/1021))
+- **General**: Allow using HSO and SO with different names ([#1293](https://github.com/kedacore/http-add-on/issues/1293))
+- **General**: Support profiling for KEDA components ([#4789](https://github.com/kedacore/keda/issues/4789))
+- **General**: Add possibility to skip TLS verification for upstreams in interceptor ([#1307](https://github.com/kedacore/http-add-on/pull/1307))

 ### Improvements

-- **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))
+- **Interceptor**: Support HTTPScaledObject scoped timeout ([#813](https://github.com/kedacore/http-add-on/issues/813))

 ### Fixes

@@ -33,19 +40,102 @@ This changelog keeps track of work items that have been completed and are ready

 ### Deprecations

 - **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))

 ### Other

+- **Documentation**: Correct the service name used in the walkthrough documentation ([#1244](https://github.com/kedacore/http-add-on/pull/1244))
+
+## v0.10.0
+
+### New
+
+- **General**: Fix infrastructure crashes when deleting ScaledObject while scaling
+- **General**: Fix kubectl active printcolumn ([#1211](https://github.com/kedacore/http-add-on/issues/1211))
+- **General**: Support InitialCooldownPeriod for HTTPScaledObject [#1213](https://github.com/kedacore/http-add-on/issues/1213)
+
+### Other
+
+- **Documentation**: Correct the service name used in the walkthrough documentation ([#1244](https://github.com/kedacore/http-add-on/pull/1244))
+
+## v0.9.0
+
+### Breaking Changes
+
+- **General**: Drop support for deprecated field `spec.scaleTargetRef.deployment` ([#1061](https://github.com/kedacore/http-add-on/issues/1061))
+
+### New
+
+- **General**: Support portName in HTTPScaledObject service scaleTargetRef ([#1174](https://github.com/kedacore/http-add-on/issues/1174))
+- **General**: Support setting multiple TLS certs for different domains on the interceptor proxy ([#1116](https://github.com/kedacore/http-add-on/issues/1116))
+- **Interceptor**: Add support for for AWS ELB healthcheck probe ([#1198](https://github.com/kedacore/http-add-on/issues/1198))
+
+### Fixes
+
+- **General**: Align the interceptor metrics env var configuration with the OTEL spec ([#1031](https://github.com/kedacore/http-add-on/issues/1031))
+- **General**: Include trailing 0 window buckets in RPS calculation ([#1075](https://github.com/kedacore/http-add-on/issues/1075))
+
+### Other
+
+- **General**: Sign images with Cosign ([#1062](https://github.com/kedacore/http-add-on/issues/1062))
+
+## v0.8.0
+
+### New
+
+- **General**: Add configurable TLS on the wire support to the interceptor proxy ([#907](https://github.com/kedacore/http-add-on/issues/907))
+- **General**: Add support for collecting metrics using a Prometheus compatible endpoint or by sending metrics to an OpenTelemetry's HTTP endpoint ([#910](https://github.com/kedacore/http-add-on/issues/910))
+- **General**: Propagate HTTPScaledObject labels and annotations to ScaledObject ([#840](https://github.com/kedacore/http-add-on/issues/840))
+- **General**: Provide support for allowing HTTP scaler to work alongside other core KEDA scalers ([#489](https://github.com/kedacore/http-add-on/issues/489))
+- **General**: Support aggregation windows ([#882](https://github.com/kedacore/http-add-on/issues/882))
+
+### Fixes
+
+- **General**: Ensure operator is aware about changes on underlying ScaledObject ([#900](https://github.com/kedacore/http-add-on/issues/900))
+
+### Deprecations
+
+You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).
+
+- **General**: Deprecated `targetPendingRequests` in favor of `spec.scalingMetric.*.targetValue` ([#959](https://github.com/kedacore/http-add-on/discussions/959))
+
+### Other
+
+- **General**: Align with the new format of Ingress in the example demo ([#979](https://github.com/kedacore/http-add-on/pull/979))
+- **General**: Unify loggers ([#958](https://github.com/kedacore/http-add-on/issues/958))
+
+## v0.7.0
+
+### Breaking Changes
+
+- **General**: `host` field has been removed in favor of `hosts` in `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552)|[#888](https://github.com/kedacore/http-add-on/pull/888))
+
+### New
+
+- **General**: Support any resource which implements `/scale` subresource ([#438](https://github.com/kedacore/http-add-on/issues/438))
+
+### Improvements
+
+- **General**: Improve Scaler reliability adding probes and 3 replicas ([#870](https://github.com/kedacore/http-add-on/issues/870))
+
+### Fixes
+
+- **General**: Add new user agent probe ([#862](https://github.com/kedacore/http-add-on/issues/862))
+- **General**: Fix external scaler getting into bad state when retrieving queue lengths fails. ([#870](https://github.com/kedacore/http-add-on/issues/870))
+- **General**: Increase ScaledObject polling interval to 15 seconds ([#799](https://github.com/kedacore/http-add-on/issues/799))
+- **General**: Set forward request RawPath to original request RawPath ([#864](https://github.com/kedacore/http-add-on/issues/864))
+
+### Deprecations

 You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).

 New deprecation(s):

 - **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))

 Previously announced deprecation(s):

 - **General**: `host` field deprecated in favor of `hosts` in `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552))
 - **General**: Deprecated `KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS` in favor of `KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS` ([#438](https://github.com/kedacore/http-add-on/issues/438))

+### Other
+
 - **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))
 - **General**: Bump golang version ([#853](https://github.com/kedacore/http-add-on/pull/853))

 ## v0.6.0

---
@@ -89,7 +89,7 @@ K9s integrates Hey, a CLI tool to benchmark HTTP endpoints similar to AB bench.
   ```
 - You'll need to clone the repository to get access to this chart. If you have your own Deployment and Service installed, you can go right to creating an HTTPScaledObject. We use the provided sample HTTPScaledObject -
   ```
-  $ kubectl create -n $NAMESPACE -f examples/v0.3.0/httpscaledobject.yaml
+  $ kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
   ```
 - Testing Your Installation using k9s:
   ```

---
Makefile

@@ -32,6 +32,28 @@ GO_LDFLAGS="-X github.com/kedacore/http-add-on/pkg/build.version=${VERSION} -X g
 GIT_COMMIT ?= $(shell git rev-list -1 HEAD)
 GIT_COMMIT_SHORT ?= $(shell git rev-parse --short HEAD)

+COSIGN_FLAGS ?= -y -a GIT_HASH=${GIT_COMMIT} -a GIT_VERSION=${VERSION} -a BUILD_DATE=${DATE}
+
+define DOMAINS
+basicConstraints=CA:FALSE
+keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = localhost
+DNS.2 = *.keda
+DNS.3 = *.interceptor-tls-test-ns
+endef
+export DOMAINS
+
+define ABC_DOMAINS
+basicConstraints=CA:FALSE
+keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = abc
+endef
+export ABC_DOMAINS
+
 # Build targets

 build-operator:

@@ -45,13 +67,36 @@ build-scaler:

 build: build-operator build-interceptor build-scaler

+# generate certs for local unit and e2e tests
+rootca-test-certs:
+	mkdir -p certs
+	openssl req -x509 -nodes -new -sha256 -days 1024 -newkey rsa:2048 -keyout certs/RootCA.key -out certs/RootCA.pem -subj "/C=US/CN=Keda-Root-CA"
+	openssl x509 -outform pem -in certs/RootCA.pem -out certs/RootCA.crt
+
+test-certs: rootca-test-certs
+	echo "$$DOMAINS" > certs/domains.ext
+	openssl req -new -nodes -newkey rsa:2048 -keyout certs/tls.key -out certs/tls.csr -subj "/C=US/ST=KedaState/L=KedaCity/O=Keda-Certificates/CN=keda.local"
+	openssl x509 -req -sha256 -days 1024 -in certs/tls.csr -CA certs/RootCA.pem -CAkey certs/RootCA.key -CAcreateserial -extfile certs/domains.ext -out certs/tls.crt
+	echo "$$ABC_DOMAINS" > certs/abc_domains.ext
+	openssl req -new -nodes -newkey rsa:2048 -keyout certs/abc.tls.key -out certs/abc.tls.csr -subj "/C=US/ST=KedaState/L=KedaCity/O=Keda-Certificates/CN=abc"
+	openssl x509 -req -sha256 -days 1024 -in certs/abc.tls.csr -CA certs/RootCA.pem -CAkey certs/RootCA.key -CAcreateserial -extfile certs/abc_domains.ext -out certs/abc.tls.crt
+
+clean-test-certs:
+	rm -r certs || true
+
 # Test targets
-test: fmt vet
+test: fmt vet test-certs
 	go test ./...

 e2e-test:
 	go run -tags e2e ./tests/run-all.go

+e2e-test-setup:
+	ONLY_SETUP=true go run -tags e2e ./tests/run-all.go
+
+e2e-test-local:
+	SKIP_SETUP=true go run -tags e2e ./tests/run-all.go
+
 # Docker targets
 docker-build-operator:
 	DOCKER_BUILDKIT=1 docker build . -t ${IMAGE_OPERATOR_VERSIONED_TAG} -t ${IMAGE_OPERATOR_SHA_TAG} -f operator/Dockerfile --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT}
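The new cert targets give `make test` a locally generated chain to exercise the TLS paths. To confirm the generated leaf actually chains to the generated root, a sketch using plain openssl:

```
make test-certs
openssl verify -CAfile certs/RootCA.pem certs/tls.crt   # expect: certs/tls.crt: OK
make clean-test-certs                                   # remove the generated certs
```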
@@ -83,15 +128,15 @@ publish-scaler-multiarch:

 publish-multiarch: publish-operator-multiarch publish-interceptor-multiarch publish-scaler-multiarch

-release: manifests kustomize ## Produce new KEDA Http Add-on release in keda-http-add-on-$(VERSION).yaml file.
+release: manifests kustomize ## Produce new KEDA Http Add-on release in keda-add-ons-http-$(VERSION).yaml file.
 	cd config/interceptor && \
 	$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-interceptor=${IMAGE_INTERCEPTOR_VERSIONED_TAG}
 	cd config/scaler && \
 	$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-scaler=${IMAGE_SCALER_VERSIONED_TAG}
 	cd config/operator && \
 	$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-operator=${IMAGE_OPERATOR_VERSIONED_TAG}
-	$(KUSTOMIZE) build config/default > keda-http-add-on-$(VERSION).yaml
-	$(KUSTOMIZE) build config/crd > keda-http-add-on-$(VERSION)-crds.yaml
+	$(KUSTOMIZE) build config/default > keda-add-ons-http-$(VERSION).yaml
+	$(KUSTOMIZE) build config/crd > keda-add-ons-http-$(VERSION)-crds.yaml

 # Development

@@ -114,6 +159,14 @@ manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and Cust
 verify-manifests: ## Verify manifests are up to date.
 	./hack/verify-manifests.sh

+sign-images: ## Sign KEDA images published on GitHub Container Registry
+	COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_OPERATOR_VERSIONED_TAG)
+	COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_OPERATOR_SHA_TAG)
+	COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_INTERCEPTOR_VERSIONED_TAG)
+	COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_INTERCEPTOR_SHA_TAG)
+	COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_SCALER_VERSIONED_TAG)
+	COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_SCALER_SHA_TAG)
+
 mockgen: ## Generate mock implementations of Go interfaces.
 	./hack/update-mockgen.sh

@@ -134,19 +187,37 @@ pre-commit: ## Run static-checks.

 CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
 controller-gen: ## Download controller-gen locally if necessary.
-	GOBIN=$(shell pwd)/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.12.0
+	GOBIN=$(shell pwd)/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0

 KUSTOMIZE = $(shell pwd)/bin/kustomize
 kustomize: ## Download kustomize locally if necessary.
 	GOBIN=$(shell pwd)/bin go install sigs.k8s.io/kustomize/kustomize/v5

 install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
 	$(KUSTOMIZE) build config/crd | kubectl apply -f -

 deploy: manifests kustomize ## Deploy to the K8s cluster specified in ~/.kube/config.
 	cd config/interceptor && \
 	$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-interceptor=${IMAGE_INTERCEPTOR_VERSIONED_TAG}
+
+	cd config/interceptor && \
+	$(KUSTOMIZE) edit add patch --path e2e-test/otel/deployment.yaml --group apps --kind Deployment --name interceptor --version v1
+
+	cd config/interceptor && \
+	$(KUSTOMIZE) edit add patch --path e2e-test/otel/scaledobject.yaml --group keda.sh --kind ScaledObject --name interceptor --version v1alpha1
+
+	cd config/interceptor && \
+	$(KUSTOMIZE) edit add patch --path e2e-test/tls/deployment.yaml --group apps --kind Deployment --name interceptor --version v1
+
+	cd config/interceptor && \
+	$(KUSTOMIZE) edit add patch --path e2e-test/tls/proxy.service.yaml --kind Service --name interceptor-proxy --version v1
+
 	cd config/scaler && \
 	$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-scaler=${IMAGE_SCALER_VERSIONED_TAG}
+
+	cd config/scaler && \
+	$(KUSTOMIZE) edit add patch --path e2e-test/otel/deployment.yaml --group apps --kind Deployment --name scaler --version v1
+
 	cd config/operator && \
 	$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-operator=${IMAGE_OPERATOR_VERSIONED_TAG}

@@ -154,8 +225,3 @@ deploy: manifests kustomize ## Deploy to the K8s cluster specified in ~/.kube/co

 undeploy:
 	$(KUSTOMIZE) build config/default | kubectl delete -f -
-
-kind-load:
-	kind load docker-image ghcr.io/kedacore/http-add-on-operator:${VERSION}
-	kind load docker-image ghcr.io/kedacore/http-add-on-interceptor:${VERSION}
-	kind load docker-image ghcr.io/kedacore/http-add-on-scaler:${VERSION}
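With the rename, a tagged release build now emits `keda-add-ons-http-*` manifests. For example, a sketch assuming the tool downloads in the targets above succeed:

```
VERSION=0.10.0 make release
# -> keda-add-ons-http-0.10.0.yaml and keda-add-ons-http-0.10.0-crds.yaml
```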
---

@@ -22,7 +22,7 @@ The KEDA HTTP Add-on allows Kubernetes users to automatically scale their HTTP s

 | 🚧 **Project status: beta** 🚧|
 |---------------------------------------------|
-| :loudspeaker: **KEDA is actively relying on community contributions to help grow & maintain the add-on. The KEDA maintainers are assisting the community to evolve the add-on but not directly responsible for it.** Feel free to [open a new discussion](https://github.com/kedacore/http-add-on/discussions/new/choose) in case of questions.<br/><br/>⚠ The HTTP Add-on currently is in [beta](https://github.com/kedacore/http-add-on/releases/latest). We can't yet recommend it for production usage because we are still developing and testing it. It may have "rough edges" including missing documentation, bugs and other issues. It is currently provided as-is without support. |
+| :loudspeaker: **KEDA is actively relying on community contributions to help grow & maintain the add-on. The KEDA maintainers are assisting the community to evolve the add-on but not directly responsible for it.** Feel free to [open a new discussion](https://github.com/kedacore/http-add-on/discussions/new/choose) in case of questions.<br/><br/>⚠ The HTTP Add-on currently is in [beta](https://github.com/kedacore/http-add-on/releases/latest). We can't yet recommend it for production usage because we are still developing and testing it. It may have "rough edges" including missing documentation, bugs and other issues. It is currently provided as-is without support.<br/><br/>:bulb: For production-ready needs, you can consider using the [Kedify HTTP Scaler](https://kedify.io/scalers/http), a commercial alternative offering robust and reliable scaling for KEDA. |

 ## HTTP Autoscaling Made Simple

---
@@ -20,7 +20,17 @@ It should not include every single change but solely what matters to our custome

 Add the new released version to the list in `KEDA Version` dropdown in [2_bug_report.yml](https://github.com/kedacore/http-add-on/blob/main/.github/ISSUE_TEMPLATE/2_bug_report.yml).

-## 4: Create a new GitHub release
+## 4. Update documentation references to current version
+
+Update the links to current version within the file `walkthrough.md`
+
+> ```console
+> kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
+> ```
+
+> >If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](THE REFERENCE).
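The link bump in the new step 4 is mechanical, so it can be scripted. A sketch only: the walkthrough path and the previous version below are assumptions, so adjust both to the actual release:

```
sed -i 's|examples/v0.9.0|examples/v0.10.0|g' docs/walkthrough.md
```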
+
+## 5: Create a new GitHub release

 [Create a new release](https://github.com/kedacore/http-add-on/releases/new) on the GitHub releases page, using your new release number.

@@ -30,7 +40,7 @@ The release description should be a short to medium length summary of what has c

 After you create the new release, automation in a GitHub action will build and deploy new container images.

-## 5: Submit a PR to the [Helm Charts Repository](https://github.com/kedacore/charts)
+## 6: Submit a PR to the [Helm Charts Repository](https://github.com/kedacore/charts)

 The scope of the changes you'll need to make to the Helm chart vary, but the below list is the minimum set of fields to change:

@@ -54,7 +64,7 @@ images:
   tag: 1.2.3
 ```

->Note: The container images generated by CI/CD in step 2 will have the same tag as the tag you created in the release, minus the `v` prefix. You can always see what images created by going to the container registry page for the [interceptor](https://github.com/orgs/kedacore/packages/container/package/http-add-on-interceptor), [operator](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator) or [scaler](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler)
+>Note: The container images generated by CI/CD in step 2 will have the same tag as the tag you created in the release, minus the `v` prefix. You can always see what images created by going to the container registry page for the [interceptor](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor), [operator](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator) or [scaler](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler)

 Once you've made changes to the chart, here's how to do submit the change to the charts repository:

@@ -66,11 +76,11 @@ Once you've made changes to the chart, here's how to do submit the change to the

 After your PR is merged, you've completed the release. Congratulations! You can optionally write a blog post about it; see the next section if you're interested.

-## 6: Publish release on Artifact Hub
+## 7: Publish release on Artifact Hub

 Publish release on Artifact Hub by creating a new version in [kedacore/external-scalers](https://github.com/kedacore/external-scalers/tree/main/artifacthub/add-ons-http).

-## 7: Write a blog post on the documentation site (_optional_)
+## 8: Write a blog post on the documentation site (_optional_)

 If you believe that your release is large enough to warrant a blog post on the [keda.sh/blog](https://keda.sh/blog/) site, please go to [github.com/kedacore/keda-docs](https://github.com/kedacore/keda-docs) and submit a new PR with a blog article about the release.

---
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.12.0
+    controller-gen.kubebuilder.io/version: v0.15.0
   name: httpscaledobjects.http.keda.sh
 spec:
   group: http.keda.sh

@@ -17,15 +17,12 @@ spec:
   scope: Namespaced
   versions:
   - additionalPrinterColumns:
-    - jsonPath: .spec.scaleTargetRef.deploymentName
-      name: ScaleTargetDeploymentName
+    - jsonPath: .status.targetWorkload
+      name: TargetWorkload
       type: string
-    - jsonPath: .spec.scaleTargetRef
-      name: ScaleTargetServiceName
+    - jsonPath: .status.targetService
+      name: TargetService
       type: string
-    - jsonPath: .spec.scaleTargetRef
-      name: ScaleTargetPort
-      type: integer
     - jsonPath: .spec.replicas.min
       name: MinReplicas
       type: integer

@@ -35,7 +32,7 @@ spec:
     - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
-    - jsonPath: .status.conditions[?(@.type=="HTTPScaledObjectIsReady")].status
+    - jsonPath: .status.conditions[?(@.reason=="HTTPScaledObjectIsReady")].status
      name: Active
      type: string
     name: v1alpha1

@@ -44,43 +41,70 @@ spec:
       description: HTTPScaledObject is the Schema for the httpscaledobjects API
       properties:
         apiVersion:
-          description: 'APIVersion defines the versioned schema of this representation
-            of an object. Servers should convert recognized schemas to the latest
-            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+          description: |-
+            APIVersion defines the versioned schema of this representation of an object.
+            Servers should convert recognized schemas to the latest internal value, and
+            may reject unrecognized values.
+            More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
           type: string
         kind:
-          description: 'Kind is a string value representing the REST resource this
-            object represents. Servers may infer this from the endpoint the client
-            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+          description: |-
+            Kind is a string value representing the REST resource this object represents.
+            Servers may infer this from the endpoint the client submits requests to.
+            Cannot be updated.
+            In CamelCase.
+            More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
           type: string
         metadata:
           type: object
         spec:
           description: HTTPScaledObjectSpec defines the desired state of HTTPScaledObject
           properties:
-            host:
-              description: (optional) (deprecated) The host to route. All requests
-                which the "Host" header matches .spec.host and the Request Target
-                matches any .spec.pathPrefixes will be routed to the Service and
-                Port specified in the scaleTargetRef. The .spec.host field is mutually
-                exclusive with the .spec.hosts field.
-              type: string
+            coldStartTimeoutFailoverRef:
+              description: (optional) The name of the failover service to route
+                HTTP requests to when the target is not available
+              properties:
+                port:
+                  description: The port to route to
+                  format: int32
+                  type: integer
+                portName:
+                  description: The port to route to referenced by name
+                  type: string
+                service:
+                  description: The name of the service to route to
+                  type: string
+                timeoutSeconds:
+                  default: 30
+                  description: The timeout in seconds to wait before routing to
+                    the failover service (Default 30)
+                  format: int32
+                  type: integer
+              required:
+              - service
+              type: object
+              x-kubernetes-validations:
+              - message: must define either the 'portName' or the 'port'
+                rule: has(self.portName) != has(self.port)
             hosts:
-              description: (optional) The hosts to route. All requests which the
-                "Host" header matches any .spec.hosts and the Request Target matches
-                any .spec.pathPrefixes will be routed to the Service and Port specified
-                in the scaleTargetRef. The .spec.hosts field is mutually exclusive
-                with the .spec.host field.
+              description: |-
+                The hosts to route. All requests which the "Host" header
+                matches any .spec.hosts (and the Request Target matches any
+                .spec.pathPrefixes) will be routed to the Service and Port specified in
+                the scaleTargetRef.
               items:
                 type: string
               type: array
+            initialCooldownPeriod:
+              description: (optional) Initial period before scaling
+              format: int32
+              type: integer
             pathPrefixes:
-              description: (optional) The paths to route. All requests which the
-                Request Target matches any .spec.pathPrefixes and the "Host" header
-                matches any .spec.hosts will be routed to the Service and Port specified
-                in the scaleTargetRef. The .spec.hosts field is mutually exclusive
-                with the .spec.host field. When this field is null, any path is
-                matched.
+              description: |-
+                The paths to route. All requests which the Request Target matches any
+                .spec.pathPrefixes (and the "Host" header matches any .spec.hosts)
+                will be routed to the Service and Port specified in
+                the scaleTargetRef.
               items:
                 type: string
               type: array

@@ -99,33 +123,87 @@ spec:
                 type: integer
               type: object
             scaleTargetRef:
-              description: The name of the deployment to route HTTP requests to
-                (and to autoscale).
+              description: |-
+                The name of the deployment to route HTTP requests to (and to autoscale).
+                Including validation as a requirement to define either the PortName or the Port
               properties:
-                deployment:
-                  description: The name of the deployment to scale according to
-                    HTTP traffic
+                apiVersion:
+                  type: string
+                kind:
+                  type: string
+                name:
                   type: string
                 port:
                   description: The port to route to
                   format: int32
                   type: integer
+                portName:
+                  description: The port to route to referenced by name
+                  type: string
                 service:
                   description: The name of the service to route to
                   type: string
               required:
-              - deployment
-              - port
               - service
               type: object
+              x-kubernetes-validations:
+              - message: must define either the 'portName' or the 'port'
+                rule: has(self.portName) != has(self.port)
             scaledownPeriod:
               description: (optional) Cooldown period value
               format: int32
               type: integer
+            scalingMetric:
+              description: (optional) Configuration for the metric used for scaling
+              properties:
+                concurrency:
+                  description: Scaling based on concurrent requests for a given
+                    target
+                  properties:
+                    targetValue:
+                      default: 100
+                      description: Target value for rate scaling
+                      type: integer
+                  type: object
+                requestRate:
+                  description: Scaling based the average rate during an specific
+                    time window for a given target
+                  properties:
+                    granularity:
+                      default: 1s
+                      description: Time granularity for rate calculation
+                      type: string
+                    targetValue:
+                      default: 100
+                      description: Target value for rate scaling
+                      type: integer
+                    window:
+                      default: 1m
+                      description: Time window for rate calculation
+                      type: string
+                  type: object
+              type: object
             targetPendingRequests:
-              description: (optional) Target metric value
+              description: (optional) DEPRECATED (use ScalingMetric instead) Target
+                metric value
               format: int32
               type: integer
+            timeouts:
+              description: (optional) Timeouts that override the global ones
+              properties:
+                conditionWait:
+                  description: How long to wait for the backing workload to have
+                    1 or more replicas before connecting and sending the HTTP request
+                    (Default is set by the KEDA_CONDITION_WAIT_TIMEOUT environment
+                    variable)
+                  type: string
+                responseHeader:
+                  description: How long to wait between when the HTTP request is
+                    sent to the backing app and when response headers need to arrive
+                    (Default is set by the KEDA_RESPONSE_HEADER_TIMEOUT environment
+                    variable)
+                  type: string
+              type: object
           required:
           - scaleTargetRef
           type: object

@@ -160,12 +238,6 @@ spec:
                   type:
                     description: Type of condition
-                    enum:
-                    - Created
-                    - Terminated
-                    - Error
-                    - Pending
-                    - Terminating
-                    - Unknown
-                    - Ready
                     type: string
                 required:

@@ -173,6 +245,12 @@ spec:
                 - type
                 type: object
               type: array
+            targetService:
+              description: TargetService reflects details about the scaled service.
+              type: string
+            targetWorkload:
+              description: TargetWorkload reflects details about the scaled workload.
+              type: string
           type: object
         type: object
     served: true
|
||||
|
|
|
@ -6,7 +6,7 @@ resources:
|
|||
- ../operator
|
||||
- ../scaler
|
||||
namespace: keda
|
||||
namePrefix: keda-http-add-on-
|
||||
namePrefix: keda-add-ons-http-
|
||||
labels:
|
||||
- includeSelectors: true
|
||||
includeTemplates: true
|
||||
|
|
|
@ -23,6 +23,10 @@ spec:
|
|||
containers:
|
||||
- name: interceptor
|
||||
image: ghcr.io/kedacore/http-add-on-interceptor
|
||||
args:
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDA_HTTP_CURRENT_NAMESPACE
|
||||
value: "keda"
|
||||
|
@ -38,7 +42,7 @@ spec:
|
|||
value: "500ms"
|
||||
- name: KEDA_CONDITION_WAIT_TIMEOUT
|
||||
value: "20s"
|
||||
- name: KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS
|
||||
- name: KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS
|
||||
value: "1000"
|
||||
- name: KEDA_HTTP_FORCE_HTTP2
|
||||
value: "false"
|
||||
|
@ -55,6 +59,8 @@ spec:
|
|||
containerPort: 9090
|
||||
- name: proxy
|
||||
containerPort: 8080
|
||||
- name: metrics
|
||||
containerPort: 2223
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /livez
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: interceptor
|
||||
env:
|
||||
- name: OTEL_PROM_EXPORTER_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_PROM_EXPORTER_PORT
|
||||
value: "2223"
|
||||
- name: OTEL_EXPORTER_OTLP_METRICS_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_EXPORTER_OTLP_ENDPOINT
|
||||
value: "http://opentelemetry-collector.open-telemetry-system:4318"
|
||||
- name: OTEL_METRIC_EXPORT_INTERVAL
|
||||
value: "1"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL
|
||||
value: "http/protobuf"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
|
||||
value: "http://opentelemetry-collector.open-telemetry-system:4318/v1/traces"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_INSECURE
|
||||
value: "true"
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- scaledobject.yaml
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
minReplicaCount: 1
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: interceptor
|
||||
ports:
|
||||
- name: proxy-tls
|
||||
containerPort: 8443
|
||||
env:
|
||||
- name: KEDA_HTTP_PROXY_TLS_ENABLED
|
||||
value: "true"
|
||||
- name: KEDA_HTTP_PROXY_TLS_CERT_PATH
|
||||
value: "/certs/tls.crt"
|
||||
- name: KEDA_HTTP_PROXY_TLS_KEY_PATH
|
||||
value: "/certs/tls.key"
|
||||
- name: KEDA_HTTP_PROXY_TLS_CERT_STORE_PATHS
|
||||
value: "/additional-certs"
|
||||
- name: KEDA_HTTP_PROXY_TLS_PORT
|
||||
value: "8443"
|
||||
volumeMounts:
|
||||
- readOnly: true
|
||||
mountPath: "/certs"
|
||||
name: certs
|
||||
- readOnly: true
|
||||
mountPath: "/additional-certs/abc-certs"
|
||||
name: abc-certs
|
||||
volumes:
|
||||
- name: certs
|
||||
secret:
|
||||
secretName: keda-tls
|
||||
- name: abc-certs
|
||||
secret:
|
||||
secretName: abc-certs
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- proxy.service.yaml
|
|
@ -0,0 +1,11 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: interceptor-proxy
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: proxy-tls
|
||||
protocol: TCP
|
||||
port: 8443
|
||||
targetPort: proxy-tls
|
|
@ -6,6 +6,7 @@ resources:
|
|||
- role_binding.yaml
|
||||
- admin.service.yaml
|
||||
- proxy.service.yaml
|
||||
- metrics.service.yaml
|
||||
- service_account.yaml
|
||||
- scaledobject.yaml
|
||||
configurations:
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: interceptor-metrics
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 2223
|
||||
targetPort: metrics
|
|
@ -5,9 +5,17 @@ metadata:
|
|||
name: interceptor
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apps
|
||||
- ""
|
||||
resources:
|
||||
- deployments
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
|
|
|
@ -1,9 +1,5 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: TransformerConfig
|
||||
namePrefix:
|
||||
- apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
- kind: ScaledObject
|
||||
path: spec/scaleTargetRef/name
|
||||
- apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
- kind: ScaledObject
|
||||
path: spec/triggers/metadata/scalerAddress
|
||||
|
|
|
@ -24,9 +24,12 @@ spec:
|
|||
image: ghcr.io/kedacore/http-add-on-operator
|
||||
args:
|
||||
- --leader-elect
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDAHTTP_OPERATOR_EXTERNAL_SCALER_SERVICE
|
||||
value: "keda-http-add-on-external-scaler"
|
||||
value: "keda-add-ons-http-external-scaler"
|
||||
- name: KEDAHTTP_OPERATOR_EXTERNAL_SCALER_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_OPERATOR_NAMESPACE
|
||||
|
|
|
@ -3,6 +3,7 @@ kind: Deployment
|
|||
metadata:
|
||||
name: scaler
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
spec:
|
||||
affinity:
|
||||
|
@ -22,15 +23,19 @@ spec:
|
|||
containers:
|
||||
- name: scaler
|
||||
image: ghcr.io/kedacore/http-add-on-scaler
|
||||
args:
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_DEPLOYMENT
|
||||
value: "keda-http-add-on-interceptor"
|
||||
value: "keda-add-ons-http-interceptor"
|
||||
- name: KEDA_HTTP_SCALER_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_NAMESPACE
|
||||
value: "keda"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_SERVICE
|
||||
value: "keda-http-add-on-interceptor-admin"
|
||||
value: "keda-add-ons-http-interceptor-admin"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_SCALER_STREAM_INTERVAL_MS
|
||||
|
@ -42,10 +47,18 @@ spec:
|
|||
grpc:
|
||||
port: 9090
|
||||
service: liveness
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
grpc:
|
||||
port: 9090
|
||||
service: readiness
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
# TODO(pedrotorres): set better default values avoiding overcommitment
|
||||
resources:
|
||||
requests:
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: scaler
|
||||
spec:
|
||||
replicas: 1
|
|
@ -0,0 +1,4 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
|
@ -12,14 +12,6 @@ rules:
|
|||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- http.keda.sh
|
||||
resources:
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
<img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=bd8914ff-fcda-4c0c-ab57-6fc671ae6cff" style="display:none;" />
|
|
@ -23,7 +23,7 @@ The [operator](../operator) runs inside the Kubernetes namespace to which they'r
|
|||
|
||||
- Update an internal routing table that maps incoming HTTP hostnames to internal applications.
|
||||
- Furnish this routing table information to interceptors so that they can properly route requests.
|
||||
- Create a [`ScaledObject`](https://keda.sh/docs/2.3/concepts/scaling-deployments/#scaledobject-spec) for the `Deployment` specified in the `HTTPScaledObject` resource.
|
||||
- Create a [`ScaledObject`](https://keda.sh/docs/latest/concepts/scaling-deployments/#scaledobject-spec) for the `Deployment` specified in the `HTTPScaledObject` resource.
|
||||
|
||||
When the `HTTPScaledObject` is deleted, the operator reverses all of the aforementioned actions.
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
## Why does this project route HTTP requests?
|
||||
|
||||
In order to autoscale a `Deployment`, KEDA-HTTP needs to be involved with routing HTTP requests. However, the project is minimally involved with routing and we're working on ways to get out of the "critical path" of an HTTP request as much as possible. For more information, please see our [scope](./scope.md) document.
|
||||
In order to autoscale a workload, KEDA-HTTP needs to be involved with routing HTTP requests. However, the project is minimally involved with routing and we're working on ways to get out of the "critical path" of an HTTP request as much as possible. For more information, please see our [scope](./scope.md) document.
|
||||
|
||||
## How is this project similar or different from [Osiris](https://github.com/deislabs/osiris)?
|
||||
|
||||
|
@ -13,7 +13,7 @@ Osiris and KEDA-HTTP have similar features:
|
|||
|
||||
However, Osiris and KEDA-HTTP differ in several ways:
|
||||
|
||||
- Autoscaling concerns are implemented separately from the application resources - `Service`, `Ingress`, `Deployment` and more in KEDA-HTTP. With Osiris, those concerns are baked into each app resource.
|
||||
- Autoscaling concerns are implemented separately from the application resources - `Service`, `Ingress`, `Deployment`, `StatefulSet`, `/scale` and more in KEDA-HTTP. With Osiris, those concerns are baked into each app resource.
|
||||
- The KEDA-HTTP operator can automatically deploy and configure networking and compute resources necessary for an HTTP application to autoscale. Osiris does not have this functionality.
|
||||
- Osiris is currently archived in GitHub.
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
The HTTP Add-on is highly modular and, as expected, builds on top of KEDA core. Below are some additional components:
|
||||
|
||||
- **Operator** - watches for `ScaledHTTPObject` CRD resources and creates necessary backing Kubernetes resources (e.g. `Deployment`s, `Service`s, `ScaledObject`s, and so forth)
|
||||
- **Operator** - watches for `HTTPScaledObject` CRD resources and creates necessary backing Kubernetes resources (e.g. `Deployment`s, `Service`s, `ScaledObject`s, and so forth)
|
||||
- **Scaler** - communicates scaling-related metrics to KEDA. By default, the operator will install this for you as necessary.
|
||||
- **Interceptor** - a cluster-internal proxy that proxies incoming HTTP requests, communicates HTTP queue size metrics to the scaler, and holds requests in a temporary request queue when there are not yet any available app `Pod`s ready to serve. By default, the operator will install this for you as necessary.
|
||||
>There is [pending work](https://github.com/kedacore/http-add-on/issues/354) that may eventually make this component optional.
|
||||
|
@ -19,9 +19,9 @@ Before you install any of these components, you need to install KEDA. Below are
|
|||
>This document will rely on environment variables such as `${NAMESPACE}` to indicate a value you should customize and provide to the relevant command. In the below `helm install` command, `${NAMESPACE}` should be the namespace you'd like to install KEDA into.
|
||||
|
||||
```console
|
||||
$ helm repo add kedacore https://kedacore.github.io/charts
|
||||
$ helm repo update
|
||||
$ helm install keda kedacore/keda --namespace ${NAMESPACE} --create-namespace
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
helm repo update
|
||||
helm install keda kedacore/keda --namespace ${NAMESPACE} --create-namespace
|
||||
```
|
||||
|
||||
>The above command installs KEDA in cluster-global mode. Add `--set watchNamespace=<target namespace>` to install KEDA in namespaced mode.
|
||||
|
@ -49,7 +49,7 @@ There are a few values that you can pass to the above `helm install` command by
|
|||
>If you want to install the latest build of the HTTP Add-on, set `version` to `canary`:
|
||||
|
||||
```console
|
||||
$ helm install http-add-on kedacore/keda-add-ons-http --create-namespace --namespace ${NAMESPACE} --set images.tag=canary
|
||||
helm install http-add-on kedacore/keda-add-ons-http --create-namespace --namespace ${NAMESPACE} --set images.tag=canary
|
||||
```
|
||||
|
||||
For an exhaustive list of configuration options, see the official HTTP Add-on chart [values.yaml file](https://github.com/kedacore/charts/blob/master/http-add-on/values.yaml).
|
||||
|
@ -59,25 +59,30 @@ For an exhaustive list of configuration options, see the official HTTP Add-on ch
|
|||
Local clusters like [Microk8s](https://microk8s.io/) offer in-cluster image registries. These are popular tools to speed up and ease local development. If you use such a tool for local development, we recommend that you use and push your images to its local registry. When you do, you'll want to set your `images.*` variables to the address of the local registry. In the case of MicroK8s, that address is `localhost:32000` and the `helm install` command would look like the following:
|
||||
|
||||
```console
|
||||
$ helm repo add kedacore https://kedacore.github.io/charts
|
||||
$ helm repo update
|
||||
$ helm pull kedacore/keda-add-ons-http --untar --untardir ./charts
|
||||
$ helm upgrade kedahttp ./charts/keda-add-ons-http \
|
||||
--install \
|
||||
--namespace ${NAMESPACE} \
|
||||
--create-namespace \
|
||||
--set image=localhost:32000/keda-http-operator \
|
||||
--set images.scaler=localhost:32000/keda-http-scaler \
|
||||
--set images.interceptor=localhost:32000/keda-http-interceptor
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
helm repo update
|
||||
helm pull kedacore/keda-add-ons-http --untar --untardir ./charts
|
||||
helm upgrade kedahttp ./charts/keda-add-ons-http \
|
||||
--install \
|
||||
--namespace ${NAMESPACE} \
|
||||
--create-namespace \
|
||||
--set image=localhost:32000/keda-http-operator \
|
||||
--set images.scaler=localhost:32000/keda-http-scaler \
|
||||
--set images.interceptor=localhost:32000/keda-http-interceptor
|
||||
```
|
||||
|
||||
## Compatibility Table
|
||||
|
||||
| HTTP Add-On version | KEDA version | Kubernetes version |
|
||||
|---------------------|--------------|--------------------|
|
||||
| 0.6.0 | v2.12 | v1.26 - v1.28 |
|
||||
| 0.5.1 | v2.10 | v1.24 - v1.26 |
|
||||
| 0.5.0 | v2.9 | v1.23 - v1.25 |
|
||||
| HTTP Add-On version | KEDA version | Kubernetes version |
|
||||
|---------------------|-------------------|--------------------|
|
||||
| main | v2.16 | v1.30 - v1.32 |
|
||||
| 0.10.0 | v2.16 | v1.30 - v1.32 |
|
||||
| 0.9.0 | v2.16 | v1.29 - v1.31 |
|
||||
| 0.8.0 | v2.14 | v1.27 - v1.29 |
|
||||
| 0.7.0 | v2.13 | v1.27 - v1.29 |
|
||||
| 0.6.0 | v2.12 | v1.26 - v1.28 |
|
||||
| 0.5.1 | v2.10 | v1.24 - v1.26 |
|
||||
| 0.5.0 | v2.9 | v1.23 - v1.25 |
|
||||
|
||||
## Next Steps
|
||||
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
# Integrations
|
||||
|
||||
## Istio
|
||||
|
||||
### Configuration Steps
|
||||
|
||||
1. **Proxy Service in Virtual Service:**
|
||||
|
||||
- Within the Istio virtual service definition, add a proxy service as a route destination.
|
||||
- Set the host of this proxy service to `keda-add-ons-http-interceptor-proxy` (the KEDA HTTP Add-on interceptor service).
|
||||
- Set the port to `8080` (the default interceptor port).
|
||||
|
||||
**Example YAML**
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: example
|
||||
namespace: default
|
||||
spec:
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: keda-add-ons-http-interceptor-proxy
|
||||
port: 8080
|
||||
```
|
||||
|
||||
2. **Namespace Alignment:**
|
||||
|
||||
- Ensure that both the KEDA HTTP Addon and the Istio virtual service are deployed within the same Kubernetes namespace. This ensures proper communication between the components.
|
||||
|
||||
### Behavior
|
||||
|
||||
- When a user makes a request, the Istio virtual service routes it to the KEDA HTTP Addon interceptor service.
|
||||
- The interceptor service captures request metrics and relays them to the KEDA scaler component.
|
||||
- Based on these metrics and scaling rules defined in the KEDA configuration, the KEDA scaler automatically scales the target workload (e.g., a deployment) up or down (including scaling to zero).
|
||||
|
||||
### Troubleshooting Tips
|
||||
|
||||
1. **Error: `context marked done while waiting for workload reach > 0 replicas`**
|
||||
|
||||
- This error indicates that the `KEDA_CONDITION_WAIT_TIMEOUT` value (default: 20 seconds) might be too low and that workload scaling may not complete within that timeframe.
|
||||
- To increase the timeout:
|
||||
- If using Helm, adjust the `interceptor.replicas.waitTimeout` parameter (see the reference below and the example after this list).
|
||||
- Reference: [https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L139](https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L139)
|
||||
|
||||
2. **502 Errors with POST Requests:**
|
||||
|
||||
- You might encounter 502 errors during POST requests when the request is routed through the interceptor service. This could be due to insufficient timeout settings.
|
||||
- To adjust timeout parameters:
|
||||
- If using Helm, modify the following parameters (see reference below):
|
||||
- `KEDA_HTTP_CONNECT_TIMEOUT`
|
||||
- `KEDA_RESPONSE_HEADER_TIMEOUT`
|
||||
- `KEDA_HTTP_EXPECT_CONTINUE_TIMEOUT`
|
||||
- Reference: [https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L152](https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L152)
|
||||
|
||||
3. **Immediate Scaling Down to Zero:**
|
||||
- If `minReplica` is set to 0 in the HTTPScaledObject, the application will immediately scale down to 0.
|
||||
- There's currently no built-in mechanism to delay this initial scaling.
|
||||
- A PR is in progress to add this support: [https://github.com/kedacore/keda/pull/5478](https://github.com/kedacore/keda/pull/5478)
|
||||
- As a workaround, keep `minReplica` at 1 initially and update it to 0 after the desired delay.
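For example, assuming the add-on was installed via Helm under the hypothetical release name `http-add-on`, the wait timeout from item 1 above could be raised like so:

```console
helm upgrade http-add-on kedacore/keda-add-ons-http \
  --namespace ${NAMESPACE} \
  --reuse-values \
  --set interceptor.replicas.waitTimeout=60s
```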
|
||||
|
||||
---
|
||||
|
||||
## Azure Front Door
|
||||
|
||||
### Configuration Steps
|
||||
|
||||
1. **Service Setup in Front Door:**
|
||||
- Set up Azure Front Door to route traffic to your AKS cluster.
|
||||
- Ensure that the `Origin Host` header matches the actual AKS host. Front Door enforces case-sensitive routing, so configure the `Origin Host` exactly as the AKS host name.
|
||||
|
||||
2. **KEDA HTTP Add-on Integration:**
|
||||
- Use an `HTTPScaledObject` to manage scaling based on incoming traffic.
|
||||
- Front Door should route traffic to the KEDA HTTP Add-on interceptor service in your AKS cluster.
|
||||
|
||||
3. **Case-Sensitive Hostnames:**
|
||||
- Be mindful that Azure Front Door treats the `Origin Host` header in a case-sensitive manner.
|
||||
- Ensure consistency between the AKS ingress hostname (e.g., `foo.bar.com`) and Front Door configuration.
|
||||
|
||||
### Troubleshooting Tips
|
||||
|
||||
- **404 Error for Hostnames with Different Case:**
|
||||
- Requests routed with inconsistent casing (e.g., `foo.Bar.com` vs. `foo.bar.com`) will trigger 404 errors. Make sure the `Origin Host` header matches the AKS ingress host exactly.
|
||||
- If you encounter errors like `PANIC=value method k8s.io/apimachinery/pkg/types.NamespacedName.MarshalLog called using nil *NamespacedName pointer`, verify the `Origin Host` header configuration.
|
||||
|
||||
### Expected Behavior
|
||||
|
||||
- Azure Front Door routes traffic to AKS based on a case-sensitive host header.
|
||||
- The KEDA HTTP Add-on scales the workload in response to traffic, based on predefined scaling rules.
|
||||
|
||||
|
||||
---
|
|
@ -0,0 +1,62 @@
|
|||
# Configuring metrics for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
### Exportable metrics:
|
||||
* **Pending request count** - the number of pending requests for a given host.
|
||||
* **Total request count** - the total number of requests for a given host with method, path and response code attributes.
|
||||
|
||||
There are currently two supported methods for exposing metrics from the interceptor proxy service - via a Prometheus compatible metrics endpoint or by pushing metrics to an OTEL HTTP collector.
|
||||
|
||||
### Configuring the Prometheus compatible metrics endpoint
|
||||
When configured, the interceptor proxy can expose metrics on a Prometheus compatible endpoint.
|
||||
|
||||
This endpoint can be enabled by setting the `OTEL_PROM_EXPORTER_ENABLED` environment variable to `true` on the interceptor deployment (`true` by default) and by setting `OTEL_PROM_EXPORTER_PORT` to an unused port for the endpoint to be made available on (`2223` by default).
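As a quick sanity check, you can port-forward the interceptor metrics `Service` (named `keda-add-ons-http-interceptor-metrics` under the chart's default name prefix) and scrape it; the `/metrics` path is an assumption based on the usual Prometheus exporter convention:

```console
# forward the metrics port locally, then scrape it
kubectl port-forward service/keda-add-ons-http-interceptor-metrics 2223:2223 --namespace ${NAMESPACE} &
curl http://localhost:2223/metrics
```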
|
||||
|
||||
### Configuring the OTEL HTTP exporter
|
||||
When configured, the interceptor proxy can export metrics to an OTEL HTTP collector.
|
||||
|
||||
The OTEL exporter can be enabled by setting the `OTEL_EXPORTER_OTLP_METRICS_ENABLED` environment variable to `true` on the interceptor deployment (`false` by default). When enabled, the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable must also be configured so the exporter knows which collector to send the metrics to (e.g. http://opentelemetry-collector.open-telemetry-system:4318).
|
||||
|
||||
If you need to provide any headers, such as authentication details, in order to utilise your OTEL collector, you can add them to the `OTEL_EXPORTER_OTLP_HEADERS` environment variable. The frequency at which the metrics are exported can be configured by setting `OTEL_METRIC_EXPORT_INTERVAL` to the number of seconds you require between each export interval (`30` by default).
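Putting this together, a minimal sketch of the relevant interceptor deployment environment (the bearer token header is purely illustrative):

```yaml
env:
  - name: OTEL_EXPORTER_OTLP_METRICS_ENABLED
    value: "true"
  - name: OTEL_EXPORTER_OTLP_ENDPOINT
    value: "http://opentelemetry-collector.open-telemetry-system:4318"
  # Hypothetical authentication header for the collector
  - name: OTEL_EXPORTER_OTLP_HEADERS
    value: "Authorization=Bearer my-token"
  # Export metrics every 30 seconds (the default)
  - name: OTEL_METRIC_EXPORT_INTERVAL
    value: "30"
```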
|
||||
|
||||
# Configuring TLS for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
The interceptor proxy can run both an HTTP and an HTTPS server simultaneously to allow you to scale workloads that use either protocol. By default, the interceptor proxy will only serve over HTTP, but this behavior can be changed by configuring the appropriate environment variables on the deployment.
|
||||
|
||||
The TLS server can be enabled by setting the environment variable `KEDA_HTTP_PROXY_TLS_ENABLED` to `true` on the interceptor deployment (`false` by default). The TLS server will start on port `8443` by default, but this can be configured by setting `KEDA_HTTP_PROXY_TLS_PORT` to your desired port number. The TLS server requires valid TLS certificates to start; the paths to the certificates can be configured via the `KEDA_HTTP_PROXY_TLS_CERT_PATH` and `KEDA_HTTP_PROXY_TLS_KEY_PATH` environment variables (`/certs/tls.crt` and `/certs/tls.key` by default).
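For example, assuming you already have a certificate and key on disk, the default paths can be satisfied by mounting a standard TLS secret (the `keda-tls` secret name matches the volume example elsewhere in these docs):

```console
kubectl create secret tls keda-tls \
  --cert=./tls.crt \
  --key=./tls.key \
  --namespace ${NAMESPACE}
```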
|
||||
|
||||
For setting multiple TLS certs, set `KEDA_HTTP_PROXY_TLS_CERT_STORE_PATHS` to a comma-separated list of directories that will be recursively searched for valid cert/key pairs. Currently, two naming patterns are supported:
|
||||
* `XYZ.crt` + `XYZ.key` - the convention used by Kubernetes Secrets of type `kubernetes.io/tls`
|
||||
* `XYZ.pem` + `XYZ-key.pem`
|
||||
|
||||
To disable certificate chain verification, set `KEDA_HTTP_PROXY_TLS_SKIP_VERIFY` to `true`.
|
||||
|
||||
The matching between certs and requests is performed during the TLS ClientHello message: the SNI server name is compared against the SANs provided in each cert, and the first matching cert is used for the rest of the TLS handshake.
|
||||
# Configuring tracing for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
### Supported Exporters:
|
||||
* **console** - The console exporter is useful for development and debugging tasks, and is the simplest to set up.
|
||||
* **http/protobuf** - To send trace data to an OTLP endpoint (like the collector or Jaeger >= v1.35.0) you’ll want to configure an OTLP exporter that sends to your endpoint.
|
||||
* **grpc** - To send trace data over a gRPC connection to an OTLP endpoint (like the collector or Jaeger >= v1.35.0) you’ll want to configure an OTLP exporter that sends to your endpoint.
|
||||
|
||||
### Configuring tracing with console exporter
|
||||
|
||||
To enable tracing with the console exporter, the `OTEL_EXPORTER_OTLP_TRACES_ENABLED` environment variable should be set to `true` on the interceptor deployment (`false` by default).
|
||||
Then set `OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` to `console` (`console` by default). Other supported protocols are `http/protobuf` and `grpc`.
|
||||
Finally, set `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` to `"http://localhost:4318/v1/traces"` (the default).
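Putting this together, a minimal sketch of the relevant interceptor deployment environment for the console exporter:

```yaml
env:
  - name: OTEL_EXPORTER_OTLP_TRACES_ENABLED
    value: "true"
  # "console" prints spans to stdout; useful for development
  - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL
    value: "console"
  - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
    value: "http://localhost:4318/v1/traces"
```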
|
||||
|
||||
|
||||
### Configuring tracing with OTLP exporter
|
||||
When configured, the interceptor proxy can export traces to an OTEL HTTP collector.
|
||||
|
||||
To enable tracing with the OTLP exporter, the `OTEL_EXPORTER_OTLP_TRACES_ENABLED` environment variable should be set to `true` on the interceptor deployment (`false` by default).
|
||||
Then set `OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` to `http/protobuf` or `grpc` (`console` by default).
|
||||
Finally, set `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` to the collector to send the traces to (e.g. `http://opentelemetry-collector.open-telemetry-system:4318/v1/traces`; `"http://localhost:4318/v1/traces"` by default).
|
||||
NOTE: the full path must be set, including `<scheme>://<host>:<port>/<path>`.
|
||||
|
||||
|
||||
Optional variables:
|
||||
* `OTEL_EXPORTER_OTLP_HEADERS` - extra headers to pass with the spans, e.g. authentication details for your OTEL collector (`"key1=value1,key2=value2"`)
|
||||
* `OTEL_EXPORTER_OTLP_TRACES_INSECURE` - send traces to the tracing endpoint via HTTP rather than HTTPS (`false` by default)
|
||||
* `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - the batcher timeout in seconds for sending a batch of data points (`5` by default)
|
||||
|
||||
### Configuring Service Failover
|
|
@ -7,5 +7,7 @@ Here is an overview of detailed documentation:
|
|||
- [Design](design.md)
|
||||
- [Use-Cases](use_cases.md)
|
||||
- [Walkthrough](walkthrough.md)
|
||||
- [Operate](operate.md)
|
||||
- [Developing](developing.md)
|
||||
- [Integrations](integrations.md)
|
||||
- [FAQ](faq.md)
|
||||
|
|
|
@ -0,0 +1,136 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.10.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation disables ScaledObject generation and management while keeping the routing and metrics available. If the ScaledObject has already been created, it is removed, allowing you to use user-managed ScaledObjects that point at the add-on scaler directly (supporting all ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
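For instance, to keep the routing and metrics while managing the ScaledObject yourself, the annotation would be flipped to `"true"` (a minimal sketch reusing the `xkcd` example above):

```yaml
kind: HTTPScaledObject
apiVersion: http.keda.sh/v1alpha1
metadata:
  name: xkcd
  annotations:
    # Skip ScaledObject creation; routing and metrics remain active
    httpscaledobject.keda.sh/skip-scaledobject-creation: "true"
```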
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same workload as you entered in the `name` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
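For illustration, here is a sketch of a `scaleTargetRef` that references the port by name rather than by number; the CRD validation requires exactly one of `port` or `portName`, and the `http` port name is an assumption that must match a named port on the `xkcd` Service:

```yaml
scaleTargetRef:
  name: xkcd
  kind: Deployment
  apiVersion: apps/v1
  service: xkcd
  portName: http  # must match a spec.ports[].name on the xkcd Service
```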
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the workload was last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with low and sporadic traffic could be scaled to zero unexpectedly. In those cases we recommend extending this period to ensure that doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains two nested sections (`requestRate` and `concurrency`) which are mutually exclusive.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Request information is stored in memory; aggregating over long periods (longer than 5 minutes) or at too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although the `window` and/or `granularity` can be updated, doing so replaces all the stored request count information. This can produce unexpected scaling behaviour until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -0,0 +1,73 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.6.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`, and we'll focus on the `spec` field.
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment`
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it up. Conversely, if your application has fewer than this number of pending requests, the HTTP Add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the workload was last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with low and sporadic traffic could be scaled to zero unexpectedly. In those cases we recommend extending this period to ensure that doesn't happen.
|
|
@ -0,0 +1,87 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.7.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`, and we'll focus on the `spec` field.
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it up. Conversely, if your application has fewer than this number of pending requests, the HTTP Add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the workload was last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with low and sporadic traffic could be scaled to zero unexpectedly. In those cases we recommend extending this period to ensure that doesn't happen.
|
|
@ -0,0 +1,144 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.8.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation disables ScaledObject generation and management while keeping the routing and metrics available. If the ScaledObject has already been created, it is removed, allowing you to use user-managed ScaledObjects that point at the add-on scaler directly (supporting all ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it up. Conversely, if your application has fewer than this number of pending requests, the HTTP Add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the workload was last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with low and sporadic traffic could be scaled to zero unexpectedly. In those cases we recommend extending this period to ensure that doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains two nested sections (`requestRate` and `concurrency`) which are mutually exclusive.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Request information is stored in memory; aggregating over long periods (longer than 5 minutes) or at too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although the `window` and/or `granularity` can be updated, doing so replaces all the stored request count information. This can produce unexpected scaling behaviour until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -0,0 +1,136 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.9.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation disables ScaledObject generation and management while keeping the routing and metrics available. If the ScaledObject has already been created, it is removed, allowing you to use user-managed ScaledObjects that point at the add-on scaler directly (supporting all ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same workload as you entered in the `name` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the workload was last reported active before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with low and sporadic traffic could be scaled to zero unexpectedly. In those cases we recommend extending this period to ensure that doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains two nested sections (`requestRate` and `concurrency`) which are mutually exclusive.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Request information is stored in memory; aggregating over long periods (longer than 5 minutes) or at too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although the `window` and/or `granularity` can be updated, doing so replaces all the stored request count information. This can produce unexpected scaling behaviour until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -0,0 +1,136 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `vX.X.X` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation disables ScaledObject generation and management while keeping the routing and metrics available. If the ScaledObject has already been created, it is removed, allowing you to use user-managed ScaledObjects that point at the add-on scaler directly (supporting all ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same workload as you entered in the `name` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
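For example, if the `Service` defines a named port, the `scaleTargetRef` could reference it like this (a minimal sketch; the port name `http` is an assumed example):

```yaml
scaleTargetRef:
  name: xkcd
  kind: Deployment
  apiVersion: apps/v1
  service: xkcd
  portName: http # assumed: must match a named port defined on the xkcd Service
```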
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active request before scaling the workload back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with sparse, random traffic could scale to 0 unexpectedly. In those cases, we recommend extending this period to ensure that it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Request information is stored in memory; aggregating over long periods (longer than 5 minutes) or at too fine a granularity (less than 1 second) could cause performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although `window` and/or `granularity` can be updated, doing so replaces all of the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
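Putting the three fields together, the `requestRate` block from the example at the top of this document looks like this (the values shown are the documented defaults):

```yaml
scalingMetric:
  requestRate:
    granularity: 1s  # sampling granularity of the aggregated request counts
    targetValue: 100 # target request rate for the scaling calculation
    window: 1m       # aggregation window for the rate calculation
```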
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This was the only scaling behaviour before v0.8.0.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
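For completeness, the equivalent `concurrency` block from the example at the top of this document (remember that it is mutually exclusive with `requestRate`):

```yaml
scalingMetric:
  concurrency:
    targetValue: 100 # target number of concurrent requests
```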
|
|
@ -19,11 +19,11 @@ Moving this application to Kubernetes may make sense for several reasons, but th
|
|||
|
||||
If the application _is_ being moved to Kubernetes, you would follow these steps to get it autoscaling and routing with KEDA-HTTP:
|
||||
|
||||
- Create a `Deployment` and `Service`
|
||||
- Create a workload and `Service`
|
||||
- [Install](./install.md) the HTTP Add-on
|
||||
- Create a single `HTTPScaledObject` in the same namespace as the `Deployment` and `Service` you created
|
||||
- Create a single `HTTPScaledObject` in the same namespace as the workload and `Service` you created
|
||||
|
||||
At that point, the operator will create the proper autoscaling and routing infrastructure behind the scenes and the application will be ready to scale.
|
||||
At that point, the operator will create the proper autoscaling and routing infrastructure behind the scenes and the application will be ready to scale. Any request received by the interceptor with the proper host will be routed to the proper backend.
|
||||
|
||||
## Current HTTP Server in Kubernetes
|
||||
|
||||
|
@ -36,6 +36,6 @@ In this case, the reasoning for adding the HTTP Add-on would be clear - adding a
|
|||
Getting the HTTP Add-on working can be done transparently and without downtime to the application:
|
||||
|
||||
- [Install](./install.md) the add-on. This step will have no effect on the running application.
|
||||
- Create a new `HTTPScaledObject`. This step activates autoscaling for the `Deployment` that you specify and the application will immediately start scaling up and down based on incoming traffic through the interceptor that was created.
|
||||
- Create a new `HTTPScaledObject`. This step activates autoscaling for the workload that you specify and the application will immediately start scaling up and down based on incoming traffic through the interceptor that was created.
|
||||
|
||||
[Go back to landing page](./)
|
||||
|
|
|
@ -11,10 +11,17 @@ If you haven't installed KEDA and the HTTP Add-on (this project), please do so f
|
|||
You'll need to install a `Deployment` and `Service` first. You'll tell the add-on to begin scaling it up and down after this step. We've provided a [Helm](https://helm.sh) chart in this repository that you can use to try it out. Use this command to create the resources you need.
|
||||
|
||||
```console
|
||||
$ helm install xkcd ./examples/xkcd -n ${NAMESPACE}
|
||||
helm install xkcd ./examples/xkcd -n ${NAMESPACE}
|
||||
```
|
||||
|
||||
You'll need to clone the repository to get access to this chart. If you have your own `Deployment` and `Service` installed, you can go right to creating an `HTTPScaledObject` in the next section.
|
||||
#### xkcd exposed with GatewayAPI
|
||||
Alternatively, if you'd like to try the add-on along with the GatewayAPI, you can first install the GatewayAPI CRDs and a GatewayAPI implementation, for example as described in a [section below](#installing-and-using-the-eg-gatewayapi), and install the application with `httproute=true`, which will also deploy a properly configured `HTTPRoute`.
|
||||
|
||||
```console
|
||||
helm install xkcd ./examples/xkcd -n ${NAMESPACE} --set httproute=true
|
||||
```
|
||||
|
||||
You'll need to clone the repository to get access to this chart. If you have your own workload and `Service` installed, you can go right to creating an `HTTPScaledObject` in the next section.
|
||||
|
||||
>If you are running KEDA and the HTTP Add-on in cluster-global mode, you can install the XKCD chart in any namespace you choose. If you do so, make sure you add `--set ingressNamespace=${NAMESPACE}` to the above installation command.
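For example, a sketch of the full command combining the flags already shown in this section:

```console
helm install xkcd ./examples/xkcd -n ${NAMESPACE} --set ingressNamespace=${NAMESPACE}
```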
|
||||
|
||||
|
@ -25,29 +32,27 @@ You'll need to clone the repository to get access to this chart. If you have you
|
|||
You interact with the operator via a CRD called `HTTPScaledObject`. This CRD object instructs interceptors to forward requests for a given host to your app's backing `Service`. To get an example app up and running, read the notes below and then run the subsequent command from the root of this repository.
|
||||
|
||||
```console
|
||||
$ kubectl create -n $NAMESPACE -f examples/v0.3.0/httpscaledobject.yaml
|
||||
kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
|
||||
```
|
||||
|
||||
>If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](./ref/v0.3.0/http_scaled_object.md).
|
||||
>If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](./ref/v0.10.0/http_scaled_object.md).
|
||||
|
||||
## Testing Your Installation
|
||||
|
||||
You've now installed a web application and activated autoscaling by creating an `HTTPScaledObject` for it. For autoscaling to work properly, HTTP traffic needs to route through the `Service` that the add-on has set up. You can use `kubectl port-forward` to quickly test things out:
|
||||
|
||||
```console
|
||||
$ kubectl port-forward svc/keda-add-ons-http-interceptor-proxy -n ${NAMESPACE} 8080:80
|
||||
kubectl port-forward svc/keda-add-ons-http-interceptor-proxy -n ${NAMESPACE} 8080:8080
|
||||
```
|
||||
|
||||
### Routing to the Right `Service`
|
||||
|
||||
As said above, you need to route your HTTP traffic to the `Service` that the add-on has created. If you have existing systems - like an ingress controller - you'll need to anticipate the name of these created `Service`s. Each one will be named consistently like so, in the same namespace as the `HTTPScaledObject` and your application (i.e. `$NAMESPACE`):
|
||||
As mentioned above, you need to route your HTTP traffic to the `Service` that the add-on created during installation. If you have existing systems - like an ingress controller - you'll need to anticipate the name of these created `Service`s. Each one will be named consistently like so, in the same namespace as the `HTTPScaledObject` and your application (i.e. `$NAMESPACE`):
|
||||
|
||||
```console
|
||||
keda-add-ons-http-interceptor-proxy
|
||||
```
|
||||
|
||||
>This is installed by the [Helm chart](https://github.com/kedacore/charts/tree/master/http-add-on) as a `ClusterIP` `Service` by default.
|
||||
|
||||
#### Installing and Using the [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/#using-helm) Ingress Controller
|
||||
|
||||
As mentioned above, the `Service` that the add-on creates will be inaccessible over the network from outside of your Kubernetes cluster.
|
||||
|
@ -64,10 +69,64 @@ helm install ingress-nginx ingress-nginx/ingress-nginx -n ${NAMESPACE}
|
|||
|
||||
An [`Ingress`](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource was already created as part of the [xkcd chart](../examples/xkcd/templates/ingress.yaml), so the installed NGINX ingress controller will initialize, detect the `Ingress`, and begin routing to the xkcd interceptor `Service`.
|
||||
|
||||
>NOTE: You may have to create an external `Service` of `type: ExternalName` pointing to the interceptor in its namespace and use it from the `Ingress` manifest.
|
||||
|
||||
When you're ready, please run `kubectl get svc -n ${NAMESPACE}`, find the `ingress-nginx-controller` service, and copy its `EXTERNAL-IP`. This is the IP address at which your application will be available on the public internet.
|
||||
|
||||
>Note: you should go further and set your DNS records appropriately and set up a TLS certificate for this IP address. Instructions for doing that are out of the scope of this document, though.
|
||||
|
||||
#### Installing and Using the [eg](https://gateway.envoyproxy.io/latest/install/install-helm/) GatewayAPI
|
||||
|
||||
Similarly to exposing your service with an `Ingress`, you can expose it with an `HTTPRoute` as part of the [GatewayAPI](https://github.com/kubernetes-sigs/gateway-api). The following steps describe how to install one of the many GatewayAPI implementations: Envoy Gateway.
|
||||
You should install the `xkcd` helm chart with `--set httproute=true` as [explained above](#xkcd-exposed-with-gatewayapi).
|
||||
|
||||
The Helm chart is publicly available and hosted on Docker Hub:
|
||||
```console
|
||||
helm install eg oci://docker.io/envoyproxy/gateway-helm --version v1.0.2 -n envoy-gateway-system --create-namespace
|
||||
```
|
||||
Before creating a new `Gateway`, wait for Envoy Gateway to become available:
|
||||
```console
|
||||
kubectl wait --timeout=5m -n envoy-gateway-system deployment/envoy-gateway --for=condition=Available
|
||||
```
|
||||
Create a `GatewayClass` and a `Gateway`:
|
||||
```console
|
||||
cat << 'EOF' | kubectl apply -f -
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: GatewayClass
|
||||
metadata:
|
||||
name: eg
|
||||
spec:
|
||||
controllerName: gateway.envoyproxy.io/gatewayclass-controller
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: eg
|
||||
namespace: envoy-gateway-system
|
||||
spec:
|
||||
gatewayClassName: eg
|
||||
listeners:
|
||||
- name: http
|
||||
protocol: HTTP
|
||||
port: 80
|
||||
allowedRoutes:
|
||||
namespaces:
|
||||
from: All
|
||||
EOF
|
||||
```
|
||||
> 💡 Note that the `ExternalName`-type `Service`, used to route traffic from an `Ingress` defined in one namespace to the interceptor `Service` defined in another, is not necessary with the GatewayAPI.
|
||||
> The GatewayAPI defines [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/api-types/referencegrant/) to allow `HTTPRoutes` to reference `Services` and other backend types from different `Namespaces`.
|
||||
|
||||
You can find the IP address used throughout the rest of this document with:
|
||||
```console
|
||||
kubectl get gateway -n envoy-gateway-system
|
||||
```
|
||||
For example (your IP will likely differ):
|
||||
```
|
||||
NAME CLASS ADDRESS PROGRAMMED AGE
|
||||
eg eg 172.24.255.201 True 16s
|
||||
```
|
||||
|
||||
### Making an HTTP Request to your App
|
||||
|
||||
Now that you have your application running and your ingress configured, you can issue an HTTP request. To do so, you'll need to know the IP address to request. If you're using an ingress controller, that is the IP of the ingress controller's `Service`. If you're using a "raw" `Service` with `type: LoadBalancer`, that is the IP address of the `Service` itself.
|
||||
|
@ -75,9 +134,53 @@ Now that you have your application running and your ingress configured, you can
|
|||
Regardless, you can use the below `curl` command to make a request to your application:
|
||||
|
||||
```console
|
||||
curl -H "Host: myhost.com" <Your IP>
|
||||
curl -H "Host: myhost.com" <Your IP>/test
|
||||
```
|
||||
|
||||
>Note the `-H` flag above to specify the `Host` header. This is needed to tell the interceptor how to route the request. If you have a DNS name set up for the IP, you don't need this header.
|
||||
|
||||
You can also port-forward to the interceptor service to make the request:
|
||||
|
||||
```console
|
||||
kubectl port-forward svc/keda-add-ons-http-interceptor-proxy -n ${NAMESPACE} 8080:8080
|
||||
curl -H "Host: myhost.com" localhost:8080/test
|
||||
```
|
||||
|
||||
### Integrating HTTP Add-On Scaler with other KEDA scalers
|
||||
|
||||
For scenarios where you want to integrate the HTTP Add-on scaler with other KEDA scalers, you can set the `"httpscaledobject.keda.sh/skip-scaledobject-creation"` annotation to `true` on your `HTTPScaledObject`. The reconciler will then skip the KEDA core `ScaledObject` creation, which allows you to create your own `ScaledObject` and add the HTTP scaler as one of your triggers.
|
||||
|
||||
> 💡 Ensure that your `ScaledObject` is created with a different name than the `HTTPScaledObject`, so that it is not removed by the reconciler.
|
||||
|
||||
If you don't know how to set the external scaler in the `ScaledObject`, you can first deploy your `HTTPScaledObject` with the annotation set to `false` in order to obtain the generated trigger spec to use in your own managed `ScaledObject`.
|
||||
|
||||
1. Deploy your `HTTPScaledObject` with the annotation set to `false`:
|
||||
|
||||
```yaml
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
```
|
||||
|
||||
2. Take a copy of the current generated `external-push` trigger spec from the generated `ScaledObject`.
|
||||
|
||||
For example:
|
||||
|
||||
```yaml
|
||||
triggers:
|
||||
- type: external-push
|
||||
metadata:
|
||||
httpScaledObject: YOUR_HTTPSCALEDOBJECT_NAME
|
||||
scalerAddress: keda-add-ons-http-external-scaler.keda:9090
|
||||
```
|
||||
|
||||
3. Set the `"httpscaledobject.keda.sh/skip-scaledobject-creation"` annotation to `true` and apply the change. This will remove the originally created `ScaledObject`, allowing you to create your own.
|
||||
|
||||
```yaml
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "true"
|
||||
```
|
||||
|
||||
4. Add the `external-push` trigger taken in step 2 to your own `ScaledObject` and apply it.
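A minimal sketch of what the resulting user-managed `ScaledObject` could look like; the object name and the second `cron` trigger (and its values) are illustrative assumptions, not something the add-on requires:

```yaml
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: xkcd-scaledobject # assumed name; must differ from the HTTPScaledObject name
spec:
  scaleTargetRef:
    name: xkcd
  triggers:
    # the external-push trigger copied from the generated ScaledObject in step 2
    - type: external-push
      metadata:
        httpScaledObject: YOUR_HTTPSCALEDOBJECT_NAME
        scalerAddress: keda-add-ons-http-external-scaler.keda:9090
    # assumed second trigger: KEDA's cron scaler keeping 5 replicas during business hours
    - type: cron
      metadata:
        timezone: Etc/UTC
        start: 0 8 * * *
        end: 0 18 * * *
        desiredReplicas: "5"
```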
|
||||
|
||||
|
||||
[Go back to landing page](./)
|
||||
|
|
|
@ -0,0 +1,24 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -0,0 +1,16 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -0,0 +1,18 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -0,0 +1,24 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -0,0 +1,24 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}-proxy
|
||||
labels:
|
||||
{{- include "xkcd.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: ExternalName
|
||||
externalName: keda-add-ons-http-interceptor-proxy.keda
|
|
@ -0,0 +1,39 @@
|
|||
{{- if .Values.httproute }}
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: HTTPRoute
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
spec:
|
||||
parentRefs:
|
||||
- name: eg
|
||||
namespace: envoy-gateway-system
|
||||
hostnames:
|
||||
{{- range .Values.hosts }}
|
||||
- {{ . | toString }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- backendRefs:
|
||||
- kind: Service
|
||||
name: keda-add-ons-http-interceptor-proxy
|
||||
namespace: keda
|
||||
port: 8080
|
||||
matches:
|
||||
- path:
|
||||
type: PathPrefix
|
||||
value: /
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
kind: ReferenceGrant
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
namespace: keda
|
||||
spec:
|
||||
from:
|
||||
- group: gateway.networking.k8s.io
|
||||
kind: HTTPRoute
|
||||
namespace: {{ .Release.Namespace }}
|
||||
to:
|
||||
- group: ""
|
||||
kind: Service
|
||||
name: keda-add-ons-http-interceptor-proxy
|
||||
{{- end }}
|
|
@ -7,9 +7,17 @@ spec:
|
|||
hosts:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
targetPendingRequests: {{ .Values.targetPendingRequests }}
|
||||
{{- with .Values.pathPrefixes }}
|
||||
pathPrefixes:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
scalingMetric:
|
||||
concurrency:
|
||||
targetValue: {{ .Values.targetPendingRequests }}
|
||||
scaleTargetRef:
|
||||
deployment: {{ include "xkcd.fullname" . }}
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: {{ include "xkcd.fullname" . }}
|
||||
port: 8080
|
||||
replicas:
|
||||
|
|
|
@ -2,11 +2,10 @@ apiVersion: networking.k8s.io/v1
|
|||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
namespace: {{ .Values.ingressNamespace | default .Release.Namespace }}
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
kubernetes.io/ingress.class: nginx
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
{{- range .Values.hosts }}
|
||||
- host: {{ . | toString }}
|
||||
|
@ -16,7 +15,7 @@ spec:
|
|||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: keda-add-ons-http-interceptor-proxy
|
||||
name: {{ include "xkcd.fullname" $ }}-proxy
|
||||
port:
|
||||
number: 8080
|
||||
{{- end }}
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: "{{ include "xkcd.fullname" . }}-test-connection"
|
||||
labels:
|
||||
{{- include "xkcd.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
containers:
|
||||
- name: wget
|
||||
image: busybox
|
||||
command: ['wget']
|
||||
args: ['{{ include "xkcd.fullname" . }}:{{ .Values.service.port }}']
|
||||
restartPolicy: Never
|
|
@ -2,6 +2,9 @@ replicaCount: 1
|
|||
hosts:
|
||||
- "myhost.com"
|
||||
- "myhost2.com"
|
||||
pathPrefixes:
|
||||
- "/path1"
|
||||
- "/path2"
|
||||
targetPendingRequests: 200
|
||||
# This is the namespace that the ingress should be installed
|
||||
# into. It should be set to the same namespace as the
|
||||
|
|
go.mod
|
@ -1,102 +1,149 @@
|
|||
module github.com/kedacore/http-add-on
|
||||
|
||||
go 1.20
|
||||
go 1.24.3
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.2.4
|
||||
github.com/go-logr/zapr v1.2.4
|
||||
github.com/golang/mock v1.7.0-rc.1.0.20220812172401-5b455625bd2c
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.0.0
|
||||
github.com/kedacore/keda/v2 v2.12.0
|
||||
github.com/go-logr/logr v1.4.3
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0
|
||||
github.com/kedacore/keda/v2 v2.17.1
|
||||
github.com/kelseyhightower/envconfig v1.4.0
|
||||
github.com/onsi/ginkgo/v2 v2.12.1
|
||||
github.com/onsi/gomega v1.28.0
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/tj/assert v0.0.3
|
||||
go.uber.org/zap v1.26.0
|
||||
golang.org/x/sync v0.3.0
|
||||
google.golang.org/grpc v1.58.2
|
||||
google.golang.org/protobuf v1.31.0
|
||||
k8s.io/api v0.28.2
|
||||
k8s.io/apimachinery v0.28.2
|
||||
k8s.io/client-go v0.28.2
|
||||
k8s.io/code-generator v0.28.2
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
||||
sigs.k8s.io/controller-runtime v0.16.2
|
||||
sigs.k8s.io/kustomize/kustomize/v5 v5.1.1
|
||||
github.com/onsi/ginkgo/v2 v2.23.4
|
||||
github.com/onsi/gomega v1.37.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.36.0
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.uber.org/mock v0.5.2
|
||||
golang.org/x/sync v0.14.0
|
||||
google.golang.org/grpc v1.72.2
|
||||
google.golang.org/protobuf v1.36.6
|
||||
k8s.io/api v0.32.2
|
||||
k8s.io/apimachinery v0.32.2
|
||||
k8s.io/client-go v1.5.2
|
||||
k8s.io/code-generator v0.32.2
|
||||
k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979
|
||||
sigs.k8s.io/controller-runtime v0.19.7
|
||||
sigs.k8s.io/gateway-api v1.2.1
|
||||
sigs.k8s.io/kustomize/kustomize/v5 v5.6.0
|
||||
)
|
||||
|
||||
replace (
|
||||
// pin k8s.io to v0.31.7 & sigs.k8s.io/controller-runtime to v0.19.7
|
||||
github.com/google/cel-go => github.com/google/cel-go v0.20.1
|
||||
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.21.1
|
||||
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/common => github.com/prometheus/common v0.63.0
|
||||
k8s.io/api => k8s.io/api v0.31.7
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.31.7
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.31.7
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.31.7
|
||||
k8s.io/client-go => k8s.io/client-go v0.31.7
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.31.7
|
||||
k8s.io/component-base => k8s.io/component-base v0.31.7
|
||||
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
|
||||
k8s.io/metrics => k8s.io/metrics v0.31.6
|
||||
k8s.io/utils => k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.19.6
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/antonmedv/expr v1.15.3 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.20.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.4 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.uber.org/automaxprocs v1.6.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/expr-lang/expr v1.17.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 // indirect
|
||||
github.com/go-errors/errors v1.5.1 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.1 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20230309165930-d61513b1440d // indirect
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.1 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.6 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.16.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.11.1 // indirect
|
||||
github.com/spf13/cobra v1.7.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/prometheus/common v0.64.0
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/spf13/cobra v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.57.0
|
||||
go.opentelemetry.io/otel/metric v1.36.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
|
||||
golang.org/x/mod v0.12.0 // indirect
|
||||
golang.org/x/net v0.15.0 // indirect
|
||||
golang.org/x/oauth2 v0.12.0 // indirect
|
||||
golang.org/x/sys v0.12.0 // indirect
|
||||
golang.org/x/term v0.12.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.13.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb // indirect
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.33.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.28.2 // indirect
|
||||
k8s.io/component-base v0.28.2 // indirect
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
|
||||
k8s.io/klog/v2 v2.100.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d // indirect
|
||||
knative.dev/pkg v0.0.0-20230925085724-0efc1bce35a9 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.14.0 // indirect
|
||||
sigs.k8s.io/kustomize/cmd/config v0.11.3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.32.1 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
knative.dev/pkg v0.0.0-20250602175424-3c3a920206ea // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/cmd/config v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
go.sum
|
@ -1,78 +1,79 @@
|
|||
github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI=
|
||||
github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
|
||||
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
|
||||
github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
|
||||
github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
|
||||
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/expr-lang/expr v1.17.4 h1:qhTVftZ2Z3WpOEXRHWErEl2xf1Kq011MnQmWgLq06CY=
|
||||
github.com/expr-lang/expr v1.17.4/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
|
||||
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
|
||||
github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
|
||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
||||
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
|
||||
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.7.0-rc.1.0.20220812172401-5b455625bd2c h1:8AzxBXzXPCzl8EEsgWirPPDA5ru+bm5dVEV/KkpAKnE=
|
||||
github.com/golang/mock v1.7.0-rc.1.0.20220812172401-5b455625bd2c/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20230309165930-d61513b1440d h1:um9/pc7tKMINFfP1eE7Wv6PRGXlcCSJkVajF7KJw3uQ=
|
||||
github.com/google/pprof v0.0.0-20230309165930-d61513b1440d/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
|
||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.0.0 h1:nq9lQ5I71Heg2lRb2/+szuIWKY3Y73d8YKyXyN91WzU=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.0.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw=
|
||||
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.6 h1:3xi/Cafd1NaoEnS/yDssIiuVeDVywU0QdFGl3aQaQHM=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.6/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
|
@ -81,26 +82,27 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
|
|||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kedacore/keda/v2 v2.12.0 h1:yvonNclPzmqIW9S9/aVMjeBnCsS6a85MXtXrK9nma9M=
|
||||
github.com/kedacore/keda/v2 v2.12.0/go.mod h1:68D+BFRWbvQqv96YEejVyWaZmFPSeYdYttl5dTg3PaI=
|
||||
github.com/kedacore/keda/v2 v2.17.1 h1:UomWibe5aO7COMUyF+jVM9fuENf4/wcSpiui65tF+d0=
|
||||
github.com/kedacore/keda/v2 v2.17.1/go.mod h1:yKJMF8zuLI2xXvZtgfcbW+V8k3VO4a4R/fucy3z5lC8=
|
||||
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
|
||||
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
@ -110,203 +112,198 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/
|
|||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.12.1 h1:uHNEO1RP2SpuZApSkel9nEh1/Mu+hmQe7Q+Pepg5OYA=
|
||||
github.com/onsi/ginkgo/v2 v2.12.1/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
|
||||
github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c=
|
||||
github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
|
||||
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
|
||||
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/contrib/propagators/b3 v1.36.0 h1:xrAb/G80z/l5JL6XlmUMSD1i6W8vXkWrLfmkD3w/zZo=
go.opentelemetry.io/contrib/propagators/b3 v1.36.0/go.mod h1:UREJtqioFu5awNaCR8aEx7MfJROFlAWb6lPaJFbHaG0=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/exporters/prometheus v0.57.0 h1:AHh/lAP1BHrY5gBwk8ncc25FXWm/gmmY3BX258z5nuk=
go.opentelemetry.io/otel/exporters/prometheus v0.57.0/go.mod h1:QpFWz1QxqevfjwzYdbMb4Y1NnlJvqSGwyuU0B4iuc9c=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b h1:QoALfVG9rhQ/M7vYDScfPdWjGL9dlsVVM5VGh7aKoAA=
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb h1:Isk1sSH7bovx8Rti2wZK0UZF6oraBDK74uoyLEEVFN0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ=
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw=
k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg=
k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU=
k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg=
k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ=
k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU=
k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY=
k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY=
k8s.io/code-generator v0.28.2 h1:u47guga1rCWLnEnffF09p+cqj8B20oHOLoQ1lb1HGtQ=
k8s.io/code-generator v0.28.2/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A=
k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E=
k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc=
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks=
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d h1:/CFeJBjBrZvHX09rObS2+2iEEDevMWYc1v3aIYAjIYI=
k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/pkg v0.0.0-20230925085724-0efc1bce35a9 h1:0GZJ6ZTAkS59goRuMVc5t9vNMmegbggrB/A7pTm2USE=
knative.dev/pkg v0.0.0-20230925085724-0efc1bce35a9/go.mod h1:HagK8mQdSVdmm2xSYMpl4iCLjhxPoPJdY1/NuuEvAVw=
sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU=
sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.14.0 h1:6+QLmXXA8X4eDM7ejeaNUyruA1DDB3PVIjbpVhDOJRA=
sigs.k8s.io/kustomize/api v0.14.0/go.mod h1:vmOXlC8BcmcUJQjiceUbcyQ75JBP6eg8sgoyzc+eLpQ=
sigs.k8s.io/kustomize/cmd/config v0.11.3 h1:QLukJoe/0sjhUrtylmBS1MXhvkdLtbpHJvAClXDra54=
sigs.k8s.io/kustomize/cmd/config v0.11.3/go.mod h1:ENTZ8Ds12gewUpdxF5PJq/9qPVQFd5VPvMIL11wrBIU=
sigs.k8s.io/kustomize/kustomize/v5 v5.1.1 h1:iq+1k9LaQupKcbUVLX8yvE62W6u0B5bXtyCmF5YUcH8=
sigs.k8s.io/kustomize/kustomize/v5 v5.1.1/go.mod h1:7kno0pHkt7k3Vg4/0IjpMxx1bzCi08gziU2CTa6UuvM=
sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs=
sigs.k8s.io/kustomize/kyaml v0.14.3/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA=
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
k8s.io/api v0.31.7 h1:wSo59nXpVXmaB6hgNVJCrdnKtyYoutIgpNNBbROBd2U=
k8s.io/api v0.31.7/go.mod h1:vLUha4nXRUGtQdayzsmjur0lQApK/sJSxyR/fwuujcU=
k8s.io/apiextensions-apiserver v0.31.7 h1:FujQQl6iKuCF5nX4GIQy3ClvftU8MqadAyi9oQ6ZeAw=
k8s.io/apiextensions-apiserver v0.31.7/go.mod h1:YmNzYECWFYy8n9R0oxtVAD9JYILZnZCNziYrpUQhKeI=
k8s.io/apimachinery v0.31.7 h1:fpV8yLerIZFAkj0of66+i1ArPv/Btf9KO6Aulng7RRw=
k8s.io/apimachinery v0.31.7/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/client-go v0.31.7 h1:2+LFJc6Xw6rhmpDbN1NSmhoFLWBh62cPG/P+IfaTSGY=
k8s.io/client-go v0.31.7/go.mod h1:hrrMorBQ17LqzoKIxKg5cSWvmWl94EwA/MUF0Mkf+Zw=
k8s.io/code-generator v0.31.7 h1:8BU7n+pK8td2600IiqH6EgxuiWbwVA1+uTOwIJ/nTUA=
k8s.io/code-generator v0.31.7/go.mod h1:1oSRo6cJxwSCghcOFGsh53TKkUQ5ZgYoK7LBCFbhHDg=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/pkg v0.0.0-20250602175424-3c3a920206ea h1:ukJPq9MzFTEH/Sei5MSVnSE8+7NSCKixCDZPd6p4ohw=
knative.dev/pkg v0.0.0-20250602175424-3c3a920206ea/go.mod h1:tFayQbi6t4+5HXuEGLOGvILW228Q7uaJp/FYEgbjJ3A=
sigs.k8s.io/controller-runtime v0.19.6 h1:fuq53qTLQ7aJTA7aNsklNnu7eQtSFqJUomOyM+phPLk=
sigs.k8s.io/controller-runtime v0.19.6/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM=
sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ=
sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o=
sigs.k8s.io/kustomize/cmd/config v0.19.0 h1:D3uASwjHWHmNiEHu3pPJBJMBIsb+auFvHrHql3HAarU=
sigs.k8s.io/kustomize/cmd/config v0.19.0/go.mod h1:29Vvdl26PidPLUDi7nfjYa/I0wHBkwCZp15Nlcc4y98=
sigs.k8s.io/kustomize/kustomize/v5 v5.6.0 h1:MWtRRDWCwQEeW2rnJTqJMuV6Agy56P53SkbVoJpN7wA=
sigs.k8s.io/kustomize/kustomize/v5 v5.6.0/go.mod h1:XuuZiQF7WdcvZzEYyNww9A0p3LazCKeJmCjeycN8e1I=
sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA=
sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016 h1:kXv6kKdoEtedwuqMmkqhbkgvYKeycVbC8+iPCP9j5kQ=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

@@ -22,7 +22,7 @@ limitations under the License.
package hack

import (
	_ "github.com/golang/mock/mockgen"
	_ "go.uber.org/mock/mockgen"
	_ "k8s.io/code-generator"
	_ "sigs.k8s.io/kustomize/kustomize/v5"
)

@@ -23,25 +23,17 @@ CODEGEN_PKG="${CODEGEN_PKG:-$(go list -f '{{ .Dir }}' -m k8s.io/code-generator 2
SCRIPT_ROOT="$(dirname "${BASH_SOURCE[0]}")/.."
OUTPUT_BASE="$(mktemp -d)"

GO_PACKAGE='github.com/kedacore/http-add-on'
GEN_SUFFIX='operator/generated'
API_SUFFIX='operator/apis'

# (JorTurFer): We need to add execution permission to the old file to continue using it until the new way works
# supporting all the scenarios (eg: not all the public types within the same types.go file)
chmod +x "${CODEGEN_PKG}/generate-internal-groups.sh"
source "${CODEGEN_PKG}/kube_codegen.sh"

bash "${CODEGEN_PKG}/generate-groups.sh" \
  'client,informer,lister' \
  "${GO_PACKAGE}/${GEN_SUFFIX}" \
  "${GO_PACKAGE}/${API_SUFFIX}" \
  'http:v1alpha1' \
  --go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
  --output-base "${OUTPUT_BASE}"
kube::codegen::gen_helpers \
  --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
  "${SCRIPT_ROOT}/operator/apis"

rm -fR "${SCRIPT_ROOT}/${GEN_SUFFIX}"
mv -nT "${OUTPUT_BASE}/${GO_PACKAGE}/${GEN_SUFFIX}" "${SCRIPT_ROOT}/${GEN_SUFFIX}"

rm -fR "${OUTPUT_BASE}"
kube::codegen::gen_client \
  --with-watch \
  --output-dir "${SCRIPT_ROOT}/operator/generated" \
  --output-pkg "github.com/kedacore/http-add-on/operator/generated" \
  --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
  "${SCRIPT_ROOT}/operator/apis"

@@ -25,7 +25,7 @@ GEN='operator/generated'
CPY='hack/boilerplate.go.txt'
PKG='mock'

MOCKGEN_PKG="${MOCKGEN_PKG:-$(go list -f '{{ .Dir }}' -m github.com/golang/mock 2>/dev/null)/mockgen}"
MOCKGEN_PKG="${MOCKGEN_PKG:-$(go list -f '{{ .Dir }}' -m go.uber.org/mock 2>/dev/null)/mockgen}"
MOCKGEN="${OUTPUT}/mockgen"
go build -o "${MOCKGEN}" "${MOCKGEN_PKG}"

@@ -44,6 +44,6 @@ if [[ $ret -eq 0 ]]
then
  echo "${DIFFROOT} up to date."
else
  echo "${DIFFROOT} is out of date. Please run '${SCRIPT_ROOT}/hack/update-codegen.sh'"
  echo "${DIFFROOT} is out of date. Please run 'make codegen'"
  exit 1
fi

@@ -1,4 +1,4 @@
FROM --platform=${BUILDPLATFORM} ghcr.io/kedacore/keda-tools:1.20.8 as builder
FROM --platform=${BUILDPLATFORM} ghcr.io/kedacore/keda-tools:1.24.3 as builder
WORKDIR /workspace
COPY go.* .
RUN go mod download

@@ -0,0 +1,23 @@
package config

import (
	"github.com/kelseyhightower/envconfig"
)

// Metrics is the configuration for configuring metrics in the interceptor.
type Metrics struct {
	// Sets whether or not to enable the Prometheus metrics exporter
	OtelPrometheusExporterEnabled bool `envconfig:"OTEL_PROM_EXPORTER_ENABLED" default:"true"`
	// Sets the port which the Prometheus compatible metrics endpoint should be served on
	OtelPrometheusExporterPort int `envconfig:"OTEL_PROM_EXPORTER_PORT" default:"2223"`
	// Sets whether or not to enable the OTEL metrics exporter
	OtelHTTPExporterEnabled bool `envconfig:"OTEL_EXPORTER_OTLP_METRICS_ENABLED" default:"false"`
}

// MustParseMetrics parses standard configs using envconfig and returns a pointer
// to the newly created config. It panics if parsing fails.
func MustParseMetrics() *Metrics {
	ret := new(Metrics)
	envconfig.MustProcess("", ret)
	return ret
}

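The hunk above shows only how MustParseMetrics is defined, so a minimal standalone sketch of how those envconfig tags resolve at startup may help. The main wrapper and the overridden port value are illustrative assumptions, not part of the repo:

package main

import (
	"fmt"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// Metrics mirrors the struct added in the interceptor config diff above.
type Metrics struct {
	OtelPrometheusExporterEnabled bool `envconfig:"OTEL_PROM_EXPORTER_ENABLED" default:"true"`
	OtelPrometheusExporterPort   int  `envconfig:"OTEL_PROM_EXPORTER_PORT" default:"2223"`
	OtelHTTPExporterEnabled      bool `envconfig:"OTEL_EXPORTER_OTLP_METRICS_ENABLED" default:"false"`
}

func main() {
	// Illustrative: override one default, leave the others unset.
	os.Setenv("OTEL_PROM_EXPORTER_PORT", "9090")

	var m Metrics
	envconfig.MustProcess("", &m) // panics on malformed values, like MustParseMetrics
	fmt.Printf("%+v\n", m)        // {... OtelPrometheusExporterPort:9090 ...}
}

Unset variables fall back to the default tag, which is why the interceptor can run with no metrics-related environment configured at all.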
@@ -24,11 +24,31 @@ type Serving struct {
	// ConfigMapCacheRsyncPeriod is the time interval
	// for the configmap informer to rsync the local cache.
	ConfigMapCacheRsyncPeriod time.Duration `envconfig:"KEDA_HTTP_SCALER_CONFIG_MAP_INFORMER_RSYNC_PERIOD" default:"60m"`
	// The interceptor has an internal process that periodically fetches the state
	// Deprecated: The interceptor has an internal process that periodically fetches the state
	// of deployment that is running the servers it forwards to.
	//
	// This is the interval (in milliseconds) representing how often to do a fetch
	DeploymentCachePollIntervalMS int `envconfig:"KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS" default:"250"`
	// The interceptor has an internal process that periodically fetches the state
	// of endpoints that is running the servers it forwards to.
	//
	// This is the interval (in milliseconds) representing how often to do a fetch
	EndpointsCachePollIntervalMS int `envconfig:"KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS" default:"250"`
	// ProxyTLSEnabled is a flag to specify whether the interceptor proxy should
	// be running using a TLS enabled server
	ProxyTLSEnabled bool `envconfig:"KEDA_HTTP_PROXY_TLS_ENABLED" default:"false"`
	// TLSCertPath is the path to read the certificate file from for the TLS server
	TLSCertPath string `envconfig:"KEDA_HTTP_PROXY_TLS_CERT_PATH" default:"/certs/tls.crt"`
	// TLSKeyPath is the path to read the private key file from for the TLS server
	TLSKeyPath string `envconfig:"KEDA_HTTP_PROXY_TLS_KEY_PATH" default:"/certs/tls.key"`
	// TLSCertStorePaths is a comma separated list of paths to read the certificate/key pairs for the TLS server
	TLSCertStorePaths string `envconfig:"KEDA_HTTP_PROXY_TLS_CERT_STORE_PATHS" default:""`
	// TLSSkipVerify is a boolean flag to specify whether the interceptor should skip TLS verification for upstreams
	TLSSkipVerify bool `envconfig:"KEDA_HTTP_PROXY_TLS_SKIP_VERIFY" default:"false"`
	// TLSPort is the port that the server should serve on if TLS is enabled
	TLSPort int `envconfig:"KEDA_HTTP_PROXY_TLS_PORT" default:"8443"`
	// ProfilingAddr, if not empty, is the host:port address on which pprof will be available
	ProfilingAddr string `envconfig:"PROFILING_BIND_ADDRESS" default:""`
}

// Parse parses standard configs using envconfig and returns a pointer to the

@@ -16,9 +16,9 @@ type Timeouts struct {
	// ResponseHeaderTimeout is how long to wait between when the HTTP request
	// is sent to the backing app and when response headers need to arrive
	ResponseHeader time.Duration `envconfig:"KEDA_RESPONSE_HEADER_TIMEOUT" default:"500ms"`
	// DeploymentReplicas is how long to wait for the backing deployment
	// WorkloadReplicas is how long to wait for the backing workload
	// to have 1 or more replicas before connecting and sending the HTTP request.
	DeploymentReplicas time.Duration `envconfig:"KEDA_CONDITION_WAIT_TIMEOUT" default:"1500ms"`
	WorkloadReplicas time.Duration `envconfig:"KEDA_CONDITION_WAIT_TIMEOUT" default:"1500ms"`
	// ForceHTTP2 toggles whether to try to force HTTP2 for all requests
	ForceHTTP2 bool `envconfig:"KEDA_HTTP_FORCE_HTTP2" default:"false"`
	// MaxIdleConns is the max number of connections that can be idle in the

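Note that only the Go field is renamed here (DeploymentReplicas to WorkloadReplicas); the envconfig name KEDA_CONDITION_WAIT_TIMEOUT is unchanged, so existing deployments keep working. A tiny sketch of how envconfig parses that duration (the main wrapper and the 3s override are illustrative):

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/kelseyhightower/envconfig"
)

// Timeouts mirrors just the renamed field from the diff above.
type Timeouts struct {
	WorkloadReplicas time.Duration `envconfig:"KEDA_CONDITION_WAIT_TIMEOUT" default:"1500ms"`
}

func main() {
	os.Setenv("KEDA_CONDITION_WAIT_TIMEOUT", "3s") // parsed via time.ParseDuration
	var t Timeouts
	envconfig.MustProcess("", &t)
	fmt.Println(t.WorkloadReplicas) // 3s
}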
@@ -0,0 +1,21 @@
package config

import (
	"github.com/kelseyhightower/envconfig"
)

// Tracing is the configuration for configuring tracing through the interceptor.
type Tracing struct {
	// States whether tracing should be enabled, false by default
	Enabled bool `envconfig:"OTEL_EXPORTER_OTLP_TRACES_ENABLED" default:"false"`
	// Sets which tracing exporter to use; must be one of: console, http/protobuf, grpc
	Exporter string `envconfig:"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL" default:"console"`
}

// MustParseTracing parses standard configs using envconfig and returns a pointer
// to the newly created config. It panics if parsing fails.
func MustParseTracing() *Tracing {
	ret := new(Tracing)
	envconfig.MustProcess("", ret)
	return ret
}

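The diff does not show how the interceptor maps these three Exporter values to an OpenTelemetry span exporter, but the otlptrace and stdouttrace packages that appear in go.sum above suggest a switch along these lines. This is a hedged sketch under that assumption, not the repo's actual implementation:

package tracing

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// newSpanExporter maps the accepted OTEL_EXPORTER_OTLP_TRACES_PROTOCOL values
// to a span exporter. The OTLP exporters read endpoint and TLS settings from
// the standard OTEL_EXPORTER_OTLP_* environment variables on their own.
func newSpanExporter(ctx context.Context, exporter string) (sdktrace.SpanExporter, error) {
	switch exporter {
	case "console":
		return stdouttrace.New() // prints spans to stdout, useful for local debugging
	case "http/protobuf":
		return otlptracehttp.New(ctx)
	case "grpc":
		return otlptracegrpc.New(ctx)
	default:
		return nil, fmt.Errorf("unsupported tracing exporter %q", exporter)
	}
}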
@@ -2,16 +2,36 @@ package config

import (
	"fmt"
	"os"
	"time"

	"github.com/go-logr/logr"
)

func Validate(srvCfg Serving, timeoutsCfg Timeouts) error {
	deplCachePollInterval := time.Duration(srvCfg.DeploymentCachePollIntervalMS) * time.Millisecond
	if timeoutsCfg.DeploymentReplicas < deplCachePollInterval {
func Validate(srvCfg *Serving, timeoutsCfg Timeouts, lggr logr.Logger) error {
	// TODO(jorturfer): delete this for v0.9.0
	_, deploymentEnvExist := os.LookupEnv("KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS")
	_, endpointsEnvExist := os.LookupEnv("KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS")
	if deploymentEnvExist && endpointsEnvExist {
		return fmt.Errorf(
			"deployment replicas timeout (%s) should not be less than the Deployment Cache Poll Interval (%s)",
			timeoutsCfg.DeploymentReplicas,
			deplCachePollInterval,
			"%s and %s are mutually exclusive",
			"KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS",
			"KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS",
		)
	}
	if deploymentEnvExist && !endpointsEnvExist {
		srvCfg.EndpointsCachePollIntervalMS = srvCfg.DeploymentCachePollIntervalMS
		srvCfg.DeploymentCachePollIntervalMS = 0
		lggr.Info("WARNING: KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS has been deprecated in favor of KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS and will be removed for v0.9.0")
	}
	// END TODO

	endpointsCachePollInterval := time.Duration(srvCfg.EndpointsCachePollIntervalMS) * time.Millisecond
	if timeoutsCfg.WorkloadReplicas < endpointsCachePollInterval {
		return fmt.Errorf(
			"workload replicas timeout (%s) should not be less than the Endpoints Cache Poll Interval (%s)",
			timeoutsCfg.WorkloadReplicas,
			endpointsCachePollInterval,
		)
	}
	return nil

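The deprecation fallback above is easiest to see in use: when only the old variable is set, Validate copies its value into the endpoints field and zeroes the deprecated one. A sketch with illustrative field values (the Serving and Timeouts literals are minimal, not a full interceptor config):

package main

import (
	"os"
	"time"

	"github.com/go-logr/logr"

	"github.com/kedacore/http-add-on/interceptor/config"
)

func main() {
	// Simulate an operator still setting only the deprecated variable.
	os.Setenv("KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS", "500")

	srv := &config.Serving{DeploymentCachePollIntervalMS: 500, EndpointsCachePollIntervalMS: 250}
	timeouts := config.Timeouts{WorkloadReplicas: 1500 * time.Millisecond}

	// Validate migrates the deprecated value and logs a warning;
	// logr.Discard() silently drops that log here.
	if err := config.Validate(srv, timeouts, logr.Discard()); err != nil {
		panic(err)
	}
	// Now srv.EndpointsCachePollIntervalMS == 500 and
	// srv.DeploymentCachePollIntervalMS == 0.
}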
@@ -5,65 +5,69 @@ import (
	"fmt"

	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"

	"github.com/kedacore/http-add-on/pkg/k8s"
)

// forwardWaitFunc is a function that waits for a condition
// before proceeding to serve the request.
type forwardWaitFunc func(context.Context, string, string) (int, error)
type forwardWaitFunc func(context.Context, string, string) (bool, error)

func deploymentCanServe(depl appsv1.Deployment) bool {
	return depl.Status.ReadyReplicas > 0
func workloadActiveEndpoints(endpoints v1.Endpoints) int {
	total := 0
	for _, subset := range endpoints.Subsets {
		total += len(subset.Addresses)
	}
	return total
}

func newDeployReplicasForwardWaitFunc(
func newWorkloadReplicasForwardWaitFunc(
	lggr logr.Logger,
	deployCache k8s.DeploymentCache,
	endpointCache k8s.EndpointsCache,
) forwardWaitFunc {
	return func(ctx context.Context, deployNS, deployName string) (int, error) {
	return func(ctx context.Context, endpointNS, endpointName string) (bool, error) {
		// get a watcher & its result channel before querying the
		// deployment cache, to ensure we don't miss events
		watcher, err := deployCache.Watch(deployNS, deployName)
		// endpoints cache, to ensure we don't miss events
		watcher, err := endpointCache.Watch(endpointNS, endpointName)
		if err != nil {
			return 0, err
			return false, err
		}
		eventCh := watcher.ResultChan()
		defer watcher.Stop()

		deployment, err := deployCache.Get(deployNS, deployName)
		endpoints, err := endpointCache.Get(endpointNS, endpointName)
		if err != nil {
			// if we didn't get the initial deployment state, bail out
			return 0, fmt.Errorf(
				"error getting state for deployment %s/%s: %w",
				deployNS,
				deployName,
			// if we didn't get the initial endpoints state, bail out
			return false, fmt.Errorf(
				"error getting state for endpoints %s/%s: %w",
				endpointNS,
				endpointName,
				err,
			)
		}
		// if there is 1 or more replica, we're done waiting
		if deploymentCanServe(deployment) {
			return int(deployment.Status.ReadyReplicas), nil
		// if there are 1 or more active endpoints, we're done waiting
		activeEndpoints := workloadActiveEndpoints(endpoints)
		if activeEndpoints > 0 {
			return false, nil
		}

		for {
			select {
			case event := <-eventCh:
				deployment, ok := event.Object.(*appsv1.Deployment)
				endpoints, ok := event.Object.(*v1.Endpoints)
				if !ok {
					lggr.Info(
						"Didn't get a deployment back in event",
						"Didn't get an endpoints object back in event",
					)
				} else if deploymentCanServe(*deployment) {
					return 0, nil
				} else if activeEndpoints := workloadActiveEndpoints(*endpoints); activeEndpoints > 0 {
					return true, nil
				}
			case <-ctx.Done():
				// otherwise, if the context is marked done before
				// we're done waiting, fail.
				return 0, fmt.Errorf(
					"context marked done while waiting for deployment %s to reach > 0 replicas: %w",
					deployName,
				return false, fmt.Errorf(
					"context marked done while waiting for workload to reach > 0 replicas: %w",
					ctx.Err(),
				)
			}

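The readiness signal switches here from Deployment.Status.ReadyReplicas to counting ready Endpoints addresses, which works for any workload kind behind a Service. A self-contained sketch of that counting logic, with workloadActiveEndpoints copied from the diff above and illustrative IPs:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// workloadActiveEndpoints is the function from the diff above: only ready
// Addresses are counted, so NotReadyAddresses never mark a workload as active.
func workloadActiveEndpoints(endpoints v1.Endpoints) int {
	total := 0
	for _, subset := range endpoints.Subsets {
		total += len(subset.Addresses)
	}
	return total
}

func main() {
	ep := v1.Endpoints{
		Subsets: []v1.EndpointSubset{
			{Addresses: []v1.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}}},
			{Addresses: []v1.EndpointAddress{{IP: "10.0.0.3"}}},
		},
	}
	fmt.Println(workloadActiveEndpoints(ep)) // 3
}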
@@ -8,8 +8,7 @@ import (
	"github.com/go-logr/logr"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"

@@ -17,70 +16,55 @@ import (
)

// Test to make sure the wait function returns a nil error if there is immediately
// one replica on the target deployment
// one active endpoint on the target deployment
func TestForwardWaitFuncOneReplica(t *testing.T) {
	ctx := context.Background()

	const waitFuncWait = 1 * time.Second
	r := require.New(t)
	const ns = "testNS"
	const deployName = "TestForwardingHandlerDeploy"
	cache := k8s.NewFakeDeploymentCache()
	cache.AddDeployment(*newDeployment(
		ns,
		deployName,
		"myimage",
		[]int32{123},
		nil,
		map[string]string{},
		corev1.PullAlways,
	))
	const endpointsName = "TestForwardingHandler"
	endpoints := *newEndpoint(ns, endpointsName)
	cache := k8s.NewFakeEndpointsCache()
	cache.Set(endpoints)
	r.NoError(cache.SetSubsets(ns, endpointsName, 1))

	ctx, done := context.WithTimeout(ctx, waitFuncWait)
	defer done()
	group, ctx := errgroup.WithContext(ctx)

	waitFunc := newDeployReplicasForwardWaitFunc(
	waitFunc := newWorkloadReplicasForwardWaitFunc(
		logr.Discard(),
		cache,
	)

	group.Go(func() error {
		_, err := waitFunc(ctx, ns, deployName)
		_, err := waitFunc(ctx, ns, endpointsName)
		return err
	})
	r.NoError(group.Wait(), "wait function failed, but it shouldn't have")
}

// Test to make sure the wait function returns an error if there are no replicas, and that doesn't change
// Test to make sure the wait function returns an error if there are no active endpoints, and that doesn't change
// within a timeout
func TestForwardWaitFuncNoReplicas(t *testing.T) {
	ctx := context.Background()
	const waitFuncWait = 1 * time.Second
	r := require.New(t)
	const ns = "testNS"
	const deployName = "TestForwardingHandlerHoldsDeployment"
	deployment := newDeployment(
		ns,
		deployName,
		"myimage",
		[]int32{123},
		nil,
		map[string]string{},
		corev1.PullAlways,
	)
	deployment.Status.ReadyReplicas = 0
	cache := k8s.NewFakeDeploymentCache()
	cache.AddDeployment(*deployment)
	const endpointsName = "TestForwardWaitFuncNoReplicas"
	endpoints := *newEndpoint(ns, endpointsName)
	cache := k8s.NewFakeEndpointsCache()
	cache.Set(endpoints)

	ctx, done := context.WithTimeout(ctx, waitFuncWait)
	defer done()
	waitFunc := newDeployReplicasForwardWaitFunc(
	waitFunc := newWorkloadReplicasForwardWaitFunc(
		logr.Discard(),
		cache,
	)

	_, err := waitFunc(ctx, ns, deployName)
	_, err := waitFunc(ctx, ns, endpointsName)
	r.Error(err)
}

@@ -90,100 +74,58 @@ func TestWaitFuncWaitsUntilReplicas(t *testing.T) {
	totalWaitDur := 500 * time.Millisecond

	const ns = "testNS"
	const deployName = "TestForwardingHandlerHoldsDeployment"
	deployment := newDeployment(
		ns,
		deployName,
		"myimage",
		[]int32{123},
		nil,
		map[string]string{},
		corev1.PullAlways,
	)
	deployment.Spec.Replicas = k8s.Int32P(0)
	cache := k8s.NewFakeDeploymentCache()
	cache.AddDeployment(*deployment)
	const endpointsName = "TestForwardingHandlerHolds"

	endpoints := *newEndpoint(ns, endpointsName)
	cache := k8s.NewFakeEndpointsCache()
	cache.Set(endpoints)
	// create a watcher first so that the goroutine
	// can later fetch it and send a message on it
	_, err := cache.Watch(ns, deployName)
	_, err := cache.Watch(ns, endpointsName)
	r.NoError(err)

	ctx, done := context.WithTimeout(ctx, totalWaitDur)
	waitFunc := newDeployReplicasForwardWaitFunc(
	waitFunc := newWorkloadReplicasForwardWaitFunc(
		logr.Discard(),
		cache,
	)

	// this channel will be closed immediately after the replicas were increased
	replicasIncreasedCh := make(chan struct{})
	// this channel will be closed immediately after the active endpoints were increased
	activeEndpointsIncreasedCh := make(chan struct{})
	go func() {
		time.Sleep(totalWaitDur / 2)
		watcher := cache.GetWatcher(ns, deployName)
		watcher := cache.GetWatcher(ns, endpointsName)
		r.NotNil(watcher, "watcher was not found")
		modifiedDeployment := deployment.DeepCopy()
		modifiedDeployment.Spec.Replicas = k8s.Int32P(1)
		watcher.Action(watch.Modified, modifiedDeployment)
		close(replicasIncreasedCh)
		modifiedEndpoints := endpoints.DeepCopy()
		modifiedEndpoints.Subsets = []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{
					{IP: "1.2.3.4"},
				},
			},
		}
		watcher.Action(watch.Modified, modifiedEndpoints)
		close(activeEndpointsIncreasedCh)
	}()
	_, err = waitFunc(ctx, ns, deployName)
	_, err = waitFunc(ctx, ns, endpointsName)
	r.NoError(err)
	done()
}

// newDeployment creates a new deployment object
// newEndpoint creates a new endpoints object
// with the given name and the given image. This does not actually create
// the deployment in the cluster, it just creates the deployment object
// the endpoints in the cluster, it just creates the endpoints object
// in memory
func newDeployment(
func newEndpoint(
	namespace,
	name,
	image string,
	ports []int32,
	env []corev1.EnvVar,
	labels map[string]string,
	pullPolicy corev1.PullPolicy,
) *appsv1.Deployment {
	containerPorts := make([]corev1.ContainerPort, len(ports))
	for i, port := range ports {
		containerPorts[i] = corev1.ContainerPort{
			ContainerPort: port,
		}
	}
	deployment := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind: "Deployment",
		},
	name string,
) *v1.Endpoints {
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Replicas: k8s.Int32P(1),
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Image:           image,
							Name:            name,
							ImagePullPolicy: pullPolicy,
							Ports:           containerPorts,
							Env:             env,
						},
					},
				},
			},
		},
		Status: appsv1.DeploymentStatus{
			ReadyReplicas: 1,
		},
	}

	return deployment
	return endpoints
}

@@ -5,6 +5,12 @@ import (
	"net/http"
	"net/http/httputil"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"

	"github.com/kedacore/http-add-on/interceptor/config"
	"github.com/kedacore/http-add-on/pkg/util"
)

@@ -13,12 +19,16 @@ var (
)

type Upstream struct {
	roundTripper http.RoundTripper
	roundTripper   http.RoundTripper
	tracingCfg     *config.Tracing
	shouldFailover bool
}

func NewUpstream(roundTripper http.RoundTripper) *Upstream {
func NewUpstream(roundTripper http.RoundTripper, tracingCfg *config.Tracing, shouldFailover bool) *Upstream {
	return &Upstream{
		roundTripper: roundTripper,
		roundTripper:   roundTripper,
		tracingCfg:     tracingCfg,
		shouldFailover: shouldFailover,
	}
}

@@ -28,7 +38,26 @@ func (uh *Upstream) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	r = util.RequestWithLoggerWithName(r, "UpstreamHandler")
	ctx := r.Context()

	if uh.tracingCfg.Enabled {
		p := otel.GetTextMapPropagator()
		ctx = p.Extract(ctx, propagation.HeaderCarrier(r.Header))

		p.Inject(ctx, propagation.HeaderCarrier(w.Header()))

		span := trace.SpanFromContext(ctx)
		defer span.End()

		serviceValAttr := attribute.String("service", "keda-http-interceptor-proxy-upstream")
		coldStartValAttr := attribute.String("cold-start", w.Header().Get("X-KEDA-HTTP-Cold-Start"))

		span.SetAttributes(serviceValAttr, coldStartValAttr)
	}

	stream := util.StreamFromContext(ctx)
	if uh.shouldFailover {
		stream = util.FailoverStreamFromContext(ctx)
	}

	if stream == nil {
		sh := NewStatic(http.StatusInternalServerError, errNilStream)
		sh.ServeHTTP(w, r)

@@ -43,6 +72,7 @@ func (uh *Upstream) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	superDirector(req)
	req.URL = stream
	req.URL.Path = r.URL.Path
	req.URL.RawPath = r.URL.RawPath
	req.URL.RawQuery = r.URL.RawQuery
	// delete the incoming X-Forwarded-For header so the proxy
	// puts its own in. This is also important to prevent IP spoofing

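The tracing block added to ServeHTTP is the standard OpenTelemetry extract-then-inject pattern: pull remote context from incoming headers, then copy it onto outgoing headers. A minimal standalone sketch of just that pattern (the traceparent value and the use of httptest are illustrative, and the propagator registration here stands in for whatever the interceptor's tracing setup does):

package main

import (
	"fmt"
	"net/http/httptest"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register the W3C TraceContext propagator globally, as an app would at startup.
	otel.SetTextMapPropagator(propagation.TraceContext{})

	// Simulate an incoming request that already carries trace context.
	req := httptest.NewRequest("GET", "http://example.local/", nil)
	req.Header.Set("Traceparent", "00-29b3290dc5a93f2618b17502ccb2a728-97337bce1bc3e368-01")

	p := otel.GetTextMapPropagator()
	// Extract: pull the remote span context out of the request headers.
	ctx := p.Extract(req.Context(), propagation.HeaderCarrier(req.Header))

	// Inject: copy the context into outgoing headers, mirroring what the
	// Upstream handler does with w.Header().
	rec := httptest.NewRecorder()
	p.Inject(ctx, propagation.HeaderCarrier(rec.Header()))
	fmt.Println(rec.Header().Get("Traceparent")) // same trace ID propagated
}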
@@ -1,6 +1,8 @@
package handler

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"

@@ -9,13 +11,227 @@ import (
	"time"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/kedacore/http-add-on/interceptor/config"
	"github.com/kedacore/http-add-on/interceptor/tracing"
	kedanet "github.com/kedacore/http-add-on/pkg/net"
	"github.com/kedacore/http-add-on/pkg/util"
)

const (
	traceID              = "a8419b25ec2051e5"
	fullW3CLengthTraceID = "29b3290dc5a93f2618b17502ccb2a728"
	spanID               = "97337bce1bc3e368"
	parentSpanID         = "2890e7e08fc6592b"
	sampled              = "1"
	w3cPadding           = "0000000000000000"
)

func TestB3MultiPropagation(t *testing.T) {
	// Given
	r := require.New(t)

	microservice, microserviceURL, closeServer := startMicroservice(t)
	defer closeServer()

	exporter, tracerProvider := setupOTelSDKForTesting()
	instrumentedServeHTTP := withAutoInstrumentation(serveHTTP)

	request, responseWriter := createRequestAndResponse("GET", microserviceURL)

	request.Header.Set("X-B3-Traceid", traceID)
	request.Header.Set("X-B3-Spanid", spanID)
	request.Header.Set("X-B3-Parentspanid", parentSpanID)
	request.Header.Set("X-B3-Sampled", sampled)

	defer func(traceProvider *trace.TracerProvider, ctx context.Context) {
		_ = traceProvider.Shutdown(ctx)
	}(tracerProvider, request.Context())

	// When
	instrumentedServeHTTP.ServeHTTP(responseWriter, request)

	// Then
	receivedRequest := microservice.IncomingRequests()[0]
	receivedHeaders := receivedRequest.Header

	r.Equal(receivedHeaders.Get("X-B3-Parentspanid"), parentSpanID)
	r.Equal(receivedHeaders.Get("X-B3-Traceid"), traceID)
	r.Equal(receivedHeaders.Get("X-B3-Spanid"), spanID)
	r.Equal(receivedHeaders.Get("X-B3-Sampled"), sampled)

	r.NotContains(receivedHeaders, "Traceparent")
	r.NotContains(receivedHeaders, "B3")
	r.NotContains(receivedHeaders, "b3")

	_ = tracerProvider.ForceFlush(request.Context())

	exportedSpans := exporter.GetSpans()
	if len(exportedSpans) != 1 {
		t.Fatalf("Expected 1 Span, got %d", len(exportedSpans))
	}
	sc := exportedSpans[0].SpanContext
	r.Equal(w3cPadding+traceID, sc.TraceID().String())
	r.NotEqual(sc.SpanID().String(), spanID)
}

func TestW3CAndB3MultiPropagation(t *testing.T) {
	// Given
	r := require.New(t)

	microservice, microserviceURL, closeServer := startMicroservice(t)
	defer closeServer()

	exporter, tracerProvider := setupOTelSDKForTesting()
	instrumentedServeHTTP := withAutoInstrumentation(serveHTTP)

	request, responseWriter := createRequestAndResponse("GET", microserviceURL)

	request.Header.Set("X-B3-Traceid", traceID)
	request.Header.Set("X-B3-Spanid", spanID)
	request.Header.Set("X-B3-Parentspanid", parentSpanID)
	request.Header.Set("X-B3-Sampled", sampled)
	request.Header.Set("Traceparent", w3cPadding+traceID)

	defer func(traceProvider *trace.TracerProvider, ctx context.Context) {
		_ = traceProvider.Shutdown(ctx)
	}(tracerProvider, request.Context())

	// When
	instrumentedServeHTTP.ServeHTTP(responseWriter, request)

	// Then
	receivedRequest := microservice.IncomingRequests()[0]
	receivedHeaders := receivedRequest.Header

	r.Equal(receivedHeaders.Get("X-B3-Parentspanid"), parentSpanID)
	r.Equal(receivedHeaders.Get("X-B3-Traceid"), traceID)
	r.Equal(receivedHeaders.Get("X-B3-Spanid"), spanID)
	r.Equal(receivedHeaders.Get("X-B3-Sampled"), sampled)
	r.Equal(receivedHeaders.Get("Traceparent"), w3cPadding+traceID)

	r.NotContains(receivedHeaders, "B3")
	r.NotContains(receivedHeaders, "b3")

	_ = tracerProvider.ForceFlush(request.Context())

	exportedSpans := exporter.GetSpans()
	if len(exportedSpans) != 1 {
		t.Fatalf("Expected 1 Span, got %d", len(exportedSpans))
	}
	sc := exportedSpans[0].SpanContext
	r.Equal(w3cPadding+traceID, sc.TraceID().String())
	r.NotEqual(sc.SpanID().String(), spanID)
}

func TestW3CPropagation(t *testing.T) {
	// Given
	r := require.New(t)

	microservice, microserviceURL, closeServer := startMicroservice(t)
	defer closeServer()

	exporter, tracerProvider := setupOTelSDKForTesting()
	instrumentedServeHTTP := withAutoInstrumentation(serveHTTP)

	request, responseWriter := createRequestAndResponse("GET", microserviceURL)

	traceParent := fmt.Sprintf("00-%s-%s-01", fullW3CLengthTraceID, spanID)
	request.Header.Set("Traceparent", traceParent)

	defer func(traceProvider *trace.TracerProvider, ctx context.Context) {
		_ = traceProvider.Shutdown(ctx)
	}(tracerProvider, request.Context())

	// When
	instrumentedServeHTTP.ServeHTTP(responseWriter, request)

	// Then
	receivedRequest := microservice.IncomingRequests()[0]
	receivedHeaders := receivedRequest.Header

	r.Equal(receivedHeaders.Get("Traceparent"), traceParent)

	r.NotContains(receivedHeaders, "B3")
	r.NotContains(receivedHeaders, "b3")
	r.NotContains(receivedHeaders, "X-B3-Parentspanid")
	r.NotContains(receivedHeaders, "X-B3-Traceid")
	r.NotContains(receivedHeaders, "X-B3-Spanid")
	r.NotContains(receivedHeaders, "X-B3-Sampled")

	_ = tracerProvider.ForceFlush(request.Context())

	exportedSpans := exporter.GetSpans()
	if len(exportedSpans) != 1 {
		t.Fatalf("Expected 1 Span, got %d", len(exportedSpans))
	}
	sc := exportedSpans[0].SpanContext
	r.Equal(fullW3CLengthTraceID, sc.TraceID().String())
	r.Equal(true, sc.IsSampled())
	r.NotEqual(sc.SpanID().String(), spanID)
}
|
||||
|
||||
func TestPropagationWhenNoHeaders(t *testing.T) {
|
||||
// Given
|
||||
r := require.New(t)
|
||||
|
||||
microservice, microserviceURL, closeServer := startMicroservice(t)
|
||||
defer closeServer()
|
||||
|
||||
exporter, tracerProvider := setupOTelSDKForTesting()
|
||||
instrumentedServeHTTP := withAutoInstrumentation(serveHTTP)
|
||||
|
||||
request, responseWriter := createRequestAndResponse("GET", microserviceURL)
|
||||
|
||||
defer func(traceProvider *trace.TracerProvider, ctx context.Context) {
|
||||
_ = traceProvider.Shutdown(ctx)
|
||||
}(tracerProvider, request.Context())
|
||||
|
||||
// When
|
||||
instrumentedServeHTTP.ServeHTTP(responseWriter, request)
|
||||
|
||||
// Then
|
||||
receivedRequest := microservice.IncomingRequests()[0]
|
||||
receivedHeaders := receivedRequest.Header
|
||||
|
||||
r.NotContains(receivedHeaders, "Traceparent")
|
||||
r.NotContains(receivedHeaders, "B3")
|
||||
r.NotContains(receivedHeaders, "b3")
|
||||
r.NotContains(receivedHeaders, "X-B3-Parentspanid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Traceid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Spanid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Sampled")
|
||||
|
||||
_ = tracerProvider.ForceFlush(request.Context())
|
||||
|
||||
exportedSpans := exporter.GetSpans()
|
||||
if len(exportedSpans) != 1 {
|
||||
t.Fatalf("Expected 1 Span, got %d", len(exportedSpans))
|
||||
}
|
||||
sc := exportedSpans[0].SpanContext
|
||||
r.NotEmpty(sc.SpanID())
|
||||
r.NotEmpty(sc.TraceID())
|
||||
|
||||
hasServiceAttribute := false
|
||||
hasColdStartAttribute := false
|
||||
for _, attribute := range exportedSpans[0].Attributes {
|
||||
if attribute.Key == "service" && attribute.Value.AsString() == "keda-http-interceptor-proxy-upstream" {
|
||||
hasServiceAttribute = true
|
||||
}
|
||||
|
||||
if attribute.Key == "cold-start" {
|
||||
hasColdStartAttribute = true
|
||||
}
|
||||
}
|
||||
r.True(hasServiceAttribute)
|
||||
r.True(hasColdStartAttribute)
|
||||
}
|
||||
|
||||
func TestForwarderSuccess(t *testing.T) {
|
||||
r := require.New(t)
|
||||
// this channel will be closed after the request was received, but
|
||||
|
@@ -43,7 +259,7 @@ func TestForwarderSuccess(t *testing.T) {
    timeouts := defaultTimeouts()
    dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
    rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
    uh := NewUpstream(rt)
    uh := NewUpstream(rt, &config.Tracing{}, false)
    uh.ServeHTTP(res, req)

    r.True(
@@ -88,7 +304,7 @@ func TestForwarderHeaderTimeout(t *testing.T) {
    r.NoError(err)
    req = util.RequestWithStream(req, originURL)
    rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
    uh := NewUpstream(rt)
    uh := NewUpstream(rt, &config.Tracing{}, false)
    uh.ServeHTTP(res, req)

    forwardedRequests := hdl.IncomingRequests()
@@ -138,7 +354,7 @@ func TestForwarderWaitsForSlowOrigin(t *testing.T) {
    r.NoError(err)
    req = util.RequestWithStream(req, originURL)
    rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
    uh := NewUpstream(rt)
    uh := NewUpstream(rt, &config.Tracing{}, false)
    uh.ServeHTTP(res, req)
    // wait for the goroutine above to finish, with a little cushion
    ensureSignalBeforeTimeout(originWaitCh, originDelay*2)
@@ -161,7 +377,7 @@ func TestForwarderConnectionRetryAndTimeout(t *testing.T) {
    r.NoError(err)
    req = util.RequestWithStream(req, noSuchURL)
    rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
    uh := NewUpstream(rt)
    uh := NewUpstream(rt, &config.Tracing{}, false)

    start := time.Now()
    uh.ServeHTTP(res, req)
@@ -217,7 +433,7 @@ func TestForwardRequestRedirectAndHeaders(t *testing.T) {
    r.NoError(err)
    req = util.RequestWithStream(req, srvURL)
    rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
    uh := NewUpstream(rt)
    uh := NewUpstream(rt, &config.Tracing{}, false)
    uh.ServeHTTP(res, req)
    r.Equal(301, res.Code)
    r.Equal("abc123.com", res.Header().Get("Location"))
@@ -238,10 +454,10 @@ func newRoundTripper(

func defaultTimeouts() config.Timeouts {
    return config.Timeouts{
        Connect:            100 * time.Millisecond,
        KeepAlive:          100 * time.Millisecond,
        ResponseHeader:     500 * time.Millisecond,
        DeploymentReplicas: 1 * time.Second,
        Connect:          100 * time.Millisecond,
        KeepAlive:        100 * time.Millisecond,
        ResponseHeader:   500 * time.Millisecond,
        WorkloadReplicas: 1 * time.Second,
    }
}
@@ -281,3 +497,56 @@ func ensureSignalBeforeTimeout(signalCh <-chan struct{}, timeout time.Duration)
        return true
    }
}

func serveHTTP(w http.ResponseWriter, r *http.Request) {
    timeouts := defaultTimeouts()
    dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
    rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
    upstream := NewUpstream(rt, &config.Tracing{Enabled: true}, false)

    upstream.ServeHTTP(w, r)
}

func setupOTelSDKForTesting() (*tracetest.InMemoryExporter, *trace.TracerProvider) {
    exporter := tracetest.NewInMemoryExporter()
    traceProvider := trace.NewTracerProvider(trace.WithBatcher(exporter, trace.WithBatchTimeout(time.Second)))
    otel.SetTracerProvider(traceProvider)
    prop := tracing.NewPropagator()
    otel.SetTextMapPropagator(prop)
    return exporter, traceProvider
}

func startMicroservice(t *testing.T) (*kedanet.TestHTTPHandlerWrapper, *url.URL, func()) {
    assert := require.New(t)
    requestReceiveChannel := make(chan struct{})

    const respCode = 200
    const respBody = "Success Response"
    microservice := kedanet.NewTestHTTPHandlerWrapper(
        http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
            close(requestReceiveChannel)
            w.WriteHeader(respCode)
            _, err := w.Write([]byte(respBody))
            assert.NoError(err)
        }),
    )
    server := httptest.NewServer(microservice)

    url, err := url.Parse(server.URL)
    assert.NoError(err)

    return microservice, url, func() {
        server.Close()
    }
}

func createRequestAndResponse(method string, url *url.URL) (*http.Request, http.ResponseWriter) {
    ctx := util.ContextWithStream(context.Background(), url)
    request, _ := http.NewRequestWithContext(ctx, method, url.String(), nil)
    recorder := httptest.NewRecorder()
    return request, recorder
}

func withAutoInstrumentation(sut func(w http.ResponseWriter, r *http.Request)) http.Handler {
    return otelhttp.NewHandler(http.HandlerFunc(sut), "SystemUnderTest")
}
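The helpers above wire tracing.NewPropagator() into the OTel SDK, but the propagator's implementation is not part of this diff. A minimal sketch of a composite W3C + B3-multi propagator that would satisfy these tests (assuming the OpenTelemetry contrib B3 package; the real implementation may differ):

package tracing

import (
    "go.opentelemetry.io/contrib/propagators/b3"
    "go.opentelemetry.io/otel/propagation"
)

// NewPropagator returns a propagator that understands both W3C
// trace-context headers (Traceparent) and B3 multi headers
// (X-B3-Traceid, X-B3-Spanid, X-B3-Parentspanid, X-B3-Sampled).
func NewPropagator() propagation.TextMapPropagator {
    return propagation.NewCompositeTextMapPropagator(
        propagation.TraceContext{},
        b3.New(b3.WithInjectEncoding(b3.B3MultipleHeader)),
    )
}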
@@ -2,104 +2,148 @@ package main

import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "errors"
    "flag"
    "fmt"
    "net/http"
    _ "net/http/pprof"
    "os"
    "path/filepath"
    "runtime"
    "strings"
    "time"

    "github.com/go-logr/logr"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    "golang.org/x/exp/maps"
    "golang.org/x/sync/errgroup"
    k8sinformers "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

    "github.com/kedacore/http-add-on/interceptor/config"
    "github.com/kedacore/http-add-on/interceptor/handler"
    "github.com/kedacore/http-add-on/interceptor/metrics"
    "github.com/kedacore/http-add-on/interceptor/middleware"
    "github.com/kedacore/http-add-on/interceptor/tracing"
    clientset "github.com/kedacore/http-add-on/operator/generated/clientset/versioned"
    informers "github.com/kedacore/http-add-on/operator/generated/informers/externalversions"
    "github.com/kedacore/http-add-on/pkg/build"
    kedahttp "github.com/kedacore/http-add-on/pkg/http"
    "github.com/kedacore/http-add-on/pkg/k8s"
    pkglog "github.com/kedacore/http-add-on/pkg/log"
    kedanet "github.com/kedacore/http-add-on/pkg/net"
    "github.com/kedacore/http-add-on/pkg/queue"
    "github.com/kedacore/http-add-on/pkg/routing"
    "github.com/kedacore/http-add-on/pkg/util"
)

// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch
var (
    setupLog = ctrl.Log.WithName("setup")
)

// +kubebuilder:rbac:groups=http.keda.sh,resources=httpscaledobjects,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch

func main() {
    lggr, err := pkglog.NewZapr()
    if err != nil {
        fmt.Println("Error building logger", err)
        os.Exit(1)
    }

    defer os.Exit(1)
    timeoutCfg := config.MustParseTimeouts()
    servingCfg := config.MustParseServing()
    if err := config.Validate(*servingCfg, *timeoutCfg); err != nil {
        lggr.Error(err, "invalid configuration")
        os.Exit(1)
    metricsCfg := config.MustParseMetrics()
    tracingCfg := config.MustParseTracing()

    opts := zap.Options{
        Development: true,
    }
    opts.BindFlags(flag.CommandLine)
    flag.Parse()

    ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

    if err := config.Validate(servingCfg, *timeoutCfg, ctrl.Log); err != nil {
        setupLog.Error(err, "invalid configuration")
        runtime.Goexit()
    }

    lggr.Info(
    setupLog.Info(
        "starting interceptor",
        "timeoutConfig",
        timeoutCfg,
        "servingConfig",
        servingCfg,
        "metricsConfig",
        metricsCfg,
    )

    proxyPort := servingCfg.ProxyPort
    adminPort := servingCfg.AdminPort
    proxyTLSEnabled := servingCfg.ProxyTLSEnabled
    profilingAddr := servingCfg.ProfilingAddr

    // setup the configured metrics collectors
    metrics.NewMetricsCollectors(metricsCfg)

    cfg := ctrl.GetConfigOrDie()

    cl, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        lggr.Error(err, "creating new Kubernetes ClientSet")
        os.Exit(1)
        setupLog.Error(err, "creating new Kubernetes ClientSet")
        runtime.Goexit()
    }
    deployCache := k8s.NewInformerBackedDeploymentCache(
        lggr,
        cl,
        time.Millisecond*time.Duration(servingCfg.DeploymentCachePollIntervalMS),
    )

    k8sSharedInformerFactory := k8sinformers.NewSharedInformerFactory(cl, time.Millisecond*time.Duration(servingCfg.EndpointsCachePollIntervalMS))
    svcCache := k8s.NewInformerBackedServiceCache(ctrl.Log, cl, k8sSharedInformerFactory)
    endpointsCache := k8s.NewInformerBackedEndpointsCache(ctrl.Log, cl, time.Millisecond*time.Duration(servingCfg.EndpointsCachePollIntervalMS))
    if err != nil {
        lggr.Error(err, "creating new deployment cache")
        os.Exit(1)
        setupLog.Error(err, "creating new endpoints cache")
        runtime.Goexit()
    }
    waitFunc := newDeployReplicasForwardWaitFunc(lggr, deployCache)
    waitFunc := newWorkloadReplicasForwardWaitFunc(ctrl.Log, endpointsCache)

    httpCl, err := clientset.NewForConfig(cfg)
    if err != nil {
        lggr.Error(err, "creating new HTTP ClientSet")
        os.Exit(1)
        setupLog.Error(err, "creating new HTTP ClientSet")
        runtime.Goexit()
    }

    queues := queue.NewMemory()

    sharedInformerFactory := informers.NewSharedInformerFactory(httpCl, servingCfg.ConfigMapCacheRsyncPeriod)
    routingTable, err := routing.NewTable(sharedInformerFactory, servingCfg.WatchNamespace)
    routingTable, err := routing.NewTable(sharedInformerFactory, servingCfg.WatchNamespace, queues)
    if err != nil {
        lggr.Error(err, "fetching routing table")
        os.Exit(1)
        setupLog.Error(err, "fetching routing table")
        runtime.Goexit()
    }

    q := queue.NewMemory()

    lggr.Info("Interceptor starting")
    setupLog.Info("Interceptor starting")

    ctx := ctrl.SetupSignalHandler()
    ctx = util.ContextWithLogger(ctx, lggr)
    ctx = util.ContextWithLogger(ctx, ctrl.Log)

    eg, ctx := errgroup.WithContext(ctx)

    // start the deployment cache updater
    eg.Go(func() error {
        lggr.Info("starting the deployment cache")
    if tracingCfg.Enabled {
        shutdown, err := tracing.SetupOTelSDK(ctx, tracingCfg)

        deployCache.Start(ctx)
        if err != nil {
            setupLog.Error(err, "Error setting up tracer")
        }

        defer func() {
            err = errors.Join(err, shutdown(context.Background()))
        }()
    }

    // start the endpoints cache updater
    eg.Go(func() error {
        setupLog.Info("starting the endpoints cache")

        endpointsCache.Start(ctx)
        k8sSharedInformerFactory.Start(ctx.Done())
        return nil
    })
@@ -107,10 +151,10 @@ func main() {
    // the ConfigMap that the operator updates as HTTPScaledObjects
    // enter and exit the system
    eg.Go(func() error {
        lggr.Info("starting the routing table")
        setupLog.Info("starting the routing table")

        if err := routingTable.Start(ctx); !util.IsIgnoredErr(err) {
            lggr.Error(err, "routing table failed")
            setupLog.Error(err, "routing table failed")
            return err
        }
@@ -120,37 +164,77 @@ func main() {
    // start the administrative server. this is the server
    // that serves the queue size API
    eg.Go(func() error {
        lggr.Info("starting the admin server", "port", adminPort)
        setupLog.Info("starting the admin server", "port", adminPort)

        if err := runAdminServer(ctx, lggr, q, adminPort); !util.IsIgnoredErr(err) {
            lggr.Error(err, "admin server failed")
        if err := runAdminServer(ctx, ctrl.Log, queues, adminPort); !util.IsIgnoredErr(err) {
            setupLog.Error(err, "admin server failed")
            return err
        }

        return nil
    })

    // start the proxy server. this is the server that
    // accepts, holds and forwards user requests
    eg.Go(func() error {
        lggr.Info("starting the proxy server", "port", proxyPort)
    if metricsCfg.OtelPrometheusExporterEnabled {
        // start the prometheus compatible metrics server
        // serves a prometheus compatible metrics endpoint on the configured port
        eg.Go(func() error {
            if err := runMetricsServer(ctx, ctrl.Log, metricsCfg); !util.IsIgnoredErr(err) {
                setupLog.Error(err, "could not start the Prometheus metrics server")
                return err
            }

        if err := runProxyServer(ctx, lggr, q, waitFunc, routingTable, timeoutCfg, proxyPort); !util.IsIgnoredErr(err) {
            lggr.Error(err, "proxy server failed")
            return err
        }

        return nil
    })

    build.PrintComponentInfo(lggr, "Interceptor")

    if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) {
        lggr.Error(err, "fatal error")
        os.Exit(1)
            return nil
        })
    }

    lggr.Info("Bye!")
    // start the proxy servers. This is the server that
    // accepts, holds and forwards user requests
    // start a proxy server with TLS
    if proxyTLSEnabled {
        eg.Go(func() error {
            proxyTLSConfig := map[string]interface{}{"certificatePath": servingCfg.TLSCertPath, "keyPath": servingCfg.TLSKeyPath, "certstorePaths": servingCfg.TLSCertStorePaths, "skipVerify": servingCfg.TLSSkipVerify}
            proxyTLSPort := servingCfg.TLSPort
            k8sSharedInformerFactory.WaitForCacheSync(ctx.Done())

            setupLog.Info("starting the proxy server with TLS enabled", "port", proxyTLSPort)

            if err := runProxyServer(ctx, ctrl.Log, queues, waitFunc, routingTable, svcCache, timeoutCfg, proxyTLSPort, proxyTLSEnabled, proxyTLSConfig, tracingCfg); !util.IsIgnoredErr(err) {
                setupLog.Error(err, "tls proxy server failed")
                return err
            }
            return nil
        })
    }

    // start a proxy server without TLS.
    eg.Go(func() error {
        k8sSharedInformerFactory.WaitForCacheSync(ctx.Done())
        setupLog.Info("starting the proxy server with TLS disabled", "port", proxyPort)

        k8sSharedInformerFactory.WaitForCacheSync(ctx.Done())
        if err := runProxyServer(ctx, ctrl.Log, queues, waitFunc, routingTable, svcCache, timeoutCfg, proxyPort, false, nil, tracingCfg); !util.IsIgnoredErr(err) {
            setupLog.Error(err, "proxy server failed")
            return err
        }

        return nil
    })

    if len(profilingAddr) > 0 {
        eg.Go(func() error {
            setupLog.Info("enabling pprof for profiling", "address", profilingAddr)
            return http.ListenAndServe(profilingAddr, nil)
        })
    }

    build.PrintComponentInfo(ctrl.Log, "Interceptor")

    if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) {
        setupLog.Error(err, "fatal error")
        runtime.Goexit()
    }

    setupLog.Info("Bye!")
}

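A pattern worth noting in the rewritten main(): error paths now call runtime.Goexit() instead of os.Exit(1), with a defer os.Exit(1) registered up front. Goexit unwinds the goroutine and runs its deferred functions (such as the tracer shutdown registered above) before the first-registered defer terminates the process with a non-zero code, whereas a direct os.Exit would skip every defer. A standalone sketch of the mechanics:

package main

import (
    "fmt"
    "os"
    "runtime"
)

func main() {
    // Registered first, so it runs last: the process still exits 1,
    // but only after every other deferred cleanup has finished.
    defer os.Exit(1)
    defer fmt.Println("cleanup runs before the deferred os.Exit")

    // Unwinds this goroutine, running its defers in LIFO order;
    // calling os.Exit(1) here instead would skip them entirely.
    runtime.Goexit()
}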
func runAdminServer(
@@ -169,7 +253,148 @@ func runAdminServer(

    addr := fmt.Sprintf("0.0.0.0:%d", port)
    lggr.Info("admin server starting", "address", addr)
    return kedahttp.ServeContext(ctx, addr, adminServer)
    return kedahttp.ServeContext(ctx, addr, adminServer, nil)
}

func runMetricsServer(
    ctx context.Context,
    lggr logr.Logger,
    metricsCfg *config.Metrics,
) error {
    lggr.Info("starting the prometheus metrics server", "port", metricsCfg.OtelPrometheusExporterPort, "path", "/metrics")
    addr := fmt.Sprintf("0.0.0.0:%d", metricsCfg.OtelPrometheusExporterPort)
    return kedahttp.ServeContext(ctx, addr, promhttp.Handler(), nil)
}

// addCert adds a certificate to the map of certificates based on the certificate's SANs
func addCert(m map[string]tls.Certificate, certPath, keyPath string, logger logr.Logger) (*tls.Certificate, error) {
    cert, err := tls.LoadX509KeyPair(certPath, keyPath)
    if err != nil {
        return nil, fmt.Errorf("error loading certificate and key: %w", err)
    }
    if cert.Leaf == nil {
        if len(cert.Certificate) == 0 {
            return nil, fmt.Errorf("no certificate found in certificate chain")
        }
        cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
        if err != nil {
            return nil, fmt.Errorf("error parsing certificate: %w", err)
        }
    }
    for _, d := range cert.Leaf.DNSNames {
        logger.Info("adding certificate", "dns", d)
        m[d] = cert
    }
    for _, ip := range cert.Leaf.IPAddresses {
        logger.Info("adding certificate", "ip", ip.String())
        m[ip.String()] = cert
    }
    for _, uri := range cert.Leaf.URIs {
        logger.Info("adding certificate", "uri", uri.String())
        m[uri.String()] = cert
    }
    return &cert, nil
}

func defaultCertPool(logger logr.Logger) *x509.CertPool {
    systemCAs, err := x509.SystemCertPool()
    if err == nil {
        return systemCAs
    }

    logger.Info("error loading system CA pool, using empty pool", "error", err)
    return x509.NewCertPool()
}

// getTLSConfig creates a TLS config from KEDA_HTTP_PROXY_TLS_CERT_PATH, KEDA_HTTP_PROXY_TLS_KEY_PATH and KEDA_HTTP_PROXY_TLS_CERTSTORE_PATHS
// The matching between request and certificate is performed by comparing TLS/SNI server name with x509 SANs
func getTLSConfig(tlsConfig map[string]interface{}, logger logr.Logger) (*tls.Config, error) {
    certPath, _ := tlsConfig["certificatePath"].(string)
    keyPath, _ := tlsConfig["keyPath"].(string)
    certStorePaths, _ := tlsConfig["certstorePaths"].(string)
    insecureSkipVerify, _ := tlsConfig["skipVerify"].(bool)

    servingTLS := &tls.Config{
        RootCAs:            defaultCertPool(logger),
        InsecureSkipVerify: insecureSkipVerify,
    }
    var defaultCert *tls.Certificate

    uriDomainsToCerts := make(map[string]tls.Certificate)
    if certPath != "" && keyPath != "" {
        cert, err := addCert(uriDomainsToCerts, certPath, keyPath, logger)
        if err != nil {
            return servingTLS, fmt.Errorf("error adding certificate and key: %w", err)
        }
        defaultCert = cert
        rawCert, err := os.ReadFile(certPath)
        if err != nil {
            return servingTLS, fmt.Errorf("error reading certificate: %w", err)
        }
        servingTLS.RootCAs.AppendCertsFromPEM(rawCert)
    }

    if certStorePaths != "" {
        certFiles := make(map[string]string)
        keyFiles := make(map[string]string)
        dirPaths := strings.Split(certStorePaths, ",")
        for _, dir := range dirPaths {
            err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
                if err != nil {
                    return err
                }
                if info.IsDir() {
                    return nil
                }
                switch {
                case strings.HasSuffix(path, "-key.pem"):
                    certID := path[:len(path)-8]
                    keyFiles[certID] = path
                case strings.HasSuffix(path, ".pem"):
                    certID := path[:len(path)-4]
                    certFiles[certID] = path
                case strings.HasSuffix(path, ".key"):
                    certID := path[:len(path)-4]
                    keyFiles[certID] = path
                case strings.HasSuffix(path, ".crt"):
                    certID := path[:len(path)-4]
                    certFiles[certID] = path
                }
                return nil
            })
            if err != nil {
                return servingTLS, fmt.Errorf("error walking certificate store: %w", err)
            }
        }

        for certID, certPath := range certFiles {
            logger.Info("adding certificate", "certID", certID, "certPath", certPath)
            keyPath, ok := keyFiles[certID]
            if !ok {
                return servingTLS, fmt.Errorf("no key found for certificate %s", certPath)
            }
            if _, err := addCert(uriDomainsToCerts, certPath, keyPath, logger); err != nil {
                return servingTLS, fmt.Errorf("error adding certificate %s: %w", certPath, err)
            }
            rawCert, err := os.ReadFile(certPath)
            if err != nil {
                return servingTLS, fmt.Errorf("error reading certificate: %w", err)
            }
            servingTLS.RootCAs.AppendCertsFromPEM(rawCert)
        }
    }

    servingTLS.GetCertificate = func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
        if cert, ok := uriDomainsToCerts[hello.ServerName]; ok {
            return &cert, nil
        }
        if defaultCert != nil {
            return defaultCert, nil
        }
        return nil, fmt.Errorf("no certificate found for %s", hello.ServerName)
    }
    servingTLS.Certificates = maps.Values(uriDomainsToCerts)
    return servingTLS, nil
}
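For illustration, a hypothetical client-side counterpart: the name the client puts in SNI is what arrives as hello.ServerName and drives the lookup in uriDomainsToCerts. The host names below are made up, and pool is an assumed *x509.CertPool trusting the proxy's CA.

// Sketch only: a handshake whose SNI selects a certificate above.
conn, err := tls.Dial("tcp", "proxy.example.com:8443", &tls.Config{
    ServerName: "api.example.com", // becomes hello.ServerName on the server
    RootCAs:    pool,
})
if err != nil {
    log.Fatal(err)
}
defer conn.Close()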

func runProxyServer(
@@ -178,8 +403,12 @@ func runProxyServer(
    q queue.Counter,
    waitFunc forwardWaitFunc,
    routingTable routing.Table,
    svcCache k8s.ServiceCache,
    timeouts *config.Timeouts,
    port int,
    tlsEnabled bool,
    tlsConfig map[string]interface{},
    tracingConfig *config.Tracing,
) error {
    dialer := kedanet.NewNetDialer(timeouts.Connect, timeouts.KeepAlive)
    dialContextFunc := kedanet.DialContextWithRetry(dialer, timeouts.DefaultBackoff())
@@ -189,12 +418,31 @@ func runProxyServer(
    })
    go probeHandler.Start(ctx)

    var tlsCfg *tls.Config
    if tlsEnabled {
        cfg, err := getTLSConfig(tlsConfig, logger)
        if err != nil {
            logger.Error(fmt.Errorf("error creating certGetter for proxy server"), "error", err)
            os.Exit(1)
        }
        tlsCfg = cfg
    }

    var upstreamHandler http.Handler
    forwardingTLSCfg := &tls.Config{}
    if tlsCfg != nil {
        forwardingTLSCfg.RootCAs = tlsCfg.RootCAs
        forwardingTLSCfg.Certificates = tlsCfg.Certificates
        forwardingTLSCfg.InsecureSkipVerify = tlsCfg.InsecureSkipVerify
    }

    upstreamHandler = newForwardingHandler(
        logger,
        dialContextFunc,
        waitFunc,
        newForwardingConfigFromTimeouts(timeouts),
        forwardingTLSCfg,
        tracingConfig,
    )
    upstreamHandler = middleware.NewCountingMiddleware(
        q,
@@ -206,13 +454,27 @@ func runProxyServer(
        routingTable,
        probeHandler,
        upstreamHandler,
        svcCache,
        tlsEnabled,
    )

    if tracingConfig.Enabled {
        rootHandler = otelhttp.NewHandler(rootHandler, "keda-http-interceptor")
    }

    rootHandler = middleware.NewLogging(
        logger,
        rootHandler,
    )

    rootHandler = middleware.NewMetrics(
        rootHandler,
    )

    addr := fmt.Sprintf("0.0.0.0:%d", port)
    logger.Info("proxy server starting", "address", addr)
    return kedahttp.ServeContext(ctx, addr, rootHandler)
    if tlsEnabled {
        return kedahttp.ServeContext(ctx, addr, rootHandler, tlsCfg)
    }
    return kedahttp.ServeContext(ctx, addr, rootHandler, nil)
}
@@ -2,8 +2,11 @@ package main

import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "net/http"
    "os"
    "strconv"
    "testing"
    "time"
@@ -13,12 +16,15 @@ import (
    "golang.org/x/sync/errgroup"

    "github.com/kedacore/http-add-on/interceptor/config"
    "github.com/kedacore/http-add-on/interceptor/tracing"
    "github.com/kedacore/http-add-on/pkg/k8s"
    kedanet "github.com/kedacore/http-add-on/pkg/net"
    "github.com/kedacore/http-add-on/pkg/queue"
    routingtest "github.com/kedacore/http-add-on/pkg/routing/test"
)

const falseStr = "false"

func TestRunProxyServerCountMiddleware(t *testing.T) {
    const (
        port = 8080
@@ -47,6 +53,7 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
        originURL,
        originPort,
        "testdepl",
        "testservice",
    )
    namespacedName := k8s.NamespacedNameFromObject(httpso).String()
@@ -57,13 +64,23 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
    // server
    routingTable := routingtest.NewTable()
    routingTable.Memory[host] = httpso
    svcCache := k8s.NewFakeServiceCache()

    timeouts := &config.Timeouts{}
    waiterCh := make(chan struct{})
    waitFunc := func(_ context.Context, _, _ string) (int, error) {
    waitFunc := func(_ context.Context, _, _ string) (bool, error) {
        <-waiterCh
        return 1, nil
        return false, nil
    }

    tracingCfg := config.Tracing{Enabled: true, Exporter: "otlphttp"}

    _, err = tracing.SetupOTelSDK(ctx, &tracingCfg)

    if err != nil {
        fmt.Println(err, "Error setting up tracer")
    }

    g.Go(func() error {
        return runProxyServer(
            ctx,
@@ -71,8 +88,12 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
            q,
            waitFunc,
            routingTable,
            svcCache,
            timeouts,
            port,
            false,
            map[string]interface{}{},
            &tracingCfg,
        )
    })
    // wait for server to start
@@ -101,6 +122,10 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
                resp.StatusCode,
            )
        }
        if _, ok := resp.Header["Traceparent"]; !ok {
            return fmt.Errorf("expected Traceparent header to exist, but the header wasn't found")
        }

        if resp.Header.Get("X-KEDA-HTTP-Cold-Start") != "false" {
            return fmt.Errorf("expected X-KEDA-HTTP-Cold-Start false, but got %s", resp.Header.Get("X-KEDA-HTTP-Cold-Start"))
        }
@@ -110,7 +135,7 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
    select {
    case hostAndCount := <-q.ResizedCh:
        r.Equal(namespacedName, hostAndCount.Host)
        r.Equal(+1, hostAndCount.Count)
        r.Equal(1, hostAndCount.Count)
    case <-time.After(500 * time.Millisecond):
        r.Fail("timeout waiting for +1 queue resize")
    }
@@ -125,7 +150,7 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
    select {
    case hostAndCount := <-q.ResizedCh:
        r.Equal(namespacedName, hostAndCount.Host)
        r.Equal(-1, hostAndCount.Count)
        r.Equal(1, hostAndCount.Count)
    case <-time.After(2 * time.Second):
        r.Fail("timeout waiting for -1 queue resize")
    }
@@ -141,7 +166,306 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
        "couldn't find host %s in the queue",
        host,
    )
    r.Equal(0, counts[namespacedName])
    r.Equal(0, counts[namespacedName].Concurrency)

    done()
    r.Error(g.Wait())
}

func TestRunProxyServerWithTLSCountMiddleware(t *testing.T) {
    const (
        port = 8443
        host = "samplehost"
    )
    r := require.New(t)
    ctx, done := context.WithCancel(
        context.Background(),
    )
    defer done()

    originHdl := kedanet.NewTestHTTPHandlerWrapper(
        http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusOK)
        }),
    )
    originSrv, originURL, err := kedanet.StartTestServer(originHdl)
    r.NoError(err)
    defer originSrv.Close()
    originPort, err := strconv.Atoi(originURL.Port())
    r.NoError(err)
    g, ctx := errgroup.WithContext(ctx)
    q := queue.NewFakeCounter()

    httpso := targetFromURL(
        originURL,
        originPort,
        "testdepl",
        "testsvc",
    )
    namespacedName := k8s.NamespacedNameFromObject(httpso).String()

    // set up a fake host that we can spoof
    // when we later send request to the proxy,
    // so that the proxy calculates a URL for that
    // host that points to the (above) fake origin
    // server
    routingTable := routingtest.NewTable()
    routingTable.Memory[host] = httpso
    svcCache := k8s.NewFakeServiceCache()

    timeouts := &config.Timeouts{}
    waiterCh := make(chan struct{})
    waitFunc := func(_ context.Context, _, _ string) (bool, error) {
        <-waiterCh
        return false, nil
    }
    tracingCfg := config.Tracing{Enabled: true, Exporter: "otlphttp"}

    g.Go(func() error {
        return runProxyServer(
            ctx,
            logr.Discard(),
            q,
            waitFunc,
            routingTable,
            svcCache,
            timeouts,
            port,
            true,
            map[string]interface{}{"certificatePath": "../certs/tls.crt", "keyPath": "../certs/tls.key", "skipVerify": true},
            &tracingCfg,
        )
    })

    // wait for server to start
    time.Sleep(500 * time.Millisecond)

    // make an HTTPS request in the background
    g.Go(func() error {
        f, err := os.ReadFile("../certs/RootCA.pem")
        if err != nil {
            t.Errorf("Unable to find RootCA for test, please run tests via `make test`")
        }
        rootCAs, _ := x509.SystemCertPool()
        rootCAs.AppendCertsFromPEM(f)

        http.DefaultClient.Transport = &http.Transport{
            TLSClientConfig: &tls.Config{RootCAs: rootCAs},
        }

        req, err := http.NewRequest(
            "GET",
            fmt.Sprintf(
                "https://localhost:%d", port,
            ), nil,
        )
        if err != nil {
            return err
        }
        req.Host = host
        // Allow us to use our self made certs
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return fmt.Errorf(
                "unexpected status code: %d",
                resp.StatusCode,
            )
        }
        if resp.Header.Get("X-KEDA-HTTP-Cold-Start") != falseStr {
            return fmt.Errorf("expected X-KEDA-HTTP-Cold-Start false, but got %s", resp.Header.Get("X-KEDA-HTTP-Cold-Start"))
        }
        return nil
    })
    time.Sleep(100 * time.Millisecond)
    select {
    case hostAndCount := <-q.ResizedCh:
        r.Equal(namespacedName, hostAndCount.Host)
        r.Equal(1, hostAndCount.Count)
    case <-time.After(2000 * time.Millisecond):
        r.Fail("timeout waiting for +1 queue resize")
    }

    // tell the wait func to proceed
    select {
    case waiterCh <- struct{}{}:
    case <-time.After(5 * time.Second):
        r.Fail("timeout producing on waiterCh")
    }

    select {
    case hostAndCount := <-q.ResizedCh:
        r.Equal(namespacedName, hostAndCount.Host)
        r.Equal(1, hostAndCount.Count)
    case <-time.After(2 * time.Second):
        r.Fail("timeout waiting for -1 queue resize")
    }

    // check the queue to make sure all counts are at 0
    countsPtr, err := q.Current()
    r.NoError(err)
    counts := countsPtr.Counts
    r.Equal(1, len(counts))
    _, foundHost := counts[namespacedName]
    r.True(
        foundHost,
        "couldn't find host %s in the queue",
        host,
    )
    r.Equal(0, counts[namespacedName].Concurrency)

    done()
    r.Error(g.Wait())
}

func TestRunProxyServerWithMultipleCertsTLSCountMiddleware(t *testing.T) {
    const (
        port = 8443
        host = "samplehost"
    )
    r := require.New(t)
    ctx, done := context.WithCancel(
        context.Background(),
    )
    defer done()

    originHdl := kedanet.NewTestHTTPHandlerWrapper(
        http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusOK)
        }),
    )
    originSrv, originURL, err := kedanet.StartTestServer(originHdl)
    r.NoError(err)
    defer originSrv.Close()
    originPort, err := strconv.Atoi(originURL.Port())
    r.NoError(err)
    g, ctx := errgroup.WithContext(ctx)
    q := queue.NewFakeCounter()

    httpso := targetFromURL(
        originURL,
        originPort,
        "testdepl",
        "testsvc",
    )
    namespacedName := k8s.NamespacedNameFromObject(httpso).String()

    // set up a fake host that we can spoof
    // when we later send request to the proxy,
    // so that the proxy calculates a URL for that
    // host that points to the (above) fake origin
    // server
    routingTable := routingtest.NewTable()
    routingTable.Memory[host] = httpso
    svcCache := k8s.NewFakeServiceCache()

    timeouts := &config.Timeouts{}
    waiterCh := make(chan struct{})
    waitFunc := func(_ context.Context, _, _ string) (bool, error) {
        <-waiterCh
        return false, nil
    }

    tracingCfg := config.Tracing{Enabled: true, Exporter: "otlphttp"}

    g.Go(func() error {
        return runProxyServer(
            ctx,
            logr.Discard(),
            q,
            waitFunc,
            routingTable,
            svcCache,
            timeouts,
            port,
            true,
            map[string]interface{}{"certstorePaths": "../certs"},
            &tracingCfg,
        )
    })

    // wait for server to start
    time.Sleep(500 * time.Millisecond)

    // make an HTTPS request in the background
    g.Go(func() error {
        f, err := os.ReadFile("../certs/RootCA.pem")
        if err != nil {
            t.Errorf("Unable to find RootCA for test, please run tests via `make test`")
        }
        rootCAs, _ := x509.SystemCertPool()
        rootCAs.AppendCertsFromPEM(f)

        http.DefaultClient.Transport = &http.Transport{
            TLSClientConfig: &tls.Config{RootCAs: rootCAs},
        }

        req, err := http.NewRequest(
            "GET",
            fmt.Sprintf(
                "https://localhost:%d", port,
            ), nil,
        )
        if err != nil {
            return err
        }
        req.Host = host
        // Allow us to use our self made certs
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return fmt.Errorf(
                "unexpected status code: %d",
                resp.StatusCode,
            )
        }
        if resp.Header.Get("X-KEDA-HTTP-Cold-Start") != falseStr {
            return fmt.Errorf("expected X-KEDA-HTTP-Cold-Start false, but got %s", resp.Header.Get("X-KEDA-HTTP-Cold-Start"))
        }
        return nil
    })
    time.Sleep(100 * time.Millisecond)
    select {
    case hostAndCount := <-q.ResizedCh:
        r.Equal(namespacedName, hostAndCount.Host)
        r.Equal(1, hostAndCount.Count)
    case <-time.After(2000 * time.Millisecond):
        r.Fail("timeout waiting for +1 queue resize")
    }

    // tell the wait func to proceed
    select {
    case waiterCh <- struct{}{}:
    case <-time.After(5 * time.Second):
        r.Fail("timeout producing on waiterCh")
    }

    select {
    case hostAndCount := <-q.ResizedCh:
        r.Equal(namespacedName, hostAndCount.Host)
        r.Equal(1, hostAndCount.Count)
    case <-time.After(2 * time.Second):
        r.Fail("timeout waiting for -1 queue resize")
    }

    // check the queue to make sure all counts are at 0
    countsPtr, err := q.Current()
    r.NoError(err)
    counts := countsPtr.Counts
    r.Equal(1, len(counts))
    _, foundHost := counts[namespacedName]
    r.True(
        foundHost,
        "couldn't find host %s in the queue",
        host,
    )
    r.Equal(0, counts[namespacedName].Concurrency)

    done()
    r.Error(g.Wait())
@@ -0,0 +1,40 @@
package metrics

import (
    "github.com/kedacore/http-add-on/interceptor/config"
)

var (
    collectors []Collector
)

const meterName = "keda-interceptor-proxy"

type Collector interface {
    RecordRequestCount(method string, path string, responseCode int, host string)
    RecordPendingRequestCount(host string, value int64)
}

func NewMetricsCollectors(metricsConfig *config.Metrics) {
    if metricsConfig.OtelPrometheusExporterEnabled {
        promometrics := NewPrometheusMetrics()
        collectors = append(collectors, promometrics)
    }

    if metricsConfig.OtelHTTPExporterEnabled {
        otelhttpmetrics := NewOtelMetrics()
        collectors = append(collectors, otelhttpmetrics)
    }
}

func RecordRequestCount(method string, path string, responseCode int, host string) {
    for _, collector := range collectors {
        collector.RecordRequestCount(method, path, responseCode, host)
    }
}

func RecordPendingRequestCount(host string, value int64) {
    for _, collector := range collectors {
        collector.RecordPendingRequestCount(host, value)
    }
}
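Callers never hold a concrete collector: the set is configured once at startup, and the package-level helpers fan out to every registered backend. A minimal usage sketch (the config.Metrics literal below is illustrative):

// Once, at startup (main() does this via config.MustParseMetrics()):
metrics.NewMetricsCollectors(&config.Metrics{OtelPrometheusExporterEnabled: true})

// Per request, from the middleware; a no-op if no collector is registered:
metrics.RecordRequestCount("GET", "/orders", 200, "shop.example.com")
metrics.RecordPendingRequestCount("shop.example.com", 1)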
@@ -0,0 +1,86 @@
package metrics

import (
    "context"
    "log"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
    api "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/sdk/metric"
    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.4.0"

    "github.com/kedacore/http-add-on/pkg/build"
)

type OtelMetrics struct {
    meter                 api.Meter
    requestCounter        api.Int64Counter
    pendingRequestCounter api.Int64UpDownCounter
}

func NewOtelMetrics(options ...metric.Option) *OtelMetrics {
    ctx := context.Background()

    exporter, err := otlpmetrichttp.New(ctx)
    if err != nil {
        log.Fatalf("could not create otelmetrichttp exporter: %v", err)
    }

    if options == nil {
        res := resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ServiceNameKey.String("interceptor-proxy"),
            semconv.ServiceVersionKey.String(build.Version()),
        )

        options = []metric.Option{
            metric.WithReader(metric.NewPeriodicReader(exporter)),
            metric.WithResource(res),
        }
    }

    provider := metric.NewMeterProvider(options...)
    meter := provider.Meter(meterName)

    reqCounter, err := meter.Int64Counter("interceptor_request_count", api.WithDescription("a counter of requests processed by the interceptor proxy"))
    if err != nil {
        log.Fatalf("could not create new otelhttpmetric request counter: %v", err)
    }

    pendingRequestCounter, err := meter.Int64UpDownCounter("interceptor_pending_request_count", api.WithDescription("a count of requests pending forwarding by the interceptor proxy"))
    if err != nil {
        log.Fatalf("could not create new otelhttpmetric pending request counter: %v", err)
    }

    return &OtelMetrics{
        meter:                 meter,
        requestCounter:        reqCounter,
        pendingRequestCounter: pendingRequestCounter,
    }
}

func (om *OtelMetrics) RecordRequestCount(method string, path string, responseCode int, host string) {
    ctx := context.Background()
    opt := api.WithAttributeSet(
        attribute.NewSet(
            attribute.Key("method").String(method),
            attribute.Key("path").String(path),
            attribute.Key("code").Int(responseCode),
            attribute.Key("host").String(host),
        ),
    )
    om.requestCounter.Add(ctx, 1, opt)
}

func (om *OtelMetrics) RecordPendingRequestCount(host string, value int64) {
    ctx := context.Background()
    opt := api.WithAttributeSet(
        attribute.NewSet(
            attribute.Key("host").String(host),
        ),
    )

    om.pendingRequestCounter.Add(ctx, value, opt)
}
@@ -0,0 +1,58 @@
package metrics

import (
    "context"
    "testing"

    "github.com/stretchr/testify/assert"
    "go.opentelemetry.io/otel/sdk/metric"
    "go.opentelemetry.io/otel/sdk/metric/metricdata"
)

var (
    testOtel   *OtelMetrics
    testReader metric.Reader
)

func init() {
    testReader = metric.NewManualReader()
    options := metric.WithReader(testReader)
    testOtel = NewOtelMetrics(options)
}

func TestRequestCounter(t *testing.T) {
    testOtel.RecordRequestCount("GET", "/test", 200, "test-host-1")
    got := metricdata.ResourceMetrics{}
    err := testReader.Collect(context.Background(), &got)

    assert.Nil(t, err)
    scopeMetrics := got.ScopeMetrics[0]
    assert.NotEqual(t, len(scopeMetrics.Metrics), 0)

    metricInfo := retrieveMetric(scopeMetrics.Metrics, "interceptor_request_count")
    data := metricInfo.Data.(metricdata.Sum[int64]).DataPoints[0]
    assert.Equal(t, data.Value, int64(1))
}

func TestPendingRequestCounter(t *testing.T) {
    testOtel.RecordPendingRequestCount("test-host", 5)
    got := metricdata.ResourceMetrics{}
    err := testReader.Collect(context.Background(), &got)

    assert.Nil(t, err)
    scopeMetrics := got.ScopeMetrics[0]
    assert.NotEqual(t, len(scopeMetrics.Metrics), 0)

    metricInfo := retrieveMetric(scopeMetrics.Metrics, "interceptor_pending_request_count")
    data := metricInfo.Data.(metricdata.Sum[int64]).DataPoints[0]
    assert.Equal(t, data.Value, int64(5))
}

func retrieveMetric(metrics []metricdata.Metrics, metricname string) *metricdata.Metrics {
    for _, m := range metrics {
        if m.Name == metricname {
            return &m
        }
    }
    return nil
}
@@ -0,0 +1,86 @@
package metrics

import (
    "context"
    "log"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/exporters/prometheus"
    api "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/sdk/metric"
    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.4.0"

    "github.com/kedacore/http-add-on/pkg/build"
)

type PrometheusMetrics struct {
    meter                 api.Meter
    requestCounter        api.Int64Counter
    pendingRequestCounter api.Int64UpDownCounter
}

func NewPrometheusMetrics(options ...prometheus.Option) *PrometheusMetrics {
    var exporter *prometheus.Exporter
    var err error
    if options == nil {
        exporter, err = prometheus.New()
    } else {
        exporter, err = prometheus.New(options...)
    }
    if err != nil {
        log.Fatalf("could not create Prometheus exporter: %v", err)
    }

    res := resource.NewWithAttributes(
        semconv.SchemaURL,
        semconv.ServiceNameKey.String("interceptor-proxy"),
        semconv.ServiceVersionKey.String(build.Version()),
    )

    provider := metric.NewMeterProvider(
        metric.WithReader(exporter),
        metric.WithResource(res),
    )
    meter := provider.Meter(meterName)

    reqCounter, err := meter.Int64Counter("interceptor_request_count", api.WithDescription("a counter of requests processed by the interceptor proxy"))
    if err != nil {
        log.Fatalf("could not create new Prometheus request counter: %v", err)
    }

    pendingRequestCounter, err := meter.Int64UpDownCounter("interceptor_pending_request_count", api.WithDescription("a count of requests pending forwarding by the interceptor proxy"))
    if err != nil {
        log.Fatalf("could not create new Prometheus pending request counter: %v", err)
    }

    return &PrometheusMetrics{
        meter:                 meter,
        requestCounter:        reqCounter,
        pendingRequestCounter: pendingRequestCounter,
    }
}

func (p *PrometheusMetrics) RecordRequestCount(method string, path string, responseCode int, host string) {
    ctx := context.Background()
    opt := api.WithAttributeSet(
        attribute.NewSet(
            attribute.Key("method").String(method),
            attribute.Key("path").String(path),
            attribute.Key("code").Int(responseCode),
            attribute.Key("host").String(host),
        ),
    )
    p.requestCounter.Add(ctx, 1, opt)
}

func (p *PrometheusMetrics) RecordPendingRequestCount(host string, value int64) {
    ctx := context.Background()
    opt := api.WithAttributeSet(
        attribute.NewSet(
            attribute.Key("host").String(host),
        ),
    )

    p.pendingRequestCounter.Add(ctx, value, opt)
}
@@ -0,0 +1,55 @@
package metrics

import (
    "strings"
    "testing"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/testutil"
    "github.com/stretchr/testify/assert"
    promexporter "go.opentelemetry.io/otel/exporters/prometheus"
)

func TestPromRequestCountMetric(t *testing.T) {
    testRegistry := prometheus.NewRegistry()
    options := []promexporter.Option{promexporter.WithRegisterer(testRegistry)}
    testPrometheus := NewPrometheusMetrics(options...)
    expectedOutput := `
# HELP interceptor_request_count_total a counter of requests processed by the interceptor proxy
# TYPE interceptor_request_count_total counter
interceptor_request_count_total{code="500",host="test-host",method="post",otel_scope_name="keda-interceptor-proxy",otel_scope_version="",path="/test"} 1
interceptor_request_count_total{code="200",host="test-host",method="post",otel_scope_name="keda-interceptor-proxy",otel_scope_version="",path="/test"} 1
# HELP otel_scope_info Instrumentation Scope metadata
# TYPE otel_scope_info gauge
otel_scope_info{otel_scope_name="keda-interceptor-proxy",otel_scope_version=""} 1
# HELP target_info Target metadata
# TYPE target_info gauge
target_info{"service.name"="interceptor-proxy","service.version"="main"} 1
`
    expectedOutputReader := strings.NewReader(expectedOutput)
    testPrometheus.RecordRequestCount("post", "/test", 500, "test-host")
    testPrometheus.RecordRequestCount("post", "/test", 200, "test-host")
    err := testutil.CollectAndCompare(testRegistry, expectedOutputReader)
    assert.Nil(t, err)
}

func TestPromPendingRequestCountMetric(t *testing.T) {
    testRegistry := prometheus.NewRegistry()
    options := []promexporter.Option{promexporter.WithRegisterer(testRegistry)}
    testPrometheus := NewPrometheusMetrics(options...)
    expectedOutput := `
# HELP interceptor_pending_request_count a count of requests pending forwarding by the interceptor proxy
# TYPE interceptor_pending_request_count gauge
interceptor_pending_request_count{host="test-host",otel_scope_name="keda-interceptor-proxy",otel_scope_version=""} 10
# HELP otel_scope_info Instrumentation Scope metadata
# TYPE otel_scope_info gauge
otel_scope_info{otel_scope_name="keda-interceptor-proxy",otel_scope_version=""} 1
# HELP target_info Target metadata
# TYPE target_info gauge
target_info{"service.name"="interceptor-proxy","service.version"="main"} 1
`
    expectedOutputReader := strings.NewReader(expectedOutput)
    testPrometheus.RecordPendingRequestCount("test-host", 10)
    err := testutil.CollectAndCompare(testRegistry, expectedOutputReader)
    assert.Nil(t, err)
}
@@ -6,6 +6,7 @@ import (

    "github.com/go-logr/logr"

    "github.com/kedacore/http-add-on/interceptor/metrics"
    "github.com/kedacore/http-add-on/pkg/k8s"
    "github.com/kedacore/http-add-on/pkg/queue"
    "github.com/kedacore/http-add-on/pkg/util"
@@ -62,21 +63,25 @@ func (cm *Counting) count(ctx context.Context, signaler util.Signaler) {
}

func (cm *Counting) inc(logger logr.Logger, key string) bool {
    if err := cm.queueCounter.Resize(key, +1); err != nil {
    if err := cm.queueCounter.Increase(key, 1); err != nil {
        logger.Error(err, "error incrementing queue counter", "key", key)

        return false
    }

    metrics.RecordPendingRequestCount(key, int64(1))

    return true
}

func (cm *Counting) dec(logger logr.Logger, key string) bool {
    if err := cm.queueCounter.Resize(key, -1); err != nil {
    if err := cm.queueCounter.Decrease(key, 1); err != nil {
        logger.Error(err, "error decrementing queue counter", "key", key)

        return false
    }

    metrics.RecordPendingRequestCount(key, int64(-1))

    return true
}
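The hunk above replaces the single signed Resize(key, ±1) call with paired Increase/Decrease calls. The queue.Counter interface itself is not shown in this diff; its assumed shape after the change would be roughly:

// Assumed contract of the counter used by the middleware after this
// change; the real interface lives in pkg/queue and may differ.
type Counter interface {
    Increase(key string, i int) error // add i pending requests for key
    Decrease(key string, i int) error // remove i pending requests for key
}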
@@ -34,9 +34,9 @@ func TestCountMiddleware(t *testing.T) {
        },
        Spec: httpv1alpha1.HTTPScaledObjectSpec{
            ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
                Deployment: "testdepl",
                Service:    ":",
                Port:       8080,
                Name:    "testdepl",
                Service: "testservice",
                Port:    8080,
            },
            TargetPendingRequests: ptr.To[int32](123),
        },
@@ -56,10 +56,9 @@ func TestCountMiddleware(t *testing.T) {

    ctx := context.Background()

    // for a valid request, we expect the queue to be resized twice.
    // for a valid request, we expect the queue to be modified twice.
    // once to mark a pending HTTP request, then a second time to remove it.
    // by the end of both sends, resize1 + resize2 should be 0,
    // or in other words, the queue size should be back to zero
    // by the end of both sends, increase1 + decrease1 should be 2

    // run middleware with the host in the request
    req, err := http.NewRequest("GET", "/something", nil)
@@ -70,7 +69,7 @@ func TestCountMiddleware(t *testing.T) {
    req = req.WithContext(reqCtx)
    req.Host = uri.Host

    agg, respRecorder := expectResizes(
    agg, respRecorder := expectUpdates(
        ctx,
        t,
        2,
@@ -86,10 +85,10 @@ func TestCountMiddleware(t *testing.T) {
    )
    r.Equal(http.StatusOK, respRecorder.Code)
    r.Equal(http.StatusText(respRecorder.Code), respRecorder.Body.String())
    r.Equal(0, agg)
    r.Equal(2, agg)
}

// expectResizes creates a new httptest.ResponseRecorder, then passes req through
// expectUpdates creates a new httptest.ResponseRecorder, then passes req through
// the middleware. every time the middleware calls fakeCounter.Resize(), it calls
// resizeCheckFn with t and the queue.HostCount that represents the resize call
// that was made. it also maintains an aggregate delta of the counts passed to
@@ -98,7 +97,7 @@ func TestCountMiddleware(t *testing.T) {
//
// this function returns the aggregate and the httptest.ResponseRecorder that was
// created and used with the middleware
func expectResizes(
func expectUpdates(
    ctx context.Context,
    t *testing.T,
    nResizes int,
@@ -31,7 +31,7 @@ var _ http.Handler = (*Logging)(nil)

func (lm *Logging) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    r = util.RequestWithLogger(r, lm.logger.WithName("LoggingMiddleware"))
    w = newLoggingResponseWriter(w)
    w = newResponseWriter(w)

    var sw util.Stopwatch
    defer lm.logAsync(w, r, &sw)
@@ -50,9 +50,9 @@ func (lm *Logging) log(w http.ResponseWriter, r *http.Request, sw *util.Stopwatc
    ctx := r.Context()
    logger := util.LoggerFromContext(ctx)

    lrw := w.(*loggingResponseWriter)
    lrw := w.(*responseWriter)
    if lrw == nil {
        lrw = newLoggingResponseWriter(w)
        lrw = newResponseWriter(w)
    }

    timestamp := sw.StartTime().Format(CombinedLogTimeFormat)
@@ -1,48 +0,0 @@
-package middleware
-
-import (
-	"net/http"
-)
-
-type loggingResponseWriter struct {
-	downstreamResponseWriter http.ResponseWriter
-	bytesWritten             int
-	statusCode               int
-}
-
-func newLoggingResponseWriter(downstreamResponseWriter http.ResponseWriter) *loggingResponseWriter {
-	return &loggingResponseWriter{
-		downstreamResponseWriter: downstreamResponseWriter,
-	}
-}
-
-func (lrw *loggingResponseWriter) BytesWritten() int {
-	return lrw.bytesWritten
-}
-
-func (lrw *loggingResponseWriter) StatusCode() int {
-	return lrw.statusCode
-}
-
-var _ http.ResponseWriter = (*loggingResponseWriter)(nil)
-
-func (lrw *loggingResponseWriter) Header() http.Header {
-	return lrw.downstreamResponseWriter.Header()
-}
-
-func (lrw *loggingResponseWriter) Write(bytes []byte) (int, error) {
-	n, err := lrw.downstreamResponseWriter.Write(bytes)
-	if f, ok := lrw.downstreamResponseWriter.(http.Flusher); ok {
-		f.Flush()
-	}
-
-	lrw.bytesWritten += n
-
-	return n, err
-}
-
-func (lrw *loggingResponseWriter) WriteHeader(statusCode int) {
-	lrw.downstreamResponseWriter.WriteHeader(statusCode)
-
-	lrw.statusCode = statusCode
-}
@@ -0,0 +1,37 @@
+package middleware
+
+import (
+	"net/http"
+
+	"github.com/kedacore/http-add-on/interceptor/metrics"
+)
+
+type Metrics struct {
+	upstreamHandler http.Handler
+}
+
+func NewMetrics(upstreamHandler http.Handler) *Metrics {
+	return &Metrics{
+		upstreamHandler: upstreamHandler,
+	}
+}
+
+func (m *Metrics) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	w = newResponseWriter(w)
+
+	defer m.metrics(w, r)
+
+	m.upstreamHandler.ServeHTTP(w, r)
+}
+
+func (m *Metrics) metrics(w http.ResponseWriter, r *http.Request) {
+	mrw := w.(*responseWriter)
+	if mrw == nil {
+		mrw = newResponseWriter(w)
+	}
+
+	// exclude readiness & liveness probes from the emitted metrics
+	if r.URL.Path != "/livez" && r.URL.Path != "/readyz" {
+		metrics.RecordRequestCount(r.Method, r.URL.Path, mrw.statusCode, r.Host)
+	}
+}
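Editor's note: the new `Metrics` middleware wraps the upstream handler, captures the status code through the shared `responseWriter`, and records the request count in a deferred call while skipping `/livez` and `/readyz`. A hypothetical wiring sketch; the mux, port, and surrounding `main` are assumptions for illustration:

```go
package main

import (
	"net/http"

	"github.com/kedacore/http-add-on/interceptor/middleware"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Every response passes through the status-capturing responseWriter first,
	// then the deferred metrics() call records method, path, status, and host.
	var root http.Handler = middleware.NewMetrics(mux)
	_ = http.ListenAndServe(":8080", root)
}
```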
@@ -0,0 +1,48 @@
+package middleware
+
+import (
+	"net/http"
+)
+
+type responseWriter struct {
+	downstreamResponseWriter http.ResponseWriter
+	bytesWritten             int
+	statusCode               int
+}
+
+func newResponseWriter(downstreamResponseWriter http.ResponseWriter) *responseWriter {
+	return &responseWriter{
+		downstreamResponseWriter: downstreamResponseWriter,
+	}
+}
+
+func (rw *responseWriter) BytesWritten() int {
+	return rw.bytesWritten
+}
+
+func (rw *responseWriter) StatusCode() int {
+	return rw.statusCode
+}
+
+var _ http.ResponseWriter = (*responseWriter)(nil)
+
+func (rw *responseWriter) Header() http.Header {
+	return rw.downstreamResponseWriter.Header()
+}
+
+func (rw *responseWriter) Write(bytes []byte) (int, error) {
+	n, err := rw.downstreamResponseWriter.Write(bytes)
+	if f, ok := rw.downstreamResponseWriter.(http.Flusher); ok {
+		f.Flush()
+	}
+
+	rw.bytesWritten += n
+
+	return n, err
+}
+
+func (rw *responseWriter) WriteHeader(statusCode int) {
+	rw.downstreamResponseWriter.WriteHeader(statusCode)
+
+	rw.statusCode = statusCode
+}
@@ -8,18 +8,18 @@ import (
 	. "github.com/onsi/gomega"
 )

-var _ = Describe("loggingResponseWriter", func() {
+var _ = Describe("responseWriter", func() {
 	Context("New", func() {
 		It("returns new object with expected field values set", func() {
 			var (
 				w = httptest.NewRecorder()
 			)

-			lrw := newLoggingResponseWriter(w)
-			Expect(lrw).NotTo(BeNil())
-			Expect(lrw.downstreamResponseWriter).To(Equal(w))
-			Expect(lrw.bytesWritten).To(Equal(0))
-			Expect(lrw.statusCode).To(Equal(0))
+			rw := newResponseWriter(w)
+			Expect(rw).NotTo(BeNil())
+			Expect(rw.downstreamResponseWriter).To(Equal(w))
+			Expect(rw.bytesWritten).To(Equal(0))
+			Expect(rw.statusCode).To(Equal(0))
 		})
 	})

@@ -29,11 +29,11 @@ var _ = Describe("loggingResponseWriter", func() {
 				bw = 128
 			)

-			lrw := &loggingResponseWriter{
+			rw := &responseWriter{
 				bytesWritten: bw,
 			}

-			ret := lrw.BytesWritten()
+			ret := rw.BytesWritten()
 			Expect(ret).To(Equal(bw))
 		})
 	})

@@ -44,11 +44,11 @@ var _ = Describe("loggingResponseWriter", func() {
 				sc = http.StatusTeapot
 			)

-			lrw := &loggingResponseWriter{
+			rw := &responseWriter{
 				statusCode: sc,
 			}

-			ret := lrw.StatusCode()
+			ret := rw.StatusCode()
 			Expect(ret).To(Equal(sc))
 		})
 	})

@@ -59,14 +59,14 @@ var _ = Describe("loggingResponseWriter", func() {
 				w = httptest.NewRecorder()
 			)

-			lrw := &loggingResponseWriter{
+			rw := &responseWriter{
 				downstreamResponseWriter: w,
 			}

 			h := w.Header()
 			h.Set("Content-Type", "application/json")

-			ret := lrw.Header()
+			ret := rw.Header()
 			Expect(ret).To(Equal(h))
 		})
 	})

@@ -83,16 +83,16 @@ var _ = Describe("loggingResponseWriter", func() {
 				w = httptest.NewRecorder()
 			)

-			lrw := &loggingResponseWriter{
+			rw := &responseWriter{
 				bytesWritten:             initialBW,
 				downstreamResponseWriter: w,
 			}

-			n, err := lrw.Write([]byte(body))
+			n, err := rw.Write([]byte(body))
 			Expect(err).To(BeNil())
 			Expect(n).To(Equal(bodyLen))

-			Expect(lrw.bytesWritten).To(Equal(initialBW + bodyLen))
+			Expect(rw.bytesWritten).To(Equal(initialBW + bodyLen))

 			Expect(w.Body.String()).To(Equal(body))
 		})

@@ -108,13 +108,13 @@ var _ = Describe("loggingResponseWriter", func() {
 				w = httptest.NewRecorder()
 			)

-			lrw := &loggingResponseWriter{
+			rw := &responseWriter{
 				statusCode:               http.StatusOK,
 				downstreamResponseWriter: w,
 			}
-			lrw.WriteHeader(sc)
+			rw.WriteHeader(sc)

-			Expect(lrw.statusCode).To(Equal(sc))
+			Expect(rw.statusCode).To(Equal(sc))

 			Expect(w.Code).To(Equal(sc))
 		})
@@ -1,6 +1,7 @@
 package middleware

 import (
+	"context"
 	"fmt"
 	"net/http"
 	"net/url"

@@ -8,25 +9,32 @@ import (
 	"github.com/kedacore/http-add-on/interceptor/handler"
 	httpv1alpha1 "github.com/kedacore/http-add-on/operator/apis/http/v1alpha1"
+	"github.com/kedacore/http-add-on/pkg/k8s"
 	"github.com/kedacore/http-add-on/pkg/routing"
 	"github.com/kedacore/http-add-on/pkg/util"
 )

 var (
-	kpUserAgent = regexp.MustCompile(`(^|\s)kube-probe/`)
+	kubernetesProbeUserAgent = regexp.MustCompile(`(^|\s)kube-probe/`)
+	googleHCUserAgent        = regexp.MustCompile(`(^|\s)GoogleHC/`)
+	awsELBserAgent           = regexp.MustCompile(`(^|\s)ELB-HealthChecker/`)
 )

 type Routing struct {
 	routingTable    routing.Table
 	probeHandler    http.Handler
 	upstreamHandler http.Handler
+	svcCache        k8s.ServiceCache
+	tlsEnabled      bool
 }

-func NewRouting(routingTable routing.Table, probeHandler http.Handler, upstreamHandler http.Handler) *Routing {
+func NewRouting(routingTable routing.Table, probeHandler http.Handler, upstreamHandler http.Handler, svcCache k8s.ServiceCache, tlsEnabled bool) *Routing {
 	return &Routing{
 		routingTable:    routingTable,
 		probeHandler:    probeHandler,
 		upstreamHandler: upstreamHandler,
+		svcCache:        svcCache,
+		tlsEnabled:      tlsEnabled,
 	}
 }

@@ -37,7 +45,7 @@ func (rm *Routing) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	httpso := rm.routingTable.Route(r)
 	if httpso == nil {
-		if rm.isKubeProbe(r) {
+		if rm.isProbe(r) {
 			rm.probeHandler.ServeHTTP(w, r)
 			return
 		}

@@ -49,7 +57,7 @@ func (rm *Routing) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	r = r.WithContext(util.ContextWithHTTPSO(r.Context(), httpso))

-	stream, err := rm.streamFromHTTPSO(httpso)
+	stream, err := rm.streamFromHTTPSO(r.Context(), httpso, httpso.Spec.ScaleTargetRef)
 	if err != nil {
 		sh := handler.NewStatic(http.StatusInternalServerError, err)
 		sh.ServeHTTP(w, r)

@@ -58,20 +66,68 @@ func (rm *Routing) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	r = r.WithContext(util.ContextWithStream(r.Context(), stream))

+	if httpso.Spec.ColdStartTimeoutFailoverRef != nil {
+		failoverStream, err := rm.streamFromHTTPSO(r.Context(), httpso, httpso.Spec.ColdStartTimeoutFailoverRef)
+		if err != nil {
+			sh := handler.NewStatic(http.StatusInternalServerError, err)
+			sh.ServeHTTP(w, r)
+			return
+		}
+		r = r.WithContext(util.ContextWithFailoverStream(r.Context(), failoverStream))
+	}
+
 	rm.upstreamHandler.ServeHTTP(w, r)
 }

-func (rm *Routing) streamFromHTTPSO(httpso *httpv1alpha1.HTTPScaledObject) (*url.URL, error) {
+func (rm *Routing) getPort(ctx context.Context, httpso *httpv1alpha1.HTTPScaledObject, reference httpv1alpha1.Ref) (int32, error) {
+	var (
+		port        = reference.GetPort()
+		portName    = reference.GetPortName()
+		serviceName = reference.GetServiceName()
+	)
+
+	if port != 0 {
+		return port, nil
+	}
+	if portName == "" {
+		return 0, fmt.Errorf(`must specify either "port" or "portName"`)
+	}
+	svc, err := rm.svcCache.Get(ctx, httpso.GetNamespace(), serviceName)
+	if err != nil {
+		return 0, fmt.Errorf("failed to get Service: %w", err)
+	}
+	for _, port := range svc.Spec.Ports {
+		if port.Name == portName {
+			return port.Port, nil
+		}
+	}
+	return 0, fmt.Errorf("portName %q not found in Service", portName)
+}
+
+func (rm *Routing) streamFromHTTPSO(ctx context.Context, httpso *httpv1alpha1.HTTPScaledObject, reference httpv1alpha1.Ref) (*url.URL, error) {
+	port, err := rm.getPort(ctx, httpso, reference)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get port: %w", err)
+	}
+	if rm.tlsEnabled {
+		return url.Parse(fmt.Sprintf(
+			"https://%s.%s:%d",
+			reference.GetServiceName(),
+			httpso.GetNamespace(),
+			port,
+		))
+	}
 	//goland:noinspection HttpUrlsUsage
 	return url.Parse(fmt.Sprintf(
 		"http://%s.%s:%d",
-		httpso.Spec.ScaleTargetRef.Service,
+		reference.GetServiceName(),
 		httpso.GetNamespace(),
-		httpso.Spec.ScaleTargetRef.Port,
+		port,
 	))
 }

-func (rm *Routing) isKubeProbe(r *http.Request) bool {
+func (rm *Routing) isProbe(r *http.Request) bool {
 	ua := r.UserAgent()
-	return kpUserAgent.Match([]byte(ua))
+
+	return kubernetesProbeUserAgent.Match([]byte(ua)) || googleHCUserAgent.Match([]byte(ua)) || awsELBserAgent.Match([]byte(ua))
 }
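Editor's note: `isProbe` now recognizes Google Cloud and AWS ELB health checkers alongside kube-probe. A standalone sketch of the same matching logic, with the regexes copied from the hunk above; the sample user-agent strings are illustrative:

```go
package main

import (
	"fmt"
	"regexp"
)

// The three patterns the routing middleware now accepts as probes.
var probeUserAgents = []*regexp.Regexp{
	regexp.MustCompile(`(^|\s)kube-probe/`),        // Kubernetes liveness/readiness
	regexp.MustCompile(`(^|\s)GoogleHC/`),          // Google Cloud health checks
	regexp.MustCompile(`(^|\s)ELB-HealthChecker/`), // AWS ELB health checks
}

func isProbe(userAgent string) bool {
	for _, re := range probeUserAgents {
		if re.MatchString(userAgent) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isProbe("kube-probe/1.27"))                  // true
	fmt.Println(isProbe("Go-http-client/1.1 GoogleHC/1.0"))  // true
	fmt.Println(isProbe("kubectl/v1.27.1"))                  // false
}
```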
@@ -6,8 +6,11 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	httpv1alpha1 "github.com/kedacore/http-add-on/operator/apis/http/v1alpha1"
+	"github.com/kedacore/http-add-on/pkg/k8s"
 	routingtest "github.com/kedacore/http-add-on/pkg/routing/test"
 )

@@ -19,10 +22,12 @@ var _ = Describe("RoutingMiddleware", func() {
 				probeHandler    = http.NewServeMux()
 				upstreamHandler = http.NewServeMux()
 			)
-			probeHandler.Handle("/probe", http.HandlerFunc(nil))
-			upstreamHandler.Handle("/upstream", http.HandlerFunc(nil))
+			emptyHandler := http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
+			probeHandler.Handle("/probe", emptyHandler)
+			upstreamHandler.Handle("/upstream", emptyHandler)
+			svcCache := k8s.NewFakeServiceCache()

-			rm := NewRouting(routingTable, probeHandler, upstreamHandler)
+			rm := NewRouting(routingTable, probeHandler, upstreamHandler, svcCache, false)
 			Expect(rm).NotTo(BeNil())
 			Expect(rm.routingTable).To(Equal(routingTable))
 			Expect(rm.probeHandler).To(Equal(probeHandler))

@@ -39,6 +44,7 @@ var _ = Describe("RoutingMiddleware", func() {
 		var (
 			upstreamHandler   *http.ServeMux
 			probeHandler      *http.ServeMux
+			svcCache          *k8s.FakeServiceCache
 			routingTable      *routingtest.Table
 			routingMiddleware *Routing
 			w                 *httptest.ResponseRecorder

@@ -49,6 +55,39 @@ var _ = Describe("RoutingMiddleware", func() {
 					Hosts: []string{
 						host,
 					},
+					ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
+						Port: 80,
+					},
 				},
 			}
+
+			httpsoWithPortName = httpv1alpha1.HTTPScaledObject{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "keda",
+					Namespace: "default",
+				},
+				Spec: httpv1alpha1.HTTPScaledObjectSpec{
+					Hosts: []string{
+						"keda2.sh",
+					},
+					ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
+						Service:  "keda-svc",
+						PortName: "http",
+					},
+				},
+			}
+			svc = &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "keda-svc",
+					Namespace: "default",
+				},
+				Spec: corev1.ServiceSpec{
+					Ports: []corev1.ServicePort{
+						{
+							Name: "http",
+							Port: 80,
+						},
+					},
+				},
+			}
 		)

@@ -57,7 +96,8 @@ var _ = Describe("RoutingMiddleware", func() {
 			upstreamHandler = http.NewServeMux()
 			probeHandler = http.NewServeMux()
 			routingTable = routingtest.NewTable()
-			routingMiddleware = NewRouting(routingTable, probeHandler, upstreamHandler)
+			svcCache = k8s.NewFakeServiceCache()
+			routingMiddleware = NewRouting(routingTable, probeHandler, upstreamHandler, svcCache, false)

 			w = httptest.NewRecorder()

@@ -90,7 +130,6 @@ var _ = Describe("RoutingMiddleware", func() {
 				routingTable.Memory[host] = &httpso
-
 				routingMiddleware.ServeHTTP(w, r)

 				Expect(uh).To(BeTrue())
 				Expect(ph).To(BeFalse())
 				Expect(w.Code).To(Equal(sc))

@@ -98,6 +137,73 @@ var _ = Describe("RoutingMiddleware", func() {
 			})
 		})

+		When("route is found with portName", func() {
+			It("routes to the upstream handler", func() {
+				svcCache.Add(*svc)
+				var (
+					sc = http.StatusTeapot
+					st = http.StatusText(sc)
+				)
+
+				var uh bool
+				upstreamHandler.Handle(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					w.WriteHeader(http.StatusTeapot)
+
+					_, err := w.Write([]byte(st))
+					Expect(err).NotTo(HaveOccurred())
+
+					uh = true
+				}))
+
+				var ph bool
+				probeHandler.Handle(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					ph = true
+				}))
+
+				routingTable.Memory["keda2.sh"] = &httpsoWithPortName
+
+				r.Host = "keda2.sh"
+				routingMiddleware.ServeHTTP(w, r)
+				Expect(uh).To(BeTrue())
+				Expect(ph).To(BeFalse())
+				Expect(w.Code).To(Equal(sc))
+				Expect(w.Body.String()).To(Equal(st))
+			})
+		})
+
+		When("route is found with portName but endpoints are mismatched", func() {
+			It("errors to route to upstream handler", func() {
+				var (
+					sc = http.StatusTeapot
+					st = http.StatusText(sc)
+				)
+
+				var uh bool
+				upstreamHandler.Handle(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					w.WriteHeader(http.StatusTeapot)
+
+					_, err := w.Write([]byte(st))
+					Expect(err).NotTo(HaveOccurred())
+
+					uh = true
+				}))
+
+				var ph bool
+				probeHandler.Handle(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					ph = true
+				}))
+
+				routingTable.Memory["keda2.sh"] = &httpsoWithPortName
+
+				r.Host = "keda2.sh"
+				routingMiddleware.ServeHTTP(w, r)
+				Expect(uh).To(BeFalse())
+				Expect(ph).To(BeFalse())
+				Expect(w.Code).To(Equal(http.StatusInternalServerError))
+				Expect(w.Body.String()).To(Equal("Internal Server Error"))
+			})
+		})
+
 		When("route is not found", func() {
 			It("routes to the probe handler", func() {
 				const (

@@ -182,11 +288,35 @@ var _ = Describe("RoutingMiddleware", func() {
 				r.Header.Set(uaKey, uaVal)

 				var rm Routing
-				b := rm.isKubeProbe(r)
+				b := rm.isProbe(r)
 				Expect(b).To(BeTrue())
 			})

-			It("returns false if the request is not from kube-probe", func() {
+			It("returns true if the request is from GoogleHC", func() {
+				const (
+					uaVal = "Go-http-client/1.1 GoogleHC/1.0 (linux/amd64) kubernetes/4c94112"
+				)
+
+				r.Header.Set(uaKey, uaVal)
+
+				var rm Routing
+				b := rm.isProbe(r)
+				Expect(b).To(BeTrue())
+			})
+
+			It("returns true if the request is from AWS ELB", func() {
+				const (
+					uaVal = "Go-http-client/1.1 ELB-HealthChecker/2.0 (linux/amd64) kubernetes/4c94112"
+				)
+
+				r.Header.Set(uaKey, uaVal)
+
+				var rm Routing
+				b := rm.isProbe(r)
+				Expect(b).To(BeTrue())
+			})
+
+			It("returns false if the request is not from kube-probe or GoogleHC or ELB-HealthChecker", func() {
 				const (
 					uaVal = "Go-http-client/1.1 kubectl/v1.27.1 (linux/amd64) kubernetes/4c94112"
 				)

@@ -194,7 +324,7 @@ var _ = Describe("RoutingMiddleware", func() {
 				r.Header.Set(uaKey, uaVal)

 				var rm Routing
-				b := rm.isKubeProbe(r)
+				b := rm.isProbe(r)
 				Expect(b).To(BeFalse())
 			})
 		})
@@ -2,11 +2,14 @@ package main
 import (
 	"context"
+	"crypto/tls"
 	"fmt"
 	"net/http"
+	"strconv"
 	"time"

 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"

 	"github.com/kedacore/http-add-on/interceptor/config"
 	"github.com/kedacore/http-add-on/interceptor/handler"

@@ -26,7 +29,7 @@ type forwardingConfig struct {
 func newForwardingConfigFromTimeouts(t *config.Timeouts) forwardingConfig {
 	return forwardingConfig{
-		waitTimeout:       t.DeploymentReplicas,
+		waitTimeout:       t.WorkloadReplicas,
 		respHeaderTimeout: t.ResponseHeader,
 		forceAttemptHTTP2: t.ForceHTTP2,
 		maxIdleConns:      t.MaxIdleConns,

@@ -47,43 +50,67 @@ func newForwardingHandler(
 	dialCtxFunc kedanet.DialContextFunc,
 	waitFunc forwardWaitFunc,
 	fwdCfg forwardingConfig,
+	tlsCfg *tls.Config,
+	tracingCfg *config.Tracing,
 ) http.Handler {
-	roundTripper := &http.Transport{
-		Proxy:                 http.ProxyFromEnvironment,
-		DialContext:           dialCtxFunc,
-		ForceAttemptHTTP2:     fwdCfg.forceAttemptHTTP2,
-		MaxIdleConns:          fwdCfg.maxIdleConns,
-		IdleConnTimeout:       fwdCfg.idleConnTimeout,
-		TLSHandshakeTimeout:   fwdCfg.tlsHandshakeTimeout,
-		ExpectContinueTimeout: fwdCfg.expectContinueTimeout,
-		ResponseHeaderTimeout: fwdCfg.respHeaderTimeout,
-	}
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var uh *handler.Upstream
 		ctx := r.Context()
 		httpso := util.HTTPSOFromContext(ctx)
+		hasFailover := httpso.Spec.ColdStartTimeoutFailoverRef != nil

-		waitFuncCtx, done := context.WithTimeout(r.Context(), fwdCfg.waitTimeout)
+		conditionWaitTimeout := fwdCfg.waitTimeout
+		roundTripper := &http.Transport{
+			Proxy:                 http.ProxyFromEnvironment,
+			DialContext:           dialCtxFunc,
+			ForceAttemptHTTP2:     fwdCfg.forceAttemptHTTP2,
+			MaxIdleConns:          fwdCfg.maxIdleConns,
+			IdleConnTimeout:       fwdCfg.idleConnTimeout,
+			TLSHandshakeTimeout:   fwdCfg.tlsHandshakeTimeout,
+			ExpectContinueTimeout: fwdCfg.expectContinueTimeout,
+			ResponseHeaderTimeout: fwdCfg.respHeaderTimeout,
+			TLSClientConfig:       tlsCfg,
+		}
+
+		if httpso.Spec.Timeouts != nil {
+			if httpso.Spec.Timeouts.ConditionWait.Duration > 0 {
+				conditionWaitTimeout = httpso.Spec.Timeouts.ConditionWait.Duration
+			}
+
+			if httpso.Spec.Timeouts.ResponseHeader.Duration > 0 {
+				roundTripper.ResponseHeaderTimeout = httpso.Spec.Timeouts.ResponseHeader.Duration
+			}
+		}
+
+		if hasFailover && httpso.Spec.ColdStartTimeoutFailoverRef.TimeoutSeconds > 0 {
+			conditionWaitTimeout = time.Duration(httpso.Spec.ColdStartTimeoutFailoverRef.TimeoutSeconds) * time.Second
+		}
+
+		waitFuncCtx, done := context.WithTimeout(ctx, conditionWaitTimeout)
 		defer done()
-		replicas, err := waitFunc(
+		isColdStart, err := waitFunc(
 			waitFuncCtx,
 			httpso.GetNamespace(),
-			httpso.Spec.ScaleTargetRef.Deployment,
+			httpso.Spec.ScaleTargetRef.Service,
 		)
-		if err != nil {
+		if err != nil && !hasFailover {
 			lggr.Error(err, "wait function failed, not forwarding request")
 			w.WriteHeader(http.StatusBadGateway)
-			if _, err := w.Write([]byte(fmt.Sprintf("error on backend (%s)", err))); err != nil {
+			if _, err := fmt.Fprintf(w, "error on backend (%s)", err); err != nil {
 				lggr.Error(err, "could not write error response to client")
 			}
 			return
 		}
-		isColdStart := "false"
-		if replicas == 0 {
-			isColdStart = "true"
-		}
-		w.Header().Add("X-KEDA-HTTP-Cold-Start", isColdStart)
+		w.Header().Add("X-KEDA-HTTP-Cold-Start", strconv.FormatBool(isColdStart))
+		r.Header.Add("X-KEDA-HTTP-Cold-Start-Ref-Name", httpso.Spec.ScaleTargetRef.Name)
+		r.Header.Add("X-KEDA-HTTP-Cold-Start-Ref-Namespace", httpso.Namespace)

-		uh := handler.NewUpstream(roundTripper)
+		shouldFailover := hasFailover && err != nil
+		if tracingCfg.Enabled {
+			uh = handler.NewUpstream(otelhttp.NewTransport(roundTripper), tracingCfg, shouldFailover)
+		} else {
+			uh = handler.NewUpstream(roundTripper, &config.Tracing{}, shouldFailover)
+		}
 		uh.ServeHTTP(w, r)
 	})
 }
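Editor's note: the wait function now returns a boolean cold-start flag instead of a replica count, and the interceptor formats it with `strconv.FormatBool` before setting the `X-KEDA-HTTP-Cold-Start` response header. A hypothetical client-side check; the service URL is an assumption for illustration:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical host routed through the interceptor.
	resp, err := http.Get("http://myapp.example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// "true" means the wait function saw no ready backends when the request
	// arrived, i.e. this request triggered a scale-from-zero cold start.
	fmt.Println("cold start:", resp.Header.Get("X-KEDA-HTTP-Cold-Start"))
}
```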
@@ -2,6 +2,7 @@ package main
 import (
 	"context"
+	"crypto/tls"
 	"fmt"
 	"net"
 	"net/http"

@@ -16,10 +17,11 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
-	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"

 	"github.com/kedacore/http-add-on/interceptor/config"
+	"github.com/kedacore/http-add-on/interceptor/middleware"
 	"github.com/kedacore/http-add-on/pkg/k8s"
 	kedanet "github.com/kedacore/http-add-on/pkg/net"

@@ -29,13 +31,14 @@ import (
 // happy path - deployment is scaled to 1 and host in routing table
 func TestIntegrationHappyPath(t *testing.T) {
 	const (
-		deploymentReplicasTimeout = 200 * time.Millisecond
-		deplName                  = "testdeployment"
+		activeEndpointsTimeout = 200 * time.Millisecond
+		deploymentName         = "testdeployment"
+		serviceName            = "testservice"
 	)
 	r := require.New(t)
 	h, err := newHarness(
 		t,
-		deploymentReplicasTimeout,
+		activeEndpointsTimeout,
 	)
 	r.NoError(err)
 	defer h.close()

@@ -47,23 +50,18 @@ func TestIntegrationHappyPath(t *testing.T) {
 	target := targetFromURL(
 		h.originURL,
 		originPort,
-		deplName,
+		deploymentName,
+		serviceName,
 	)
 	h.routingTable.Memory[hostForTest(t)] = target

-	h.deplCache.Set(target.GetNamespace(), deplName, appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{Name: deplName},
-		Spec: appsv1.DeploymentSpec{
-			// note that the forwarding wait function doesn't care about
-			// the replicas field, it only cares about ReadyReplicas in the status.
-			// regardless, we're setting this because in a running cluster,
-			// it's likely that most of the time, this is equal to ReadyReplicas
-			Replicas: i32Ptr(3),
-		},
-		Status: appsv1.DeploymentStatus{
-			ReadyReplicas: 3,
+	h.endpCache.Set(v1.Endpoints{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      serviceName,
+			Namespace: target.GetNamespace(),
 		},
 	})
+	r.NoError(h.endpCache.SetSubsets(target.GetNamespace(), serviceName, 1))

 	// happy path
 	res, err := doRequest(

@@ -79,7 +77,7 @@ func TestIntegrationHappyPath(t *testing.T) {
 // deployment scaled to 1 but host not in routing table
 //
 // NOTE: the interceptor needs to check in the routing table
-// _before_ checking the deployment cache, so we don't technically
+// _before_ checking the endpoints cache, so we don't technically
 // need to set the replicas to 1, but we're doing so anyway to
 // isolate the routing table behavior
 func TestIntegrationNoRoutingTableEntry(t *testing.T) {

@@ -103,12 +101,13 @@
 // host in the routing table but deployment has no replicas
 func TestIntegrationNoReplicas(t *testing.T) {
 	const (
-		deployTimeout = 100 * time.Millisecond
+		activeEndpointsTimeout = 100 * time.Millisecond
 	)
 	host := hostForTest(t)
-	deployName := "testdeployment"
+	deploymentName := "testdeployment"
+	serviceName := "testservice"
 	r := require.New(t)
-	h, err := newHarness(t, deployTimeout)
+	h, err := newHarness(t, activeEndpointsTimeout)
 	r.NoError(err)

 	originPort, err := strconv.Atoi(h.originURL.Port())

@@ -117,15 +116,16 @@
 	target := targetFromURL(
 		h.originURL,
 		originPort,
-		deployName,
+		deploymentName,
+		serviceName,
 	)
 	h.routingTable.Memory[hostForTest(t)] = target

 	// 0 replicas
-	h.deplCache.Set(target.GetNamespace(), deployName, appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{Name: deployName},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: i32Ptr(0),
+	h.endpCache.Set(v1.Endpoints{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      serviceName,
+			Namespace: target.GetNamespace(),
 		},
 	})

@@ -139,22 +139,23 @@
 	r.Equal(502, res.StatusCode)
 	res.Body.Close()
 	elapsed := time.Since(start)
-	// we should have slept more than the deployment replicas wait timeout
-	r.GreaterOrEqual(elapsed, deployTimeout)
-	r.Less(elapsed, deployTimeout+50*time.Millisecond)
+	// we should have slept more than the active endpoints wait timeout
+	r.GreaterOrEqual(elapsed, activeEndpointsTimeout)
+	r.Less(elapsed, activeEndpointsTimeout+50*time.Millisecond)
 }

 // the request comes in while there are no replicas, and one is added
 // while it's pending
 func TestIntegrationWaitReplicas(t *testing.T) {
 	const (
-		deployTimeout   = 2 * time.Second
-		responseTimeout = 1 * time.Second
-		deployName      = "testdeployment"
+		activeEndpointsTimeout = 2 * time.Second
+		responseTimeout        = 1 * time.Second
+		deploymentName         = "testdeployment"
+		serviceName            = "testservice"
 	)
 	ctx := context.Background()
 	r := require.New(t)
-	h, err := newHarness(t, deployTimeout)
+	h, err := newHarness(t, activeEndpointsTimeout)
 	r.NoError(err)

 	// add host to routing table

@@ -164,24 +165,25 @@
 	target := targetFromURL(
 		h.originURL,
 		originPort,
-		deployName,
+		deploymentName,
+		serviceName,
 	)
 	h.routingTable.Memory[hostForTest(t)] = target

-	// set up a deployment with zero replicas and create
-	// a watcher we can use later to fake-send a deployment
+	// set up an endpoint with zero replicas and create
+	// a watcher we can use later to fake-send an endpoint
 	// event
-	initialDeployment := appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{Name: deployName},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: i32Ptr(0),
+	h.endpCache.Set(v1.Endpoints{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      serviceName,
+			Namespace: target.GetNamespace(),
 		},
-	}
-	h.deplCache.Set(target.GetNamespace(), deployName, initialDeployment)
-	watcher := h.deplCache.SetWatcher(target.GetNamespace(), deployName)
+	})
+	endpoints, _ := h.endpCache.Get(target.GetNamespace(), serviceName)
+	watcher := h.endpCache.SetWatcher(target.GetNamespace(), serviceName)

 	// make the request in one goroutine, and in the other, wait a bit
-	// and then add replicas to the deployment cache
+	// and then add replicas to the endpoints cache

 	var response *http.Response
 	grp, _ := errgroup.WithContext(ctx)

@@ -198,20 +200,23 @@
 		resp.Body.Close()
 		return nil
 	})
-	const sleepDur = deployTimeout / 4
+	const sleepDur = activeEndpointsTimeout / 4
 	grp.Go(func() error {
 		t.Logf("Sleeping for %s", sleepDur)
 		time.Sleep(sleepDur)
 		t.Logf("Woke up, setting replicas to 10")
-		modifiedDeployment := initialDeployment.DeepCopy()
-		// note that the wait function only cares about Status.ReadyReplicas
-		// but we're setting Spec.Replicas to 10 as well because the common
-		// case in the cluster is that they would be equal
-		modifiedDeployment.Spec.Replicas = i32Ptr(10)
-		modifiedDeployment.Status.ReadyReplicas = 10
+
+		modifiedEndpoints := endpoints.DeepCopy()
+		modifiedEndpoints.Subsets = []v1.EndpointSubset{
+			{
+				Addresses: []v1.EndpointAddress{
+					{IP: "1.2.3.4"},
+				},
+			},
+		}
 		// send a watch event (instead of setting replicas) so that the watch
 		// func sees that it can forward the request now
-		watcher.Modify(modifiedDeployment)
+		watcher.Modify(modifiedEndpoints)
 		return nil
 	})
 	start := time.Now()

@@ -256,13 +261,13 @@ type harness struct {
 	originURL    *url.URL
 	routingTable *routingtest.Table
 	dialCtxFunc  kedanet.DialContextFunc
-	deplCache    *k8s.FakeDeploymentCache
+	endpCache    *k8s.FakeEndpointsCache
 	waitFunc     forwardWaitFunc
 }

 func newHarness(
 	t *testing.T,
-	deployReplicasTimeout time.Duration,
+	activeEndpointsTimeout time.Duration,
 ) (*harness, error) {
 	t.Helper()
 	lggr := logr.Discard()

@@ -277,10 +282,11 @@ func newHarness(
 		},
 	)

-	deplCache := k8s.NewFakeDeploymentCache()
-	waitFunc := newDeployReplicasForwardWaitFunc(
+	svcCache := k8s.NewFakeServiceCache()
+	endpCache := k8s.NewFakeEndpointsCache()
+	waitFunc := newWorkloadReplicasForwardWaitFunc(
 		logr.Discard(),
-		deplCache,
+		endpCache,
 	)

 	originHdl := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@@ -300,10 +306,14 @@ func newHarness(
 		dialContextFunc,
 		waitFunc,
 		forwardingConfig{
-			waitTimeout:       deployReplicasTimeout,
+			waitTimeout:       activeEndpointsTimeout,
 			respHeaderTimeout: time.Second,
 		},
-	))
+		&tls.Config{},
+		&config.Tracing{}),
+		svcCache,
+		false,
+	)

 	proxySrv, proxySrvURL, err := kedanet.StartTestServer(proxyHdl)
 	if err != nil {

@@ -320,7 +330,7 @@ func newHarness(
 		originURL:    originSrvURL,
 		routingTable: routingTable,
 		dialCtxFunc:  dialContextFunc,
-		deplCache:    deplCache,
+		endpCache:    endpCache,
 		waitFunc:     waitFunc,
 	}, nil
 }

@@ -338,10 +348,6 @@ func (h *harness) String() string {
 	)
 }

-func i32Ptr(i int32) *int32 {
-	return &i
-}
-
 func hostForTest(t *testing.T) string {
 	t.Helper()
 	return fmt.Sprintf("%s.integrationtest.interceptor.kedahttp.dev", t.Name())
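Editor's note: the integration harness now models readiness with core/v1 `Endpoints` subsets instead of `Deployment.Status.ReadyReplicas`. A minimal sketch of the readiness predicate this implies; the function name is an assumption, and the wait function's real implementation lives elsewhere in the interceptor:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// hasActiveEndpoints sketches the condition the endpoints-based wait function
// effectively checks: any subset with at least one address means the Service
// has a ready backend to receive traffic.
func hasActiveEndpoints(e v1.Endpoints) bool {
	for _, subset := range e.Subsets {
		if len(subset.Addresses) > 0 {
			return true
		}
	}
	return false
}

func main() {
	e := v1.Endpoints{
		Subsets: []v1.EndpointSubset{
			{Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}},
		},
	}
	fmt.Println(hasActiveEndpoints(e)) // true
}
```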
@@ -2,10 +2,15 @@ package main
 import (
 	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
 	"fmt"
+	"log"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
+	"os"
 	"strconv"
 	"strings"
 	"testing"

@@ -23,6 +28,25 @@ import (
 	"github.com/kedacore/http-add-on/pkg/util"
 )

+var TestTLSConfig = tls.Config{}
+
+func init() {
+	caCert, err := os.ReadFile("../certs/tls.crt")
+	if err != nil {
+		log.Fatalf("Error getting tests certs - make sure to run make test to generate them: %v", err)
+	}
+	caCertPool := x509.NewCertPool()
+	caCertPool.AppendCertsFromPEM(caCert)
+	cert, err := tls.LoadX509KeyPair("../certs/tls.crt", "../certs/tls.key")
+
+	if err != nil {
+		log.Fatalf("Error getting tests certs - make sure to run make test to generate them %v", err)
+	}
+
+	TestTLSConfig.RootCAs = caCertPool
+	TestTLSConfig.Certificates = []tls.Certificate{cert}
+}
+
 // the proxy should successfully forward a request to a running server
 func TestImmediatelySuccessfulProxy(t *testing.T) {
 	host := fmt.Sprintf("%s.testing", t.Name())

@@ -43,17 +67,19 @@ func TestImmediatelySuccessfulProxy(t *testing.T) {
 	timeouts := defaultTimeouts()
 	dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
-	waitFunc := func(context.Context, string, string) (int, error) {
-		return 1, nil
+	waitFunc := func(context.Context, string, string) (bool, error) {
+		return false, nil
 	}
 	hdl := newForwardingHandler(
 		logr.Discard(),
 		dialCtxFunc,
 		waitFunc,
 		forwardingConfig{
-			waitTimeout:       timeouts.DeploymentReplicas,
+			waitTimeout:       timeouts.WorkloadReplicas,
 			respHeaderTimeout: timeouts.ResponseHeader,
 		},
+		&tls.Config{},
+		&config.Tracing{},
 	)
 	const path = "/testfwd"
 	res, req, err := reqAndRes(path)

@@ -62,6 +88,7 @@ func TestImmediatelySuccessfulProxy(t *testing.T) {
 		originURL,
 		originPort,
 		"testdepl",
+		"testservice",
 	))
 	req = util.RequestWithStream(req, originURL)
 	req.Host = host

@@ -73,6 +100,128 @@ func TestImmediatelySuccessfulProxy(t *testing.T) {
 	r.Equal("test response", res.Body.String())
 }

+func TestImmediatelySuccessfulProxyTLS(t *testing.T) {
+	host := fmt.Sprintf("%s.testing", t.Name())
+	r := require.New(t)
+
+	originHdl := kedanet.NewTestHTTPHandlerWrapper(
+		http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			w.WriteHeader(200)
+			_, err := w.Write([]byte("test response"))
+			r.NoError(err)
+		}),
+	)
+	srv, originURL, err := kedanet.StartTestServer(originHdl)
+	r.NoError(err)
+	defer srv.Close()
+	originPort, err := strconv.Atoi(originURL.Port())
+	r.NoError(err)
+
+	timeouts := defaultTimeouts()
+	dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
+	waitFunc := func(context.Context, string, string) (bool, error) {
+		return false, nil
+	}
+	hdl := newForwardingHandler(
+		logr.Discard(),
+		dialCtxFunc,
+		waitFunc,
+		forwardingConfig{
+			waitTimeout:       timeouts.WorkloadReplicas,
+			respHeaderTimeout: timeouts.ResponseHeader,
+		},
+		&TestTLSConfig,
+		&config.Tracing{},
+	)
+	const path = "/testfwd"
+	res, req, err := reqAndRes(path)
+	r.NoError(err)
+	req = util.RequestWithHTTPSO(req, targetFromURL(
+		originURL,
+		originPort,
+		"testdepl",
+		"testsvc",
+	))
+	req = util.RequestWithStream(req, originURL)
+	req.Host = host
+
+	hdl.ServeHTTP(res, req)
+
+	r.Equal("false", res.Header().Get("X-KEDA-HTTP-Cold-Start"), "expected X-KEDA-HTTP-Cold-Start false")
+	r.Equal(200, res.Code, "expected response code 200")
+	r.Equal("test response", res.Body.String())
+}
+
+// the proxy should successfully forward a request to the failover when the server is not reachable
+func TestImmediatelySuccessfulFailoverProxy(t *testing.T) {
+	host := fmt.Sprintf("%s.testing", t.Name())
+	r := require.New(t)
+
+	initialStream, err := url.Parse("http://0.0.0.0:0")
+	r.NoError(err)
+
+	failoverHdl := kedanet.NewTestHTTPHandlerWrapper(
+		http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			w.WriteHeader(200)
+			_, err := w.Write([]byte("test response"))
+			r.NoError(err)
+		}),
+	)
+	srv, failoverURL, err := kedanet.StartTestServer(failoverHdl)
+	r.NoError(err)
+	defer srv.Close()
+	failoverPort, err := strconv.Atoi(failoverURL.Port())
+	r.NoError(err)
+
+	timeouts := defaultTimeouts()
+	dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
+	waitFunc := func(ctx context.Context, _ string, _ string) (bool, error) {
+		return false, errors.New("nothing")
+	}
+	hdl := newForwardingHandler(
+		logr.Discard(),
+		dialCtxFunc,
+		waitFunc,
+		forwardingConfig{
+			waitTimeout:       0,
+			respHeaderTimeout: timeouts.ResponseHeader,
+		},
+		&tls.Config{},
+		&config.Tracing{},
+	)
+	const path = "/testfwd"
+	res, req, err := reqAndRes(path)
+	r.NoError(err)
+	req = util.RequestWithHTTPSO(req,
+		&httpv1alpha1.HTTPScaledObject{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "@" + host,
+			},
+			Spec: httpv1alpha1.HTTPScaledObjectSpec{
+				ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
+					Name:    "testdepl",
+					Service: "testsvc",
+					Port:    int32(456),
+				},
+				ColdStartTimeoutFailoverRef: &httpv1alpha1.ColdStartTimeoutFailoverRef{
+					Service:        "testsvc",
+					Port:           int32(failoverPort),
+					TimeoutSeconds: 30,
+				},
+				TargetPendingRequests: ptr.To[int32](123),
+			},
+		},
+	)
+	req = util.RequestWithStream(req, initialStream)
+	req = util.RequestWithFailoverStream(req, failoverURL)
+	req.Host = host
+
+	hdl.ServeHTTP(res, req)
+
+	r.Equal(200, res.Code, "expected response code 200")
+	r.Equal("test response", res.Body.String())
+}
+
 // the proxy should wait for a timeout and fail if there is no
 // origin to which to connect
 func TestWaitFailedConnection(t *testing.T) {

@@ -86,17 +235,19 @@ func TestWaitFailedConnection(t *testing.T) {
 		timeouts,
 		backoff,
 	)
-	waitFunc := func(context.Context, string, string) (int, error) {
-		return 1, nil
+	waitFunc := func(context.Context, string, string) (bool, error) {
+		return false, nil
 	}
 	hdl := newForwardingHandler(
 		logr.Discard(),
 		dialCtxFunc,
 		waitFunc,
 		forwardingConfig{
-			waitTimeout:       timeouts.DeploymentReplicas,
+			waitTimeout:       timeouts.WorkloadReplicas,
 			respHeaderTimeout: timeouts.ResponseHeader,
 		},
+		&tls.Config{},
+		&config.Tracing{},
 	)
 	stream, err := url.Parse("http://0.0.0.0:0")
 	r.NoError(err)

@@ -109,9 +260,60 @@
 		},
 		Spec: httpv1alpha1.HTTPScaledObjectSpec{
 			ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
-				Deployment: "nosuchdepl",
-				Service:    "nosuchdepl",
-				Port:       8081,
+				Service: "nosuchdepl",
+				Port:    8081,
 			},
 			TargetPendingRequests: ptr.To[int32](1234),
 		},
 	})
 	req = util.RequestWithStream(req, stream)
 	req.Host = host

 	hdl.ServeHTTP(res, req)

 	r.Equal("false", res.Header().Get("X-KEDA-HTTP-Cold-Start"), "expected X-KEDA-HTTP-Cold-Start false")
 	r.Equal(502, res.Code, "response code was unexpected")
 }

+func TestWaitFailedConnectionTLS(t *testing.T) {
+	const host = "TestWaitFailedConnection.testing"
+	r := require.New(t)
+
+	timeouts := defaultTimeouts()
+	backoff := timeouts.DefaultBackoff()
+	backoff.Steps = 2
+	dialCtxFunc := retryDialContextFunc(
+		timeouts,
+		backoff,
+	)
+	waitFunc := func(context.Context, string, string) (bool, error) {
+		return false, nil
+	}
+	hdl := newForwardingHandler(
+		logr.Discard(),
+		dialCtxFunc,
+		waitFunc,
+		forwardingConfig{
+			waitTimeout:       timeouts.WorkloadReplicas,
+			respHeaderTimeout: timeouts.ResponseHeader,
+		},
+		&TestTLSConfig,
+		&config.Tracing{},
+	)
+	stream, err := url.Parse("http://0.0.0.0:0")
+	r.NoError(err)
+	const path = "/testfwd"
+	res, req, err := reqAndRes(path)
+	r.NoError(err)
+	req = util.RequestWithHTTPSO(req, &httpv1alpha1.HTTPScaledObject{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "testns",
+		},
+		Spec: httpv1alpha1.HTTPScaledObjectSpec{
+			ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
+				Name:    "nosuchdepl",
+				Service: "nosuchdepl",
+				Port:    8081,
+			},
+			TargetPendingRequests: ptr.To[int32](1234),
+		},

@@ -131,7 +333,7 @@ func TestTimesOutOnWaitFunc(t *testing.T) {
 	r := require.New(t)

 	timeouts := defaultTimeouts()
-	timeouts.DeploymentReplicas = 25 * time.Millisecond
+	timeouts.WorkloadReplicas = 25 * time.Millisecond
 	timeouts.ResponseHeader = 25 * time.Millisecond
 	dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())

@@ -144,9 +346,11 @@
 		dialCtxFunc,
 		waitFunc,
 		forwardingConfig{
-			waitTimeout:       timeouts.DeploymentReplicas,
+			waitTimeout:       timeouts.WorkloadReplicas,
 			respHeaderTimeout: timeouts.ResponseHeader,
 		},
+		&tls.Config{},
+		&config.Tracing{},
 	)
 	stream, err := url.Parse("http://1.1.1.1")
 	r.NoError(err)

@@ -159,9 +363,8 @@
 		},
 		Spec: httpv1alpha1.HTTPScaledObjectSpec{
 			ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
-				Deployment: "nosuchdepl",
-				Service:    "nosuchsvc",
-				Port:       9091,
+				Service: "nosuchsvc",
+				Port:    9091,
 			},
 			TargetPendingRequests: ptr.To[int32](1234),
 		},

@@ -176,8 +379,82 @@
 	t.Logf("elapsed time was %s", elapsed)
 	// serving should take at least timeouts.DeploymentReplicas, but no more than
 	// timeouts.DeploymentReplicas*4
-	r.GreaterOrEqual(elapsed, timeouts.DeploymentReplicas)
-	r.LessOrEqual(elapsed, timeouts.DeploymentReplicas*4)
+	r.GreaterOrEqual(elapsed, timeouts.WorkloadReplicas)
+	r.LessOrEqual(elapsed, timeouts.WorkloadReplicas*4)
 	r.Equal(502, res.Code, "response code was unexpected")

 	// we will always return the X-KEDA-HTTP-Cold-Start header
 	// when we are able to forward the
 	// request to the backend but not if we have failed due
 	// to a timeout from a waitFunc or earlier in the pipeline,
 	// for example, if we cannot reach the Kubernetes control
 	// plane.
 	r.Equal("", res.Header().Get("X-KEDA-HTTP-Cold-Start"), "expected X-KEDA-HTTP-Cold-Start to be empty")

 	// waitFunc should have been called, even though it timed out
 	waitFuncCalled := false
 	select {
 	case <-waitFuncCalledCh:
 		waitFuncCalled = true
 	default:
 	}

 	r.True(waitFuncCalled, "wait function was not called")
 }

+func TestTimesOutOnWaitFuncTLS(t *testing.T) {
+	r := require.New(t)
+
+	timeouts := defaultTimeouts()
+	timeouts.WorkloadReplicas = 25 * time.Millisecond
+	timeouts.ResponseHeader = 25 * time.Millisecond
+	dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
+
+	waitFunc, waitFuncCalledCh, finishWaitFunc := notifyingFunc()
+	defer finishWaitFunc()
+	noSuchHost := fmt.Sprintf("%s.testing", t.Name())
+
+	hdl := newForwardingHandler(
+		logr.Discard(),
+		dialCtxFunc,
+		waitFunc,
+		forwardingConfig{
+			waitTimeout:       timeouts.WorkloadReplicas,
+			respHeaderTimeout: timeouts.ResponseHeader,
+		},
+		&TestTLSConfig,
+		&config.Tracing{},
+	)
+	stream, err := url.Parse("http://1.1.1.1")
+	r.NoError(err)
+	const path = "/testfwd"
+	res, req, err := reqAndRes(path)
+	r.NoError(err)
+	req = util.RequestWithHTTPSO(req, &httpv1alpha1.HTTPScaledObject{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "testns",
+		},
+		Spec: httpv1alpha1.HTTPScaledObjectSpec{
+			ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
+				Name:    "nosuchdepl",
+				Service: "nosuchsvc",
+				Port:    9091,
+			},
+			TargetPendingRequests: ptr.To[int32](1234),
+		},
+	})
+	req = util.RequestWithStream(req, stream)
+	req.Host = noSuchHost
+
+	start := time.Now()
+	hdl.ServeHTTP(res, req)
+	elapsed := time.Since(start)
+
+	t.Logf("elapsed time was %s", elapsed)
+	// serving should take at least timeouts.DeploymentReplicas, but no more than
+	// timeouts.DeploymentReplicas*4
+	r.GreaterOrEqual(elapsed, timeouts.WorkloadReplicas)
+	r.LessOrEqual(elapsed, timeouts.WorkloadReplicas*4)
+	r.Equal(502, res.Code, "response code was unexpected")

 	// we will always return the X-KEDA-HTTP-Cold-Start header

@@ -226,9 +503,11 @@ func TestWaitsForWaitFunc(t *testing.T) {
 		dialCtxFunc,
 		waitFunc,
 		forwardingConfig{
-			waitTimeout:       timeouts.DeploymentReplicas,
+			waitTimeout:       timeouts.WorkloadReplicas,
 			respHeaderTimeout: timeouts.ResponseHeader,
 		},
+		&tls.Config{},
+		&config.Tracing{},
 	)
 	const path = "/testfwd"
 	res, req, err := reqAndRes(path)

@@ -237,6 +516,74 @@
 		testSrvURL,
 		originPort,
 		"nosuchdepl",
+		"noservice",
 	))
 	req = util.RequestWithStream(req, testSrvURL)
 	req.Host = noSuchHost

 	// make the wait function finish after a short duration
 	const waitDur = 100 * time.Millisecond
 	go func() {
 		time.Sleep(waitDur)
 		finishWaitFunc()
 	}()

 	start := time.Now()
 	hdl.ServeHTTP(res, req)
 	elapsed := time.Since(start)
 	r.NoError(waitForSignal(waitFuncCalledCh, 1*time.Second))

 	// should take at least waitDur, but no more than waitDur*4
 	r.GreaterOrEqual(elapsed, waitDur)
 	r.Less(elapsed, waitDur*4)

 	r.Equal("true", res.Header().Get("X-KEDA-HTTP-Cold-Start"), "expected X-KEDA-HTTP-Cold-Start true")
 	r.Equal(
 		originRespCode,
 		res.Code,
 		"response code was unexpected",
 	)
 }

+func TestWaitsForWaitFuncTLS(t *testing.T) {
+	r := require.New(t)
+
+	timeouts := defaultTimeouts()
+	dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
+
+	waitFunc, waitFuncCalledCh, finishWaitFunc := notifyingFunc()
+	const (
+		noSuchHost     = "TestWaitsForWaitFunc.test"
+		originRespCode = 201
+	)
+	testSrv, testSrvURL, err := kedanet.StartTestServer(
+		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(originRespCode)
+		}),
+	)
+	r.NoError(err)
+	defer testSrv.Close()
+	_, originPort, err := splitHostPort(testSrvURL.Host)
+	r.NoError(err)
+	hdl := newForwardingHandler(
+		logr.Discard(),
+		dialCtxFunc,
+		waitFunc,
+		forwardingConfig{
+			waitTimeout:       timeouts.WorkloadReplicas,
+			respHeaderTimeout: timeouts.ResponseHeader,
+		},
+		&TestTLSConfig,
+		&config.Tracing{},
+	)
+	const path = "/testfwd"
+	res, req, err := reqAndRes(path)
+	r.NoError(err)
+	req = util.RequestWithHTTPSO(req, targetFromURL(
+		testSrvURL,
+		originPort,
+		"nosuchdepl",
+		"nosuchsvc",
+	))
+	req = util.RequestWithStream(req, testSrvURL)
+	req.Host = noSuchHost

@@ -286,17 +633,19 @@ func TestWaitHeaderTimeout(t *testing.T) {
 	timeouts := defaultTimeouts()
 	dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
-	waitFunc := func(context.Context, string, string) (int, error) {
-		return 1, nil
+	waitFunc := func(context.Context, string, string) (bool, error) {
+		return false, nil
 	}
 	hdl := newForwardingHandler(
 		logr.Discard(),
 		dialCtxFunc,
 		waitFunc,
 		forwardingConfig{
-			waitTimeout:       timeouts.DeploymentReplicas,
+			waitTimeout:       timeouts.WorkloadReplicas,
 			respHeaderTimeout: timeouts.ResponseHeader,
 		},
+		&tls.Config{},
+		&config.Tracing{},
 	)
 	const path = "/testfwd"
 	res, req, err := reqAndRes(path)

@@ -307,9 +656,68 @@
 		},
 		Spec: httpv1alpha1.HTTPScaledObjectSpec{
 			ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
-				Deployment: "nosuchdepl",
-				Service:    "testsvc",
-				Port:       9094,
+				Service: "testsvc",
+				Port:    9094,
 			},
 			TargetPendingRequests: ptr.To[int32](1234),
 		},
 	})
 	req = util.RequestWithStream(req, originURL)
 	req.Host = originURL.Host

 	hdl.ServeHTTP(res, req)

 	r.Equal("false", res.Header().Get("X-KEDA-HTTP-Cold-Start"), "expected X-KEDA-HTTP-Cold-Start false")
 	r.Equal(502, res.Code, "response code was unexpected")
 	close(originHdlCh)
 }

+func TestWaitHeaderTimeoutTLS(t *testing.T) {
+	r := require.New(t)
+
+	// the origin will wait for this channel to receive or close before it sends any data back to the
+	// proxy
+	originHdlCh := make(chan struct{})
+	originHdl := kedanet.NewTestHTTPHandlerWrapper(
+		http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			<-originHdlCh
+			w.WriteHeader(200)
+			_, err := w.Write([]byte("test response"))
+			r.NoError(err)
+		}),
+	)
+	srv, originURL, err := kedanet.StartTestServer(originHdl)
+	r.NoError(err)
+	defer srv.Close()
+
+	timeouts := defaultTimeouts()
+	dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
+	waitFunc := func(context.Context, string, string) (bool, error) {
+		return false, nil
+	}
+	hdl := newForwardingHandler(
+		logr.Discard(),
+		dialCtxFunc,
+		waitFunc,
+		forwardingConfig{
+			waitTimeout:       timeouts.WorkloadReplicas,
+			respHeaderTimeout: timeouts.ResponseHeader,
+		},
+		&TestTLSConfig,
+		&config.Tracing{},
+	)
+	const path = "/testfwd"
+	res, req, err := reqAndRes(path)
+	r.NoError(err)
+	req = util.RequestWithHTTPSO(req, &httpv1alpha1.HTTPScaledObject{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "testns",
+		},
+		Spec: httpv1alpha1.HTTPScaledObjectSpec{
+			ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
+				Name:    "nosuchdepl",
+				Service: "testsvc",
+				Port:    9094,
+			},
+			TargetPendingRequests: ptr.To[int32](1234),
+		},

@@ -350,13 +758,13 @@ func notifyingFunc() (forwardWaitFunc, <-chan struct{}, func()) {
 	finishFunc := func() {
 		close(finishCh)
 	}
-	return func(ctx context.Context, _, _ string) (int, error) {
+	return func(ctx context.Context, _, _ string) (bool, error) {
 		close(calledCh)
 		select {
 		case <-finishCh:
-			return 0, nil
+			return true, nil
 		case <-ctx.Done():
-			return 0, fmt.Errorf("TEST FUNCTION CONTEXT ERROR: %w", ctx.Err())
+			return true, fmt.Errorf("TEST FUNCTION CONTEXT ERROR: %w", ctx.Err())
 		}
 	}, calledCh, finishFunc
 }

@@ -364,7 +772,8 @@
 func targetFromURL(
 	u *url.URL,
 	port int,
-	deployment string,
+	workload string,
+	service string,
 ) *httpv1alpha1.HTTPScaledObject {
 	host := strings.Split(u.Host, ":")[0]
 	return &httpv1alpha1.HTTPScaledObject{

@@ -373,9 +782,9 @@ func targetFromURL(
 		},
 		Spec: httpv1alpha1.HTTPScaledObjectSpec{
 			ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
-				Deployment: deployment,
-				Service:    ":" + host,
-				Port:       int32(port),
+				Name:    workload,
+				Service: service,
+				Port:    int32(port),
 			},
 			TargetPendingRequests: ptr.To[int32](123),
 		},

@@ -384,10 +793,10 @@
 func defaultTimeouts() config.Timeouts {
 	return config.Timeouts{
-		Connect:            100 * time.Millisecond,
-		KeepAlive:          100 * time.Millisecond,
-		ResponseHeader:     500 * time.Millisecond,
-		DeploymentReplicas: 1 * time.Second,
+		Connect:          100 * time.Millisecond,
+		KeepAlive:        100 * time.Millisecond,
+		ResponseHeader:   500 * time.Millisecond,
+		WorkloadReplicas: 1 * time.Second,
 	}
 }
@@ -0,0 +1,102 @@
+package tracing
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	"go.opentelemetry.io/contrib/propagators/b3"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+	"github.com/kedacore/http-add-on/interceptor/config"
+)
+
+var serviceName = "keda-http-interceptor"
+
+func SetupOTelSDK(ctx context.Context, tCfg *config.Tracing) (shutdown func(context.Context) error, err error) {
+	var shutdownFuncs []func(context.Context) error
+
+	// shutdown calls cleanup functions registered via shutdownFuncs.
+	// The errors from the calls are joined.
+	// Each registered cleanup will be invoked once.
+	shutdown = func(ctx context.Context) error {
+		var err error
+		for _, fn := range shutdownFuncs {
+			err = errors.Join(err, fn(ctx))
+		}
+		shutdownFuncs = nil
+		return err
+	}
+
+	handleErr := func(inErr error) {
+		err = errors.Join(inErr, shutdown(ctx))
+	}
+
+	res, err := newResource(serviceName)
+	if err != nil {
+		handleErr(err)
+		return
+	}
+
+	prop := NewPropagator()
+	otel.SetTextMapPropagator(prop)
+
+	tracerProvider, err := newTraceProvider(ctx, res, tCfg)
+	if err != nil {
+		handleErr(err)
+		return
+	}
+	shutdownFuncs = append(shutdownFuncs, tracerProvider.Shutdown)
+	otel.SetTracerProvider(tracerProvider)
+
+	return
+}
+
+func newResource(serviceName string) (*resource.Resource, error) {
+	return resource.Merge(resource.Default(),
+		resource.NewWithAttributes(semconv.SchemaURL,
+			semconv.ServiceName(serviceName),
+		))
+}
+
+func NewPropagator() propagation.TextMapPropagator {
+	return propagation.NewCompositeTextMapPropagator(
+		propagation.TraceContext{},
+		propagation.Baggage{},
+		b3.New(),
+	)
+}
+
+func newTraceProvider(ctx context.Context, res *resource.Resource, tCfg *config.Tracing) (*trace.TracerProvider, error) {
+	traceExporter, err := newExporter(ctx, tCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	traceProvider := trace.NewTracerProvider(
+		trace.WithSampler(trace.AlwaysSample()),
+		trace.WithBatcher(traceExporter),
+		trace.WithResource(res),
+	)
+	return traceProvider, nil
+}
+
+func newExporter(ctx context.Context, tCfg *config.Tracing) (trace.SpanExporter, error) {
+	switch strings.ToLower(tCfg.Exporter) {
+	case "console":
+		return stdouttrace.New()
+	case "http/protobuf":
+		return otlptracehttp.New(ctx)
+	case "grpc":
+		return otlptracegrpc.New(ctx)
+	default:
+		return nil, errors.New("no valid tracing exporter defined")
+	}
+}
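Editor's note: `SetupOTelSDK` registers the propagator and tracer provider globally and returns a composite shutdown function that joins the errors of all registered cleanups; callers should defer it so the batch span exporter flushes on exit. A usage sketch; the import path and the config field values are assumptions based on this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/kedacore/http-add-on/interceptor/config"
	"github.com/kedacore/http-add-on/interceptor/tracing"
)

func main() {
	ctx := context.Background()

	// "console" selects the stdouttrace exporter in newExporter above.
	tCfg := &config.Tracing{Enabled: true, Exporter: "console"}

	shutdown, err := tracing.SetupOTelSDK(ctx, tCfg)
	if err != nil {
		log.Fatal(err)
	}
	// Flush pending spans and release exporter resources before exiting.
	defer func() {
		if err := shutdown(ctx); err != nil {
			log.Println("tracing shutdown:", err)
		}
	}()
}
```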
@@ -0,0 +1,18 @@
+package tracing
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/kedacore/http-add-on/interceptor/config"
+)
+
+func TestTracingConfig(t *testing.T) {
+	tracingCfg := config.MustParseTracing()
+	tracingCfg.Enabled = true
+
+	// check defaults are set correctly
+	assert.Equal(t, "console", tracingCfg.Exporter)
+	assert.Equal(t, true, tracingCfg.Enabled)
+}