Compare commits
198 Commits
Author | SHA1 | Date |
---|---|---|
|
d032b8b4f1 | |
|
9afe55b5fc | |
|
bf355649c6 | |
|
29a6c2b509 | |
|
dc863c6fcd | |
|
6a6adfb7ac | |
|
2317f75346 | |
|
fe41713ec3 | |
|
17b2af021d | |
|
d2bed33270 | |
|
d891e6e5bd | |
|
30e1694baf | |
|
8f84195862 | |
|
46884e237d | |
|
c34fc522fa | |
|
ee764f97ea | |
|
a99deeeb8b | |
|
30e3ecc2ea | |
|
850678c13a | |
|
616cab02d1 | |
|
6fa0b98608 | |
|
d15f4428cf | |
|
8a6ae99921 | |
|
20478e7f2d | |
|
d7ef9d7b89 | |
|
9b76f367e1 | |
|
d159468a02 | |
|
28974318d0 | |
|
61d52ca769 | |
|
d0e7840e9c | |
|
6d76063e54 | |
|
ca6a4d6c02 | |
|
8e95401e96 | |
|
ddbb17ce0b | |
|
504b37ed69 | |
|
54d0ec6247 | |
|
b7d0a3e035 | |
|
fbb0bc9bca | |
|
f8cde8c47e | |
|
a5f3349b17 | |
|
317d47406e | |
|
87e713c8e7 | |
|
4bfcd9fc6d | |
|
880eeb08eb | |
|
c92a3e2c56 | |
|
55b029b409 | |
|
6a30f1842d | |
|
dd40161cb0 | |
|
15a1dae2d2 | |
|
45f645f19e | |
|
ab140c5198 | |
|
ade3bb090b | |
|
993b7bf242 | |
|
fb3e48b94f | |
|
f5ab058701 | |
|
5d2e0add55 | |
|
48a1881a76 | |
|
65b1d73d80 | |
|
052ffce0a6 | |
|
6abd03203b | |
|
08c811fe20 | |
|
8a1d49050a | |
|
2601d92888 | |
|
30b956b5c8 | |
|
70aa9be886 | |
|
459122382e | |
|
aa8ca2e481 | |
|
ec5109d986 | |
|
846ee80394 | |
|
54671bddeb | |
|
81f7469ad1 | |
|
272cd2d4fc | |
|
98770083ee | |
|
d64a750808 | |
|
1e426128da | |
|
a86b13497e | |
|
cf2c4a98eb | |
|
84a9560331 | |
|
1e27b395bd | |
|
44ac04e4e0 | |
|
8ede929e2f | |
|
c96c106c72 | |
|
166ce15b04 | |
|
cc832ed264 | |
|
e184bc5437 | |
|
9f82ff979d | |
|
30e7571b19 | |
|
b283195843 | |
|
e04c24286e | |
|
f1f2707c63 | |
|
0793ecec5a | |
|
5557cd8741 | |
|
7d1740ab82 | |
|
1c9f7348b8 | |
|
38f50bfb0a | |
|
e3d2e81220 | |
|
7feda00373 | |
|
c5de19c52b | |
|
9366827350 | |
|
0237611b6a | |
|
c7ccd3384e | |
|
f48b7d813c | |
|
3ee0445005 | |
|
473c42c637 | |
|
f7bb9f56f6 | |
|
d0a564a9c1 | |
|
a4f9f39ac5 | |
|
a8c1258267 | |
|
b37046ea47 | |
|
d52daad348 | |
|
e6896c2ea4 | |
|
dbac89dc37 | |
|
5b3063e388 | |
|
a71b194ec5 | |
|
849c62af1b | |
|
652c47f5da | |
|
5839732134 | |
|
14839c3558 | |
|
8b44922940 | |
|
793a48d235 | |
|
448a3deb10 | |
|
513ecb5d74 | |
|
5e7af24783 | |
|
2b037b804b | |
|
adc868ff6a | |
|
55086474c9 | |
|
8935f27f4f | |
|
8608805972 | |
|
2c0a177f60 | |
|
d337c8d4e4 | |
|
6453b18638 | |
|
d9277ab7fb | |
|
98ad59dd13 | |
|
a59daf2452 | |
|
b8f3972522 | |
|
855547a7c6 | |
|
584b890935 | |
|
1aea9bf659 | |
|
e658582052 | |
|
6b0edeefb2 | |
|
3bf119b3de | |
|
ec12921821 | |
|
7908755121 | |
|
15718d165a | |
|
f06fcb9c2a | |
|
900da11265 | |
|
1e1ddb2934 | |
|
a842764b96 | |
|
814f33330d | |
|
214431250a | |
|
1bbbd17c06 | |
|
60a76153e6 | |
|
bd556c794f | |
|
1f2f54c560 | |
|
2b7641fb9e | |
|
8ee27fdf6e | |
|
7f0572987b | |
|
c0b7baac56 | |
|
2fa0be3f25 | |
|
b3519ab615 | |
|
56d295d47d | |
|
6021290239 | |
|
8c18bd8f73 | |
|
6c665af7bc | |
|
34386f0a43 | |
|
e8b1c015f0 | |
|
d374cbecbf | |
|
8ea0896621 | |
|
0a1d1ded4e | |
|
4670544de9 | |
|
ee89db0759 | |
|
457d2c4ef7 | |
|
e64a56f958 | |
|
b39aceb7aa | |
|
fe493ad3f0 | |
|
997e094ca7 | |
|
6121019b0b | |
|
aadaf60615 | |
|
8fbc83c218 | |
|
d782083ec9 | |
|
3fd3a983cb | |
|
0321ee0bef | |
|
179b26a58e | |
|
1986161044 | |
|
04204c20bd | |
|
fe035b9682 | |
|
6eaf23aab6 | |
|
a3039f6135 | |
|
ecd4a41f59 | |
|
db8901a5f2 | |
|
061033989c | |
|
6cdec14001 | |
|
6e7f15d54c | |
|
4656eca550 | |
|
a628222449 | |
|
a6cc536bfe | |
|
d6a312e287 | |
|
835f2c0f09 |
|
@ -3,7 +3,7 @@
|
|||
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
|
||||
#-------------------------------------------------------------------------------------------------------------
|
||||
|
||||
FROM golang:1.19.7
|
||||
FROM golang:1.24.3
|
||||
|
||||
# Avoid warnings by switching to noninteractive
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
@ -55,7 +55,7 @@ RUN apt-get update \
|
|||
&& go install honnef.co/go/tools/cmd/staticcheck@latest \
|
||||
&& go install golang.org/x/tools/gopls@latest \
|
||||
# Install golangci-lint
|
||||
&& curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.49.0 \
|
||||
&& curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.0 \
|
||||
#
|
||||
# Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user.
|
||||
&& groupadd --gid $USER_GID $USERNAME \
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
name: Feature request 🧭
|
||||
description: Suggest an idea for this project
|
||||
labels: "needs-discussion,feature-request"
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Proposal
|
||||
description: "What would you like to have as a feature"
|
||||
placeholder: "A clear and concise description of what you want to happen."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Use-Case
|
||||
description: "How would this help you?"
|
||||
placeholder: "Tell us more what you'd like to achieve."
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
id: interested-in-implementing-the-feature
|
||||
attributes:
|
||||
label: Is this a feature you are interested in implementing yourself?
|
||||
options:
|
||||
- 'No'
|
||||
- 'Maybe'
|
||||
- 'Yes'
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: anything-else
|
||||
attributes:
|
||||
label: Anything else?
|
||||
description: "Let us know if you have anything else to share"
|
|
@ -46,10 +46,16 @@ body:
|
|||
```
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: dropdown
|
||||
id: keda-http-version
|
||||
attributes:
|
||||
label: What version of the KEDA HTTP Add-on are you running?
|
||||
label: "HTTP Add-on Version"
|
||||
description: "What version of the KEDA HTTP Add-on are you running?"
|
||||
options:
|
||||
- "0.10.0"
|
||||
- "0.9.0"
|
||||
- "0.8.0"
|
||||
- "Other"
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
|
@ -58,11 +64,10 @@ body:
|
|||
label: Kubernetes Version
|
||||
description: What version of Kubernetes that are you running?
|
||||
options:
|
||||
- "1.26"
|
||||
- "1.25"
|
||||
- "1.24"
|
||||
- "1.23"
|
||||
- "< 1.23"
|
||||
- "1.32"
|
||||
- "1.31"
|
||||
- "1.30"
|
||||
- "< 1.30"
|
||||
- "Other"
|
||||
validations:
|
||||
required: false
|
|
@ -0,0 +1,28 @@
|
|||
---
|
||||
name: KEDA Release Tracker
|
||||
about: Template to keep track of the progress for a new KEDA HTTP add-on release.
|
||||
title: "Release: "
|
||||
labels: governance,release-management
|
||||
assignees: tomkerkhove,jorturfer
|
||||
---
|
||||
|
||||
This issue template is used to track the rollout of a new KEDA HTTP add-on version.
|
||||
|
||||
For the full release process, we recommend reading [this document]([https://github.com/kedacore/keda/blob/main/RELEASE-PROCESS.md](https://github.com/kedacore/http-add-on/blob/main/RELEASE-PROCESS.md)).
|
||||
|
||||
## Required items
|
||||
|
||||
- [ ] List items that are still open, but required for this release
|
||||
|
||||
# Timeline
|
||||
|
||||
We aim to release this release in the week of <week range, example March 27-31>.
|
||||
|
||||
## Progress
|
||||
|
||||
- [ ] Add the new version to GitHub Bug report template
|
||||
- [ ] Create KEDA release
|
||||
- [ ] Prepare & ship Helm chart
|
||||
- [ ] Publish on Artifact Hub ([repo](https://github.com/kedacore/external-scalers))
|
||||
- [ ] Provide update in Slack
|
||||
- [ ] Tweet about new release
|
|
@ -1,15 +0,0 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
labels: needs-discussion,feature-request
|
||||
---
|
||||
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
### Use-Case
|
||||
|
||||
Tell us more what you'd like to achieve
|
||||
|
||||
### Specification
|
||||
|
||||
Tell us in detail how this feature should work
|
|
@ -8,6 +8,10 @@ updates:
|
|||
labels:
|
||||
- enhancement
|
||||
- dependency-management
|
||||
groups:
|
||||
all-updates:
|
||||
patterns:
|
||||
- "*"
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
|
@ -16,6 +20,10 @@ updates:
|
|||
labels:
|
||||
- enhancement
|
||||
- dependency-management
|
||||
groups:
|
||||
all-updates:
|
||||
patterns:
|
||||
- "*"
|
||||
- package-ecosystem: docker
|
||||
directory: "/"
|
||||
schedule:
|
||||
|
@ -24,3 +32,7 @@ updates:
|
|||
labels:
|
||||
- enhancement
|
||||
- dependency-management
|
||||
groups:
|
||||
all-updates:
|
||||
patterns:
|
||||
- "*"
|
||||
|
|
|
@ -6,7 +6,7 @@ daysUntilClose: 7
|
|||
|
||||
# Issues with these labels will never be considered stale
|
||||
exemptLabels:
|
||||
- cant-touch-this
|
||||
- stale-bot-ignore
|
||||
- feature
|
||||
- security
|
||||
|
||||
|
|
|
@ -3,6 +3,9 @@ on:
|
|||
issues:
|
||||
types:
|
||||
- opened
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
track_issue:
|
||||
runs-on: ubuntu-latest
|
||||
|
|
|
@ -5,18 +5,26 @@ on:
|
|||
branches: [ main ]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
container: ghcr.io/kedacore/build-tools:1.19.5
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write # needed for signing the images with GitHub OIDC Token **not production ready**
|
||||
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
# Username used to log in to a Docker registry. If not set then no login will occur
|
||||
username: ${{ github.repository_owner }}
|
||||
|
@ -26,9 +34,23 @@ jobs:
|
|||
registry: ghcr.io
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
|
||||
- name: Publish on GitHub Container Registry
|
||||
run: make publish-multiarch
|
||||
env:
|
||||
VERSION: canary
|
||||
|
||||
# https://github.com/sigstore/cosign-installer
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
|
||||
|
||||
- name: Check Cosign install!
|
||||
run: cosign version
|
||||
|
||||
- name: Sign KEDA images published on GitHub Container Registry
|
||||
# This step uses the identity token to provision an ephemeral certificate
|
||||
# against the sigstore community Fulcio instance.
|
||||
run: make sign-images
|
||||
env:
|
||||
VERSION: canary
|
||||
|
|
|
@ -2,14 +2,19 @@ name: Publish official image to GitHub Container Registry
|
|||
|
||||
on:
|
||||
push:
|
||||
tags: [ "v[0-9].[0-9].[0-9]" ]
|
||||
tags: ["v[0-9].[0-9].[0-9]"]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
container: ghcr.io/kedacore/build-tools:1.19.5
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
id-token: write # needed for signing the images with GitHub OIDC Token **not production ready**
|
||||
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
|
@ -19,8 +24,13 @@ jobs:
|
|||
run: |
|
||||
echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/v}
|
||||
|
||||
- name: Release Deployment YAML file
|
||||
run: make release
|
||||
env:
|
||||
VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
# Username used to log in to a Docker registry. If not set then no login will occur
|
||||
username: ${{ github.repository_owner }}
|
||||
|
@ -30,9 +40,54 @@ jobs:
|
|||
registry: ghcr.io
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
|
||||
- name: Publish on GitHub Container Registry
|
||||
run: make publish-multiarch
|
||||
env:
|
||||
VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
# https://github.com/sigstore/cosign-installer
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
|
||||
|
||||
- name: Check Cosign install!
|
||||
run: cosign version
|
||||
|
||||
- name: Sign KEDA images published on GitHub Container Registry
|
||||
# This step uses the identity token to provision an ephemeral certificate
|
||||
# against the sigstore community Fulcio instance.
|
||||
run: make sign-images
|
||||
env:
|
||||
VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
# Get release information to determine id of the current release
|
||||
- name: Get Release
|
||||
id: get-release-info
|
||||
uses: bruceadams/get-release@74c3d60f5a28f358ccf241a00c9021ea16f0569f # v1.3.2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# Upload deployment YAML file to GitHub release
|
||||
- name: Upload Deployment YAML file
|
||||
id: upload-deployment-yaml
|
||||
uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: https://uploads.github.com/repos/kedacore/http-add-on/releases/${{ steps.get-release-info.outputs.id }}/assets?name=keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
|
||||
asset_path: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
|
||||
asset_name: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}.yaml
|
||||
asset_content_type: application/x-yaml
|
||||
|
||||
# Upload CRD deployment YAML file to GitHub release
|
||||
- name: Upload Deployment YAML file
|
||||
id: upload-crd-deployment-yaml
|
||||
uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: https://uploads.github.com/repos/kedacore/http-add-on/releases/${{ steps.get-release-info.outputs.id }}/assets?name=keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
|
||||
asset_path: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
|
||||
asset_name: keda-add-ons-http-${{ steps.get_version.outputs.VERSION }}-crds.yaml
|
||||
asset_content_type: application/x-yaml
|
||||
|
|
|
@ -5,6 +5,13 @@ on:
|
|||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
e2e_tests:
|
||||
runs-on: ubuntu-latest
|
||||
|
@ -12,14 +19,14 @@ jobs:
|
|||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kubernetesVersion: [v1.26, v1.25, v1.24]
|
||||
kubernetesVersion: [v1.32, v1.31, v1.30]
|
||||
include:
|
||||
- kubernetesVersion: v1.26
|
||||
kindImage: kindest/node:v1.26.0@sha256:691e24bd2417609db7e589e1a479b902d2e209892a10ce375fab60a8407c7352
|
||||
- kubernetesVersion: v1.25
|
||||
kindImage: kindest/node:v1.25.0@sha256:428aaa17ec82ccde0131cb2d1ca6547d13cf5fdabcc0bbecf749baa935387cbf
|
||||
- kubernetesVersion: v1.24
|
||||
kindImage: kindest/node:v1.24.4@sha256:adfaebada924a26c2c9308edd53c6e33b3d4e453782c0063dc0028bdebaddf98
|
||||
- kubernetesVersion: v1.32
|
||||
kindImage: kindest/node:v1.32.0@sha256:c48c62eac5da28cdadcf560d1d8616cfa6783b58f0d94cf63ad1bf49600cb027
|
||||
- kubernetesVersion: v1.31
|
||||
kindImage: kindest/node:v1.31.4@sha256:2cb39f7295fe7eafee0842b1052a599a4fb0f8bcf3f83d96c7f4864c357c6c30
|
||||
- kubernetesVersion: v1.30
|
||||
kindImage: kindest/node:v1.30.8@sha256:17cd608b3971338d9180b00776cb766c50d0a0b6b904ab4ff52fd3fc5c6369bf
|
||||
steps:
|
||||
- name: Install prerequisites
|
||||
run: |
|
||||
|
@ -28,13 +35,17 @@ jobs:
|
|||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: "1.24"
|
||||
|
||||
- name: Helm install
|
||||
uses: Azure/setup-helm@v3
|
||||
uses: Azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
|
||||
|
||||
- name: Create k8s ${{ matrix.kubernetesVersion }} Kind Cluster
|
||||
uses: helm/kind-action@v1.7.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ matrix.kindImage }}
|
||||
cluster_name: cluster
|
||||
|
@ -68,7 +79,7 @@ jobs:
|
|||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Generate images
|
||||
run: |
|
||||
|
@ -85,26 +96,26 @@ jobs:
|
|||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kubernetesVersion: [v1.26, v1.25, v1.24]
|
||||
kubernetesVersion: [v1.32, v1.31, v1.30]
|
||||
include:
|
||||
- kubernetesVersion: v1.26
|
||||
kindImage: kindest/node:v1.26.0@sha256:691e24bd2417609db7e589e1a479b902d2e209892a10ce375fab60a8407c7352
|
||||
- kubernetesVersion: v1.25
|
||||
kindImage: kindest/node:v1.25.0@sha256:428aaa17ec82ccde0131cb2d1ca6547d13cf5fdabcc0bbecf749baa935387cbf
|
||||
- kubernetesVersion: v1.24
|
||||
kindImage: kindest/node:v1.24.4@sha256:adfaebada924a26c2c9308edd53c6e33b3d4e453782c0063dc0028bdebaddf98
|
||||
- kubernetesVersion: v1.32
|
||||
kindImage: kindest/node:v1.32.0@sha256:c48c62eac5da28cdadcf560d1d8616cfa6783b58f0d94cf63ad1bf49600cb027
|
||||
- kubernetesVersion: v1.31
|
||||
kindImage: kindest/node:v1.31.4@sha256:2cb39f7295fe7eafee0842b1052a599a4fb0f8bcf3f83d96c7f4864c357c6c30
|
||||
- kubernetesVersion: v1.30
|
||||
kindImage: kindest/node:v1.30.8@sha256:17cd608b3971338d9180b00776cb766c50d0a0b6b904ab4ff52fd3fc5c6369bf
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- uses: actions/setup-go@v4
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: 1.19
|
||||
go-version: "1.24"
|
||||
|
||||
- name: Helm install
|
||||
uses: Azure/setup-helm@v3
|
||||
uses: Azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
|
||||
|
||||
- name: Create k8s ${{ matrix.kubernetesVersion }} Kind Cluster
|
||||
uses: helm/kind-action@v1.7.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
node_image: ${{ matrix.kindImage }}
|
||||
cluster_name: ${{ runner.name }}
|
||||
|
|
|
@ -3,12 +3,19 @@ on:
|
|||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build_scaler:
|
||||
runs-on: ubuntu-latest
|
||||
container: ghcr.io/kedacore/build-tools:1.19.5
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
- name: Build The Scaler
|
||||
|
@ -18,9 +25,9 @@ jobs:
|
|||
|
||||
build_operator:
|
||||
runs-on: ubuntu-latest
|
||||
container: ghcr.io/kedacore/build-tools:1.19.5
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
- name: Build The Operator
|
||||
|
@ -30,9 +37,9 @@ jobs:
|
|||
|
||||
build_interceptor:
|
||||
runs-on: ubuntu-latest
|
||||
container: ghcr.io/kedacore/build-tools:1.19.5
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
- name: Build The Interceptor
|
||||
|
|
|
@ -5,14 +5,22 @@ on:
|
|||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
linkinator:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: JustinBeckwith/linkinator-action@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
- uses: JustinBeckwith/linkinator-action@3d5ba091319fa7b0ac14703761eebb7d100e6f6d # v1
|
||||
with:
|
||||
paths: "**/*.md"
|
||||
markdown: true
|
||||
concurrency: 1
|
||||
retry: true
|
||||
linksToSkip: "https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler"
|
||||
linksToSkip: "https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator, https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler,http://opentelemetry-collector.open-telemetry-system:4318,http://opentelemetry-collector.open-telemetry-system:4318/v1/traces, https://www.gnu.org/software/make/"
|
||||
|
|
|
@ -5,11 +5,18 @@ on:
|
|||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
validate:
|
||||
name: validate - ${{ matrix.name }}
|
||||
runs-on: ${{ matrix.runner }}
|
||||
container: ghcr.io/kedacore/build-tools:1.19.5
|
||||
container: ghcr.io/kedacore/keda-tools:1.24.3
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
|
@ -18,7 +25,7 @@ jobs:
|
|||
- runner: ubuntu-latest
|
||||
name: amd64
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Register workspace path
|
||||
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
|
@ -33,13 +40,13 @@ jobs:
|
|||
echo ::set-output name=build_cache::$(go env GOCACHE)
|
||||
|
||||
- name: Go modules cache
|
||||
uses: actions/cache@v3.3.1
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
with:
|
||||
path: ${{ steps.go-paths.outputs.mod_cache }}
|
||||
key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Go build cache
|
||||
uses: actions/cache@v3.3.1
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
with:
|
||||
path: ${{ steps.go-paths.outputs.build_cache }}
|
||||
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
|
||||
|
@ -63,16 +70,16 @@ jobs:
|
|||
run: ARCH=${{ matrix.name }} make test
|
||||
|
||||
statics:
|
||||
permissions:
|
||||
contents: read # for actions/checkout to fetch code
|
||||
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
|
||||
name: Static Checks
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4.6.1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
|
||||
with:
|
||||
python-version: 3.x
|
||||
- uses: actions/setup-go@v4
|
||||
go-version: "1.24"
|
||||
- uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
|
||||
with:
|
||||
go-version: 1.19
|
||||
- name: Get golangci
|
||||
run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.52.2
|
||||
- uses: pre-commit/action@v3.0.0
|
||||
version: v2.1.0
|
||||
|
|
|
@ -355,3 +355,10 @@ admin/Cargo.lock
|
|||
|
||||
/target
|
||||
.envrc
|
||||
|
||||
# locally generated certs for testing TLS
|
||||
*.crt
|
||||
*.pem
|
||||
*.csr
|
||||
*.srl
|
||||
*.ext
|
||||
|
|
111
.golangci.yml
111
.golangci.yml
|
@ -1,71 +1,74 @@
|
|||
# options for analysis running
|
||||
version: "2"
|
||||
run:
|
||||
# default concurrency is a available CPU number
|
||||
concurrency: 4
|
||||
# add the build tags to include e2e tests files
|
||||
build-tags:
|
||||
- e2e
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
timeout: 10m
|
||||
- e2e
|
||||
linters:
|
||||
# please, do not use `enable-all`: it's deprecated and will be removed soon.
|
||||
# inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
|
||||
disable-all: true
|
||||
default: none
|
||||
enable:
|
||||
- typecheck
|
||||
- dupl
|
||||
- goprintffuncname
|
||||
- govet
|
||||
- nolintlint
|
||||
#- rowserrcheck
|
||||
- gofmt
|
||||
- revive
|
||||
- goimports
|
||||
- misspell
|
||||
- bodyclose
|
||||
- unconvert
|
||||
- ineffassign
|
||||
- staticcheck
|
||||
- exportloopref
|
||||
- depguard
|
||||
- copyloopvar
|
||||
#- depguard #https://github.com/kedacore/keda/issues/4980
|
||||
- dogsled
|
||||
- dupl
|
||||
- errcheck
|
||||
#- funlen
|
||||
- gci
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- gosimple
|
||||
- stylecheck
|
||||
- unused
|
||||
- unparam
|
||||
- goprintffuncname
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- nolintlint
|
||||
- revive
|
||||
- staticcheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- whitespace
|
||||
|
||||
issues:
|
||||
include:
|
||||
- EXC0002 # disable excluding of issues about comments from golint
|
||||
# Excluding configuration per-path, per-linter, per-text and per-source
|
||||
exclude-rules:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- gomnd
|
||||
- dupl
|
||||
- unparam
|
||||
# Exclude gci check for //+kubebuilder:scaffold:imports comments. Waiting to
|
||||
# resolve https://github.com/kedacore/keda/issues/4379
|
||||
- path: operator/controllers/http/suite_test.go
|
||||
linters:
|
||||
- gci
|
||||
- path: operator/main.go
|
||||
linters:
|
||||
- gci
|
||||
linters-settings:
|
||||
funlen:
|
||||
lines: 80
|
||||
statements: 40
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- prefix(github.com/kedacore/http-add-on)
|
||||
settings:
|
||||
funlen:
|
||||
lines: 80
|
||||
statements: 40
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
- linters:
|
||||
- dupl
|
||||
- revive
|
||||
- unparam
|
||||
path: _test\.go
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- goimports
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- prefix(github.com/kedacore/http-add-on)
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
# Exclude gci check for //+kubebuilder:scaffold:imports comments. Waiting to
|
||||
# resolve https://github.com/kedacore/keda/issues/4379
|
||||
- operator/controllers/http/suite_test.go
|
||||
- operator/main.go
|
||||
|
|
|
@ -37,3 +37,9 @@ repos:
|
|||
entry: golangci-lint run
|
||||
types: [go]
|
||||
pass_filenames: false
|
||||
- id: validate-changelog
|
||||
name: Validate Changelog
|
||||
language: system
|
||||
entry: "bash hack/validate-changelog.sh"
|
||||
pass_filenames: false
|
||||
files: CHANGELOG\.md
|
||||
|
|
|
@ -6,7 +6,8 @@ This page contains a list of organizations who are using KEDA's HTTP Add-on in p
|
|||
|
||||
| Organization | Status | More Information (Blog post, etc.) |
|
||||
| ------------ | ---------| ---------------|
|
||||
|N/A|| N/A|
|
||||
| PropulsionAI ||[PropulsionAI](https://propulsionhq.com) allows you to add AI to your apps, without writing code.|
|
||||
| REWE Digital ||From delivery service to market — [REWE Digital](https://www.rewe-digital.com) strengthens leading technological position of REWE Group in food retail sector. |
|
||||
|
||||
## Become an adopter!
|
||||
|
||||
|
|
175
CHANGELOG.md
175
CHANGELOG.md
|
@ -10,6 +10,12 @@ This changelog keeps track of work items that have been completed and are ready
|
|||
## History
|
||||
|
||||
- [Unreleased](#unreleased)
|
||||
- [v0.10.0](#v0100)
|
||||
- [v0.9.0](#v090)
|
||||
- [v0.8.0](#v080)
|
||||
- [v0.7.0](#v070)
|
||||
- [v0.6.0](#v060)
|
||||
- [v0.5.0](#v050)
|
||||
|
||||
## Unreleased
|
||||
|
||||
|
@ -19,23 +25,176 @@ This changelog keeps track of work items that have been completed and are ready
|
|||
|
||||
### New
|
||||
|
||||
- **General**: Add failover service on cold-start ([#1280](https://github.com/kedacore/http-add-on/pull/1280))
|
||||
- **General**: Add configurable tracing support to the interceptor proxy ([#1021](https://github.com/kedacore/http-add-on/pull/1021))
|
||||
- **General**: Allow using HSO and SO with different names ([#1293](https://github.com/kedacore/http-add-on/issues/1293))
|
||||
- **General**: Support profiling for KEDA components ([#4789](https://github.com/kedacore/keda/issues/4789))
|
||||
- **General**: Add possibility to skip TLS verification for upstreams in interceptor ([#1307](https://github.com/kedacore/http-add-on/pull/1307))
|
||||
### Improvements
|
||||
|
||||
- **Interceptor**: Support HTTPScaledObject scoped timeout ([#813](https://github.com/kedacore/http-add-on/issues/813))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))
|
||||
|
||||
### Deprecations
|
||||
|
||||
- **General**: TODO ([#TODO](https://github.com/kedacore/http-add-on/issues/TODO))
|
||||
|
||||
### Other
|
||||
|
||||
- **Documentation**: Correct the service name used in the walkthrough documentation ([#1244](https://github.com/kedacore/http-add-on/pull/1244))
|
||||
|
||||
## v0.10.0
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Fix infrastructure crashes when deleting ScaledObject while scaling
|
||||
- **General**: Fix kubectl active printcolumn ([#1211](https://github.com/kedacore/http-add-on/issues/1211))
|
||||
- **General**: Support InitialCooldownPeriod for HTTPScaledObject [#1213](https://github.com/kedacore/http-add-on/issues/1213)
|
||||
|
||||
### Other
|
||||
|
||||
- **Documentation**: Correct the service name used in the walkthrough documentation ([#1244](https://github.com/kedacore/http-add-on/pull/1244))
|
||||
|
||||
## v0.9.0
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- **General**: Drop support for deprecated field `spec.scaleTargetRef.deployment` ([#1061](https://github.com/kedacore/http-add-on/issues/1061))
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Support portName in HTTPScaledObject service scaleTargetRef ([#1174](https://github.com/kedacore/http-add-on/issues/1174))
|
||||
- **General**: Support setting multiple TLS certs for different domains on the interceptor proxy ([#1116](https://github.com/kedacore/http-add-on/issues/1116))
|
||||
- **Interceptor**: Add support for for AWS ELB healthcheck probe ([#1198](https://github.com/kedacore/http-add-on/issues/1198))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: Align the interceptor metrics env var configuration with the OTEL spec ([#1031](https://github.com/kedacore/http-add-on/issues/1031))
|
||||
- **General**: Include trailing 0 window buckets in RPS calculation ([#1075](https://github.com/kedacore/http-add-on/issues/1075))
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Sign images with Cosign ([#1062](https://github.com/kedacore/http-add-on/issues/1062))
|
||||
|
||||
## v0.8.0
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Add configurable TLS on the wire support to the interceptor proxy ([#907](https://github.com/kedacore/http-add-on/issues/907))
|
||||
- **General**: Add support for collecting metrics using a Prometheus compatible endpoint or by sending metrics to an OpenTelemetry's HTTP endpoint ([#910](https://github.com/kedacore/http-add-on/issues/910))
|
||||
- **General**: Propagate HTTPScaledObject labels and annotations to ScaledObject ([#840](https://github.com/kedacore/http-add-on/issues/840))
|
||||
- **General**: Provide support for allowing HTTP scaler to work alongside other core KEDA scalers ([#489](https://github.com/kedacore/http-add-on/issues/489))
|
||||
- **General**: Support aggregation windows ([#882](https://github.com/kedacore/http-add-on/issues/882))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: Ensure operator is aware about changes on underlying ScaledObject ([#900](https://github.com/kedacore/http-add-on/issues/900))
|
||||
|
||||
### Deprecations
|
||||
|
||||
You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).
|
||||
|
||||
- **General**: Deprecated `targetPendingRequests` in favor of `spec.scalingMetric.*.targetValue` ([#959](https://github.com/kedacore/http-add-on/discussions/959))
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Align with the new format of Ingress in the example demo ([#979](https://github.com/kedacore/http-add-on/pull/979))
|
||||
- **General**: Unify loggers ([#958](https://github.com/kedacore/http-add-on/issues/958))
|
||||
|
||||
## v0.7.0
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- **General**: `host` field has been removed in favor of `hosts` in `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552)|[#888](https://github.com/kedacore/http-add-on/pull/888))
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Support any resource which implements `/scale` subresource ([#438](https://github.com/kedacore/http-add-on/issues/438))
|
||||
|
||||
### Improvements
|
||||
|
||||
- **General**: Improve Scaler reliability adding probes and 3 replicas ([#870](https://github.com/kedacore/http-add-on/issues/870))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: Add new user agent probe ([#862](https://github.com/kedacore/http-add-on/issues/862))
|
||||
- **General**: Fix external scaler getting into bad state when retrieving queue lengths fails. ([#870](https://github.com/kedacore/http-add-on/issues/870))
|
||||
- **General**: Increase ScaledObject polling interval to 15 seconds ([#799](https://github.com/kedacore/http-add-on/issues/799))
|
||||
- **General**: Set forward request RawPath to original request RawPath ([#864](https://github.com/kedacore/http-add-on/issues/864))
|
||||
|
||||
### Deprecations
|
||||
|
||||
You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).
|
||||
|
||||
New deprecation(s):
|
||||
|
||||
- **General**: Deprecated `KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS` in favor of `KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS` ([#438](https://github.com/kedacore/http-add-on/issues/438))
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Bump golang version ([#853](https://github.com/kedacore/http-add-on/pull/853))
|
||||
|
||||
## v0.6.0
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Add manifests to deploy the Add-on ([#716](https://github.com/kedacore/http-add-on/issues/716))
|
||||
|
||||
### Improvements
|
||||
|
||||
- **Scaler**: Decrease memory usage by allowing increasing stream interval configuration ([#745](https://github.com/kedacore/http-add-on/pull/745))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **Interceptor**: Add support for streaming responses ([#743](https://github.com/kedacore/http-add-on/issues/743))
|
||||
- **Interceptor**: Fatal error: concurrent map iteration and map write ([#726](https://github.com/kedacore/http-add-on/issues/726))
|
||||
- **Interceptor**: Keep original Host in the Host header ([#331](https://github.com/kedacore/http-add-on/issues/331))
|
||||
- **Interceptor**: Provide graceful shutdown for http servers on SIGINT and SIGTERM ([#731](https://github.com/kedacore/http-add-on/issues/731))
|
||||
- **Operator**: Remove ScaledObject `name` & `app` custom labels ([#717](https://github.com/kedacore/http-add-on/issues/717))
|
||||
- **Scaler**: Provide graceful shutdown for grpc server on SIGINT and SIGTERM ([#731](https://github.com/kedacore/http-add-on/issues/731))
|
||||
- **Scaler**: Reimplement custom interceptor metrics ([#718](https://github.com/kedacore/http-add-on/issues/718))
|
||||
|
||||
### Deprecations
|
||||
|
||||
You can find all deprecations in [this overview](https://github.com/kedacore/http-add-on/labels/breaking-change) and [join the discussion here](https://github.com/kedacore/http-add-on/discussions/categories/deprecations).
|
||||
|
||||
New deprecation(s):
|
||||
|
||||
- **General**: `host` field deprecated in favor of `hosts` in `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552))
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Adding a changelog validating script to check for formatting and order ([#761](https://github.com/kedacore/http-add-on/pull/761))
|
||||
- **General**: Skip not required CI checks on PRs on new commits ([#801](https://github.com/kedacore/http-add-on/pull/801))
|
||||
|
||||
## v0.5.0
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
None.
|
||||
|
||||
### New
|
||||
|
||||
- **General**: Log incoming requests using the Combined Log Format ([#669](https://github.com/kedacore/http-add-on/pull/669))
|
||||
- **Routing**: Add multi-host support to `HTTPScaledObject` ([#552](https://github.com/kedacore/http-add-on/issues/552))
|
||||
- **Routing**: Support path-based routing ([#338](https://github.com/kedacore/http-add-on/issues/338))
|
||||
- **General**: Log incoming requests using the Combined Log Format ([#669](https://github.com/kedacore/http-add-on/pull/669))
|
||||
|
||||
### Improvements
|
||||
|
||||
- **General**: Automatically tag Docker image with commit SHA ([#567](https://github.com/kedacore/http-add-on/issues/567))
|
||||
- **RBAC**: Introduce fine-grained permissions per component and reduce required permissions ([#612](https://github.com/kedacore/http-add-on/issues/612))
|
||||
- **Operator**: Migrate project to Kubebuilder v3 ([#625](https://github.com/kedacore/http-add-on/issues/625))
|
||||
- **RBAC**: Introduce fine-grained permissions per component and reduce required permissions ([#612](https://github.com/kedacore/http-add-on/issues/612))
|
||||
- **Routing**: New routing table implementation that relies on the live state of HTTPScaledObjects on the K8s Cluster instead of a ConfigMap that is updated periodically ([#605](https://github.com/kedacore/http-add-on/issues/605))
|
||||
|
||||
### Fixes
|
||||
|
||||
- **General**: HTTPScaledObject is the owner of the underlying ScaledObject ([#703](https://github.com/kedacore/http-add-on/issues/703))
|
||||
- **Routing**: Lookup host without port ([#608](https://github.com/kedacore/http-add-on/issues/608))
|
||||
- **Controller**: Use kedav1alpha1.ScaledObject default values ([#607](https://github.com/kedacore/http-add-on/issues/607))
|
||||
- **General**: Changes to HTTPScaledObjects now take effect ([#605](https://github.com/kedacore/http-add-on/issues/605))
|
||||
- **General**: HTTPScaledObject is the owner of the underlying ScaledObject ([#703](https://github.com/kedacore/http-add-on/issues/703))
|
||||
- **Controller**: Use kedav1alpha1.ScaledObject default values ([#607](https://github.com/kedacore/http-add-on/issues/607))
|
||||
- **Routing**: Lookup host without port ([#608](https://github.com/kedacore/http-add-on/issues/608))
|
||||
|
||||
### Deprecations
|
||||
|
||||
|
@ -47,9 +206,9 @@ New deprecation(s):
|
|||
|
||||
Previously announced deprecation(s):
|
||||
|
||||
- TODO
|
||||
- None.
|
||||
|
||||
### Other
|
||||
|
||||
- **General**: Use kubernetes e2e images for e2e test and samples ([#665]https://github.com/kedacore/http-add-on/issues/665)
|
||||
- **e2e tests**: Use the same e2e system as in core ([#686]https://github.com/kedacore/http-add-on/pull/686)
|
||||
- **General**: Use kubernetes e2e images for e2e test and samples ([#665](https://github.com/kedacore/http-add-on/issues/665))
|
||||
- **e2e tests**: Use the same e2e system as in core ([#686](https://github.com/kedacore/http-add-on/pull/686))
|
||||
|
|
|
@ -89,7 +89,7 @@ K9s integrates Hey, a CLI tool to benchmark HTTP endpoints similar to AB bench.
|
|||
```
|
||||
- You'll need to clone the repository to get access to this chart. If you have your own Deployment and Service installed, you can go right to creating an HTTPScaledObject. We use the provided sample HTTPScaledObject -
|
||||
```
|
||||
$ kubectl create -n $NAMESPACE -f examples/v0.3.0/httpscaledobject.yaml
|
||||
$ kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
|
||||
```
|
||||
- Testing Your Installation using k9s:
|
||||
```
|
||||
|
|
96
Makefile
96
Makefile
|
@ -32,6 +32,28 @@ GO_LDFLAGS="-X github.com/kedacore/http-add-on/pkg/build.version=${VERSION} -X g
|
|||
GIT_COMMIT ?= $(shell git rev-list -1 HEAD)
|
||||
GIT_COMMIT_SHORT ?= $(shell git rev-parse --short HEAD)
|
||||
|
||||
COSIGN_FLAGS ?= -y -a GIT_HASH=${GIT_COMMIT} -a GIT_VERSION=${VERSION} -a BUILD_DATE=${DATE}
|
||||
|
||||
define DOMAINS
|
||||
basicConstraints=CA:FALSE
|
||||
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = localhost
|
||||
DNS.2 = *.keda
|
||||
DNS.3 = *.interceptor-tls-test-ns
|
||||
endef
|
||||
export DOMAINS
|
||||
|
||||
define ABC_DOMAINS
|
||||
basicConstraints=CA:FALSE
|
||||
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = abc
|
||||
endef
|
||||
export ABC_DOMAINS
|
||||
|
||||
# Build targets
|
||||
|
||||
build-operator:
|
||||
|
@ -45,13 +67,36 @@ build-scaler:
|
|||
|
||||
build: build-operator build-interceptor build-scaler
|
||||
|
||||
# generate certs for local unit and e2e tests
|
||||
rootca-test-certs:
|
||||
mkdir -p certs
|
||||
openssl req -x509 -nodes -new -sha256 -days 1024 -newkey rsa:2048 -keyout certs/RootCA.key -out certs/RootCA.pem -subj "/C=US/CN=Keda-Root-CA"
|
||||
openssl x509 -outform pem -in certs/RootCA.pem -out certs/RootCA.crt
|
||||
|
||||
test-certs: rootca-test-certs
|
||||
echo "$$DOMAINS" > certs/domains.ext
|
||||
openssl req -new -nodes -newkey rsa:2048 -keyout certs/tls.key -out certs/tls.csr -subj "/C=US/ST=KedaState/L=KedaCity/O=Keda-Certificates/CN=keda.local"
|
||||
openssl x509 -req -sha256 -days 1024 -in certs/tls.csr -CA certs/RootCA.pem -CAkey certs/RootCA.key -CAcreateserial -extfile certs/domains.ext -out certs/tls.crt
|
||||
echo "$$ABC_DOMAINS" > certs/abc_domains.ext
|
||||
openssl req -new -nodes -newkey rsa:2048 -keyout certs/abc.tls.key -out certs/abc.tls.csr -subj "/C=US/ST=KedaState/L=KedaCity/O=Keda-Certificates/CN=abc"
|
||||
openssl x509 -req -sha256 -days 1024 -in certs/abc.tls.csr -CA certs/RootCA.pem -CAkey certs/RootCA.key -CAcreateserial -extfile certs/abc_domains.ext -out certs/abc.tls.crt
|
||||
|
||||
clean-test-certs:
|
||||
rm -r certs || true
|
||||
|
||||
# Test targets
|
||||
test: fmt vet
|
||||
test: fmt vet test-certs
|
||||
go test ./...
|
||||
|
||||
e2e-test:
|
||||
go run -tags e2e ./tests/run-all.go
|
||||
|
||||
e2e-test-setup:
|
||||
ONLY_SETUP=true go run -tags e2e ./tests/run-all.go
|
||||
|
||||
e2e-test-local:
|
||||
SKIP_SETUP=true go run -tags e2e ./tests/run-all.go
|
||||
|
||||
# Docker targets
|
||||
docker-build-operator:
|
||||
DOCKER_BUILDKIT=1 docker build . -t ${IMAGE_OPERATOR_VERSIONED_TAG} -t ${IMAGE_OPERATOR_SHA_TAG} -f operator/Dockerfile --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT}
|
||||
|
@ -83,11 +128,21 @@ publish-scaler-multiarch:
|
|||
|
||||
publish-multiarch: publish-operator-multiarch publish-interceptor-multiarch publish-scaler-multiarch
|
||||
|
||||
release: manifests kustomize ## Produce new KEDA Http Add-on release in keda-add-ons-http-$(VERSION).yaml file.
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-interceptor=${IMAGE_INTERCEPTOR_VERSIONED_TAG}
|
||||
cd config/scaler && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-scaler=${IMAGE_SCALER_VERSIONED_TAG}
|
||||
cd config/operator && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-operator=${IMAGE_OPERATOR_VERSIONED_TAG}
|
||||
$(KUSTOMIZE) build config/default > keda-add-ons-http-$(VERSION).yaml
|
||||
$(KUSTOMIZE) build config/crd > keda-add-ons-http-$(VERSION)-crds.yaml
|
||||
|
||||
# Development
|
||||
|
||||
generate: codegen manifests mockgen ## Generate code, manifests, and mocks.
|
||||
generate: codegen mockgen manifests ## Generate code, manifests, and mocks.
|
||||
|
||||
verify: verify-codegen verify-manifests verify-mockgen ## Verify code, manifests, and mocks.
|
||||
verify: verify-codegen verify-mockgen verify-manifests ## Verify code, manifests, and mocks.
|
||||
|
||||
codegen: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
||||
$(CONTROLLER_GEN) object:headerFile='hack/boilerplate.go.txt' paths='./...'
|
||||
|
@ -104,6 +159,14 @@ manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and Cust
|
|||
verify-manifests: ## Verify manifests are up to date.
|
||||
./hack/verify-manifests.sh
|
||||
|
||||
sign-images: ## Sign KEDA images published on GitHub Container Registry
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_OPERATOR_VERSIONED_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_OPERATOR_SHA_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_INTERCEPTOR_VERSIONED_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_INTERCEPTOR_SHA_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_SCALER_VERSIONED_TAG)
|
||||
COSIGN_EXPERIMENTAL=1 cosign sign ${COSIGN_FLAGS} $(IMAGE_SCALER_SHA_TAG)
|
||||
|
||||
mockgen: ## Generate mock implementations of Go interfaces.
|
||||
./hack/update-mockgen.sh
|
||||
|
||||
|
@ -124,19 +187,37 @@ pre-commit: ## Run static-checks.
|
|||
|
||||
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
|
||||
controller-gen: ## Download controller-gen locally if necessary.
|
||||
GOBIN=$(shell pwd)/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.12.0
|
||||
GOBIN=$(shell pwd)/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0
|
||||
|
||||
KUSTOMIZE = $(shell pwd)/bin/kustomize
|
||||
kustomize: ## Download kustomize locally if necessary.
|
||||
GOBIN=$(shell pwd)/bin go install sigs.k8s.io/kustomize/kustomize/v5@v5.0.3
|
||||
GOBIN=$(shell pwd)/bin go install sigs.k8s.io/kustomize/kustomize/v5
|
||||
|
||||
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
|
||||
$(KUSTOMIZE) build config/crd | kubectl apply -f -
|
||||
|
||||
deploy: manifests kustomize ## Deploy to the K8s cluster specified in ~/.kube/config.
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-interceptor=${IMAGE_INTERCEPTOR_VERSIONED_TAG}
|
||||
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/otel/deployment.yaml --group apps --kind Deployment --name interceptor --version v1
|
||||
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/otel/scaledobject.yaml --group keda.sh --kind ScaledObject --name interceptor --version v1alpha1
|
||||
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/tls/deployment.yaml --group apps --kind Deployment --name interceptor --version v1
|
||||
|
||||
cd config/interceptor && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/tls/proxy.service.yaml --kind Service --name interceptor-proxy --version v1
|
||||
|
||||
cd config/scaler && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-scaler=${IMAGE_SCALER_VERSIONED_TAG}
|
||||
|
||||
cd config/scaler && \
|
||||
$(KUSTOMIZE) edit add patch --path e2e-test/otel/deployment.yaml --group apps --kind Deployment --name scaler --version v1
|
||||
|
||||
cd config/operator && \
|
||||
$(KUSTOMIZE) edit set image ghcr.io/kedacore/http-add-on-operator=${IMAGE_OPERATOR_VERSIONED_TAG}
|
||||
|
||||
|
@ -144,8 +225,3 @@ deploy: manifests kustomize ## Deploy to the K8s cluster specified in ~/.kube/co
|
|||
|
||||
undeploy:
|
||||
$(KUSTOMIZE) build config/default | kubectl delete -f -
|
||||
|
||||
kind-load:
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-operator:${VERSION}
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-interceptor:${VERSION}
|
||||
kind load docker-image ghcr.io/kedacore/http-add-on-scaler:${VERSION}
|
||||
|
|
|
@ -22,7 +22,7 @@ The KEDA HTTP Add-on allows Kubernetes users to automatically scale their HTTP s
|
|||
|
||||
| 🚧 **Project status: beta** 🚧|
|
||||
|---------------------------------------------|
|
||||
| :loudspeaker: **KEDA is actively relying on community contributions to help grow & maintain the add-on. The KEDA maintainers are assisting the community to evolve the add-on but not directly responsible for it.** Feel free to [open a new discussion](https://github.com/kedacore/http-add-on/discussions/new/choose) in case of questions.<br/><br/>⚠ The HTTP Add-on currently is in [beta](https://github.com/kedacore/http-add-on/releases/latest). We can't yet recommend it for production usage because we are still developing and testing it. It may have "rough edges" including missing documentation, bugs and other issues. It is currently provided as-is without support. |
|
||||
| :loudspeaker: **KEDA is actively relying on community contributions to help grow & maintain the add-on. The KEDA maintainers are assisting the community to evolve the add-on but not directly responsible for it.** Feel free to [open a new discussion](https://github.com/kedacore/http-add-on/discussions/new/choose) in case of questions.<br/><br/>⚠ The HTTP Add-on currently is in [beta](https://github.com/kedacore/http-add-on/releases/latest). We can't yet recommend it for production usage because we are still developing and testing it. It may have "rough edges" including missing documentation, bugs and other issues. It is currently provided as-is without support.<br/><br/>:bulb: For production-ready needs, you can consider using the [Kedify HTTP Scaler](https://kedify.io/scalers/http), a commercial alternative offering robust and reliable scaling for KEDA. |
|
||||
|
||||
## HTTP Autoscaling Made Simple
|
||||
|
||||
|
@ -75,7 +75,7 @@ This project follows the KEDA contributing guidelines, which are outlined in [CO
|
|||
If you would like to contribute code to this project, please see [docs/developing.md](./docs/developing.md).
|
||||
|
||||
---
|
||||
We are a Cloud Native Computing Foundation (CNCF) incubation project.
|
||||
We are a Cloud Native Computing Foundation (CNCF) graduated project.
|
||||
<p align="center"><img src="https://raw.githubusercontent.com/kedacore/keda/main/images/logo-cncf.svg" height="75px"></p>
|
||||
|
||||
## Code of Conduct
|
||||
|
|
|
@ -10,7 +10,27 @@ Please go to the [releases page](https://github.com/kedacore/http-add-on/release
|
|||
|
||||
To determine the new version, follow [SemVer guidelines](https://semver.org). Most releases will increment the PATCH or MINOR version number.
|
||||
|
||||
## 2: Create a new GitHub release
|
||||
## 2. Changelog
|
||||
|
||||
Add a new section in [CHANGELOG.md](CHANGELOG.md) for the new version that is being released along with the new features, patches and deprecations it introduces.
|
||||
|
||||
It should not include every single change but solely what matters to our customers, for example issue template that has changed is not important.
|
||||
|
||||
## 3. Add the new version to GitHub Bug report template
|
||||
|
||||
Add the new released version to the list in `KEDA Version` dropdown in [2_bug_report.yml](https://github.com/kedacore/http-add-on/blob/main/.github/ISSUE_TEMPLATE/2_bug_report.yml).
|
||||
|
||||
## 4. Update documentation references to current version
|
||||
|
||||
Update the links to current version within the file `walkthrough.md`
|
||||
|
||||
> ```console
|
||||
> kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
|
||||
> ```
|
||||
|
||||
> >If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](THE REFERENCE).
|
||||
|
||||
## 5: Create a new GitHub release
|
||||
|
||||
[Create a new release](https://github.com/kedacore/http-add-on/releases/new) on the GitHub releases page, using your new release number.
|
||||
|
||||
|
@ -20,7 +40,7 @@ The release description should be a short to medium length summary of what has c
|
|||
|
||||
After you create the new release, automation in a GitHub action will build and deploy new container images.
|
||||
|
||||
## 3: Submit a PR to the [Helm Charts Repository](https://github.com/kedacore/charts)
|
||||
## 6: Submit a PR to the [Helm Charts Repository](https://github.com/kedacore/charts)
|
||||
|
||||
The scope of the changes you'll need to make to the Helm chart vary, but the below list is the minimum set of fields to change:
|
||||
|
||||
|
@ -44,7 +64,7 @@ images:
|
|||
tag: 1.2.3
|
||||
```
|
||||
|
||||
>Note: The container images generated by CI/CD in step 2 will have the same tag as the tag you created in the release, minus the `v` prefix. You can always see what images created by going to the container registry page for the [interceptor](https://github.com/orgs/kedacore/packages/container/package/http-add-on-interceptor), [operator](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator) or [scaler](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler)
|
||||
>Note: The container images generated by CI/CD in step 2 will have the same tag as the tag you created in the release, minus the `v` prefix. You can always see what images created by going to the container registry page for the [interceptor](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-interceptor), [operator](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-operator) or [scaler](https://github.com/kedacore/http-add-on/pkgs/container/http-add-on-scaler)
|
||||
|
||||
|
||||
Once you've made changes to the chart, here's how to do submit the change to the charts repository:
|
||||
|
@ -56,11 +76,11 @@ Once you've made changes to the chart, here's how to do submit the change to the
|
|||
|
||||
After your PR is merged, you've completed the release. Congratulations! You can optionally write a blog post about it; see the next section if you're interested.
|
||||
|
||||
## 4: Publish release on Artifact Hub
|
||||
## 7: Publish release on Artifact Hub
|
||||
|
||||
Publish release on Artifact Hub by creating a new version in [kedacore/external-scalers](https://github.com/kedacore/external-scalers/tree/main/artifacthub/add-ons-http).
|
||||
|
||||
## 5: Write a blog post on the documentation site (_optional_)
|
||||
## 8: Write a blog post on the documentation site (_optional_)
|
||||
|
||||
If you believe that your release is large enough to warrant a blog post on the [keda.sh/blog](https://keda.sh/blog/) site, please go to [github.com/kedacore/keda-docs](https://github.com/kedacore/keda-docs) and submit a new PR with a blog article about the release.
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.12.0
|
||||
controller-gen.kubebuilder.io/version: v0.15.0
|
||||
name: httpscaledobjects.http.keda.sh
|
||||
spec:
|
||||
group: http.keda.sh
|
||||
|
@ -17,15 +17,12 @@ spec:
|
|||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.scaleTargetRef.deploymentName
|
||||
name: ScaleTargetDeploymentName
|
||||
- jsonPath: .status.targetWorkload
|
||||
name: TargetWorkload
|
||||
type: string
|
||||
- jsonPath: .spec.scaleTargetRef
|
||||
name: ScaleTargetServiceName
|
||||
- jsonPath: .status.targetService
|
||||
name: TargetService
|
||||
type: string
|
||||
- jsonPath: .spec.scaleTargetRef
|
||||
name: ScaleTargetPort
|
||||
type: integer
|
||||
- jsonPath: .spec.replicas.min
|
||||
name: MinReplicas
|
||||
type: integer
|
||||
|
@ -35,7 +32,7 @@ spec:
|
|||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
- jsonPath: .status.conditions[?(@.type=="HTTPScaledObjectIsReady")].status
|
||||
- jsonPath: .status.conditions[?(@.reason=="HTTPScaledObjectIsReady")].status
|
||||
name: Active
|
||||
type: string
|
||||
name: v1alpha1
|
||||
|
@ -44,43 +41,70 @@ spec:
|
|||
description: HTTPScaledObject is the Schema for the httpscaledobjects API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: HTTPScaledObjectSpec defines the desired state of HTTPScaledObject
|
||||
properties:
|
||||
host:
|
||||
description: (optional) (deprecated) The host to route. All requests
|
||||
which the "Host" header matches .spec.host and the Request Target
|
||||
matches any .spec.pathPrefixes will be routed to the Service and
|
||||
Port specified in the scaleTargetRef. The .spec.host field is mutually
|
||||
exclusive with the .spec.hosts field.
|
||||
type: string
|
||||
coldStartTimeoutFailoverRef:
|
||||
description: (optional) The name of the failover service to route
|
||||
HTTP requests to when the target is not available
|
||||
properties:
|
||||
port:
|
||||
description: The port to route to
|
||||
format: int32
|
||||
type: integer
|
||||
portName:
|
||||
description: The port to route to referenced by name
|
||||
type: string
|
||||
service:
|
||||
description: The name of the service to route to
|
||||
type: string
|
||||
timeoutSeconds:
|
||||
default: 30
|
||||
description: The timeout in seconds to wait before routing to
|
||||
the failover service (Default 30)
|
||||
format: int32
|
||||
type: integer
|
||||
required:
|
||||
- service
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: must define either the 'portName' or the 'port'
|
||||
rule: has(self.portName) != has(self.port)
|
||||
hosts:
|
||||
description: (optional) The hosts to route. All requests which the
|
||||
"Host" header matches any .spec.hosts and the Request Target matches
|
||||
any .spec.pathPrefixes will be routed to the Service and Port specified
|
||||
in the scaleTargetRef. The .spec.hosts field is mutually exclusive
|
||||
with the .spec.host field.
|
||||
description: |-
|
||||
The hosts to route. All requests which the "Host" header
|
||||
matches any .spec.hosts (and the Request Target matches any
|
||||
.spec.pathPrefixes) will be routed to the Service and Port specified in
|
||||
the scaleTargetRef.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
initialCooldownPeriod:
|
||||
description: (optional) Initial period before scaling
|
||||
format: int32
|
||||
type: integer
|
||||
pathPrefixes:
|
||||
description: (optional) The paths to route. All requests which the
|
||||
Request Target matches any .spec.pathPrefixes and the "Host" header
|
||||
matches any .spec.hosts will be routed to the Service and Port specified
|
||||
in the scaleTargetRef. The .spec.hosts field is mutually exclusive
|
||||
with the .spec.host field. When this field is null, any path is
|
||||
matched.
|
||||
description: |-
|
||||
The paths to route. All requests which the Request Target matches any
|
||||
.spec.pathPrefixes (and the "Host" header matches any .spec.hosts)
|
||||
will be routed to the Service and Port specified in
|
||||
the scaleTargetRef.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
|
@ -99,33 +123,87 @@ spec:
|
|||
type: integer
|
||||
type: object
|
||||
scaleTargetRef:
|
||||
description: The name of the deployment to route HTTP requests to
|
||||
(and to autoscale).
|
||||
description: |-
|
||||
The name of the deployment to route HTTP requests to (and to autoscale).
|
||||
Including validation as a requirement to define either the PortName or the Port
|
||||
properties:
|
||||
deployment:
|
||||
description: The name of the deployment to scale according to
|
||||
HTTP traffic
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
port:
|
||||
description: The port to route to
|
||||
format: int32
|
||||
type: integer
|
||||
portName:
|
||||
description: The port to route to referenced by name
|
||||
type: string
|
||||
service:
|
||||
description: The name of the service to route to
|
||||
type: string
|
||||
required:
|
||||
- deployment
|
||||
- port
|
||||
- service
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: must define either the 'portName' or the 'port'
|
||||
rule: has(self.portName) != has(self.port)
|
||||
scaledownPeriod:
|
||||
description: (optional) Cooldown period value
|
||||
format: int32
|
||||
type: integer
|
||||
scalingMetric:
|
||||
description: (optional) Configuration for the metric used for scaling
|
||||
properties:
|
||||
concurrency:
|
||||
description: Scaling based on concurrent requests for a given
|
||||
target
|
||||
properties:
|
||||
targetValue:
|
||||
default: 100
|
||||
description: Target value for rate scaling
|
||||
type: integer
|
||||
type: object
|
||||
requestRate:
|
||||
description: Scaling based on the average rate during a specific
|
||||
time window for a given target
|
||||
properties:
|
||||
granularity:
|
||||
default: 1s
|
||||
description: Time granularity for rate calculation
|
||||
type: string
|
||||
targetValue:
|
||||
default: 100
|
||||
description: Target value for rate scaling
|
||||
type: integer
|
||||
window:
|
||||
default: 1m
|
||||
description: Time window for rate calculation
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
targetPendingRequests:
|
||||
description: (optional) Target metric value
|
||||
description: (optional) DEPRECATED (use ScalingMetric instead) Target
|
||||
metric value
|
||||
format: int32
|
||||
type: integer
|
||||
timeouts:
|
||||
description: (optional) Timeouts that override the global ones
|
||||
properties:
|
||||
conditionWait:
|
||||
description: How long to wait for the backing workload to have
|
||||
1 or more replicas before connecting and sending the HTTP request
|
||||
(Default is set by the KEDA_CONDITION_WAIT_TIMEOUT environment
|
||||
variable)
|
||||
type: string
|
||||
responseHeader:
|
||||
description: How long to wait between when the HTTP request is
|
||||
sent to the backing app and when response headers need to arrive
|
||||
(Default is set by the KEDA_RESPONSE_HEADER_TIMEOUT environment
|
||||
variable)
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- scaleTargetRef
|
||||
type: object
|
||||
|
@ -160,12 +238,6 @@ spec:
|
|||
type:
|
||||
description: Type of condition
|
||||
enum:
|
||||
- Created
|
||||
- Terminated
|
||||
- Error
|
||||
- Pending
|
||||
- Terminating
|
||||
- Unknown
|
||||
- Ready
|
||||
type: string
|
||||
required:
|
||||
|
@ -173,6 +245,12 @@ spec:
|
|||
- type
|
||||
type: object
|
||||
type: array
|
||||
targetService:
|
||||
description: TargetService reflects details about the scaled service.
|
||||
type: string
|
||||
targetWorkload:
|
||||
description: TargetWorkload reflects details about the scaled workload.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
|
|
|
@ -6,7 +6,7 @@ resources:
|
|||
- ../operator
|
||||
- ../scaler
|
||||
namespace: keda
|
||||
namePrefix: keda-http-add-on-
|
||||
namePrefix: keda-add-ons-http-
|
||||
labels:
|
||||
- includeSelectors: true
|
||||
includeTemplates: true
|
||||
|
|
|
@ -23,9 +23,11 @@ spec:
|
|||
containers:
|
||||
- name: interceptor
|
||||
image: ghcr.io/kedacore/http-add-on-interceptor
|
||||
args:
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDA_HTTP_ROUTING_TABLE_UPDATE_DURATION_MS
|
||||
value: "500"
|
||||
- name: KEDA_HTTP_CURRENT_NAMESPACE
|
||||
value: "keda"
|
||||
- name: KEDA_HTTP_PROXY_PORT
|
||||
|
@ -40,7 +42,7 @@ spec:
|
|||
value: "500ms"
|
||||
- name: KEDA_CONDITION_WAIT_TIMEOUT
|
||||
value: "20s"
|
||||
- name: KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS
|
||||
- name: KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS
|
||||
value: "1000"
|
||||
- name: KEDA_HTTP_FORCE_HTTP2
|
||||
value: "false"
|
||||
|
@ -57,6 +59,8 @@ spec:
|
|||
containerPort: 9090
|
||||
- name: proxy
|
||||
containerPort: 8080
|
||||
- name: metrics
|
||||
containerPort: 2223
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /livez
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: interceptor
|
||||
env:
|
||||
- name: OTEL_PROM_EXPORTER_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_PROM_EXPORTER_PORT
|
||||
value: "2223"
|
||||
- name: OTEL_EXPORTER_OTLP_METRICS_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_EXPORTER_OTLP_ENDPOINT
|
||||
value: "http://opentelemetry-collector.open-telemetry-system:4318"
|
||||
- name: OTEL_METRIC_EXPORT_INTERVAL
|
||||
value: "1"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_ENABLED
|
||||
value: "true"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL
|
||||
value: "http/protobuf"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
|
||||
value: "http://opentelemetry-collector.open-telemetry-system:4318/v1/traces"
|
||||
- name: OTEL_EXPORTER_OTLP_TRACES_INSECURE
|
||||
value: "true"
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- scaledobject.yaml
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
minReplicaCount: 1
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: interceptor
|
||||
ports:
|
||||
- name: proxy-tls
|
||||
containerPort: 8443
|
||||
env:
|
||||
- name: KEDA_HTTP_PROXY_TLS_ENABLED
|
||||
value: "true"
|
||||
- name: KEDA_HTTP_PROXY_TLS_CERT_PATH
|
||||
value: "/certs/tls.crt"
|
||||
- name: KEDA_HTTP_PROXY_TLS_KEY_PATH
|
||||
value: "/certs/tls.key"
|
||||
- name: KEDA_HTTP_PROXY_TLS_CERT_STORE_PATHS
|
||||
value: "/additional-certs"
|
||||
- name: KEDA_HTTP_PROXY_TLS_PORT
|
||||
value: "8443"
|
||||
volumeMounts:
|
||||
- readOnly: true
|
||||
mountPath: "/certs"
|
||||
name: certs
|
||||
- readOnly: true
|
||||
mountPath: "/additional-certs/abc-certs"
|
||||
name: abc-certs
|
||||
volumes:
|
||||
- name: certs
|
||||
secret:
|
||||
secretName: keda-tls
|
||||
- name: abc-certs
|
||||
secret:
|
||||
secretName: abc-certs
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- proxy.service.yaml
|
|
@ -0,0 +1,11 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: interceptor-proxy
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: proxy-tls
|
||||
protocol: TCP
|
||||
port: 8443
|
||||
targetPort: proxy-tls
|
|
@ -6,7 +6,11 @@ resources:
|
|||
- role_binding.yaml
|
||||
- admin.service.yaml
|
||||
- proxy.service.yaml
|
||||
- metrics.service.yaml
|
||||
- service_account.yaml
|
||||
- scaledobject.yaml
|
||||
configurations:
|
||||
- transformerconfig.yaml
|
||||
labels:
|
||||
- includeSelectors: true
|
||||
includeTemplates: true
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: interceptor-metrics
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 2223
|
||||
targetPort: metrics
|
|
@ -5,9 +5,17 @@ metadata:
|
|||
name: interceptor
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apps
|
||||
- ""
|
||||
resources:
|
||||
- deployments
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
metadata:
|
||||
name: interceptor
|
||||
spec:
|
||||
minReplicaCount: 3
|
||||
maxReplicaCount: 50
|
||||
pollingInterval: 1
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: interceptor
|
||||
triggers:
|
||||
- type: external
|
||||
metadata:
|
||||
scalerAddress: external-scaler:9090
|
||||
interceptorTargetPendingRequests: '200'
|
|
@ -0,0 +1,5 @@
|
|||
namePrefix:
|
||||
- kind: ScaledObject
|
||||
path: spec/scaleTargetRef/name
|
||||
- kind: ScaledObject
|
||||
path: spec/triggers/metadata/scalerAddress
|
|
@ -1,7 +0,0 @@
|
|||
# TODO(pedrotorres): remove after implementing new routing table
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: routing-table
|
||||
data:
|
||||
routing-table: "{}"
|
|
@ -24,18 +24,18 @@ spec:
|
|||
image: ghcr.io/kedacore/http-add-on-operator
|
||||
args:
|
||||
- --leader-elect
|
||||
# TODO(pedrotorres): remove after implementing new routing table
|
||||
- --admin-port=9090
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDAHTTP_OPERATOR_EXTERNAL_SCALER_SERVICE
|
||||
value: "keda-http-add-on-external-scaler"
|
||||
value: "keda-add-ons-http-external-scaler"
|
||||
- name: KEDAHTTP_OPERATOR_EXTERNAL_SCALER_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_OPERATOR_NAMESPACE
|
||||
value: "keda"
|
||||
- name: KEDA_HTTP_OPERATOR_WATCH_NAMESPACE
|
||||
value: ""
|
||||
# TODO(pedrotorres): remove after implementing new routing table
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 8080
|
||||
|
|
|
@ -1,11 +1,9 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- config_map.yaml
|
||||
- deployment.yaml
|
||||
- role.yaml
|
||||
- role_binding.yaml
|
||||
- service.yaml
|
||||
- service_account.yaml
|
||||
labels:
|
||||
- includeSelectors: true
|
||||
|
|
|
@ -49,18 +49,6 @@ metadata:
|
|||
name: operator
|
||||
namespace: keda
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
|
|
|
@ -1,12 +0,0 @@
|
|||
# TODO(pedrotorres): remove after implementing new routing table
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: operator-admin
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: admin
|
||||
protocol: TCP
|
||||
port: 9090
|
||||
targetPort: admin
|
|
@ -3,6 +3,7 @@ kind: Deployment
|
|||
metadata:
|
||||
name: scaler
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
spec:
|
||||
affinity:
|
||||
|
@ -22,20 +23,22 @@ spec:
|
|||
containers:
|
||||
- name: scaler
|
||||
image: ghcr.io/kedacore/http-add-on-scaler
|
||||
args:
|
||||
- --zap-log-level=info
|
||||
- --zap-encoder=console
|
||||
- --zap-time-encoding=rfc3339
|
||||
env:
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_DEPLOYMENT
|
||||
value: "keda-http-add-on-interceptor"
|
||||
value: "keda-add-ons-http-interceptor"
|
||||
- name: KEDA_HTTP_SCALER_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_NAMESPACE
|
||||
value: "keda"
|
||||
# TODO(pedrotorres): remove after implementing new routing table
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_SERVICE
|
||||
value: "keda-http-add-on-interceptor-admin"
|
||||
# TODO(pedrotorres): remove after implementing new routing table
|
||||
value: "keda-add-ons-http-interceptor-admin"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_ADMIN_PORT
|
||||
value: "9090"
|
||||
- name: KEDA_HTTP_SCALER_TARGET_PENDING_REQUESTS_INTERCEPTOR
|
||||
- name: KEDA_HTTP_SCALER_STREAM_INTERVAL_MS
|
||||
value: "200"
|
||||
ports:
|
||||
- name: grpc
|
||||
|
@ -44,10 +47,18 @@ spec:
|
|||
grpc:
|
||||
port: 9090
|
||||
service: liveness
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
grpc:
|
||||
port: 9090
|
||||
service: readiness
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
# TODO(pedrotorres): set better default values avoiding overcommitment
|
||||
resources:
|
||||
requests:
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: scaler
|
||||
spec:
|
||||
replicas: 1
|
|
@ -0,0 +1,4 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- deployment.yaml
|
|
@ -12,14 +12,6 @@ rules:
|
|||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- http.keda.sh
|
||||
resources:
|
||||
|
@ -28,18 +20,3 @@ rules:
|
|||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: scaler
|
||||
namespace: keda
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
<img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=bd8914ff-fcda-4c0c-ab57-6fc671ae6cff" style="display:none;" />
|
|
@ -23,7 +23,7 @@ The [operator](../operator) runs inside the Kubernetes namespace to which they'r
|
|||
|
||||
- Update an internal routing table that maps incoming HTTP hostnames to internal applications.
|
||||
- Furnish this routing table information to interceptors so that they can properly route requests.
|
||||
- Create a [`ScaledObject`](https://keda.sh/docs/2.3/concepts/scaling-deployments/#scaledobject-spec) for the `Deployment` specified in the `HTTPScaledObject` resource.
|
||||
- Create a [`ScaledObject`](https://keda.sh/docs/latest/concepts/scaling-deployments/#scaledobject-spec) for the `Deployment` specified in the `HTTPScaledObject` resource.
|
||||
|
||||
When the `HTTPScaledObject` is deleted, the operator reverses all of the aforementioned actions.
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
## Why does this project route HTTP requests?
|
||||
|
||||
In order to autoscale a `Deployment`, KEDA-HTTP needs to be involved with routing HTTP requests. However, the project is minimally involved with routing and we're working on ways to get out of the "critical path" of an HTTP request as much as possible. For more information, please see our [scope](./scope.md) document.
|
||||
In order to autoscale a workload, KEDA-HTTP needs to be involved with routing HTTP requests. However, the project is minimally involved with routing and we're working on ways to get out of the "critical path" of an HTTP request as much as possible. For more information, please see our [scope](./scope.md) document.
|
||||
|
||||
## How is this project similar or different from [Osiris](https://github.com/deislabs/osiris)?
|
||||
|
||||
|
@ -13,7 +13,7 @@ Osiris and KEDA-HTTP have similar features:
|
|||
|
||||
However, Osiris and KEDA-HTTP differ in several ways:
|
||||
|
||||
- Autoscaling concerns are implemented separately from the application resources - `Service`, `Ingress`, `Deployment` and more in KEDA-HTTP. With Osiris, those concerns are baked into each app resource.
|
||||
- Autoscaling concerns are implemented separately from the application resources - `Service`, `Ingress`, `Deployment`, `StatefulSet`, `/scale` and more in KEDA-HTTP. With Osiris, those concerns are baked into each app resource.
|
||||
- The KEDA-HTTP operator can automatically deploy and configure networking and compute resources necessary for an HTTP application to autoscale. Osiris does not have this functionality.
|
||||
- Osiris is currently archived in GitHub.
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
The HTTP Add-on is highly modular and, as expected, builds on top of KEDA core. Below are some additional components:
|
||||
|
||||
- **Operator** - watches for `ScaledHTTPObject` CRD resources and creates necessary backing Kubernetes resources (e.g. `Deployment`s, `Service`s, `ScaledObject`s, and so forth)
|
||||
- **Operator** - watches for `HTTPScaledObject` CRD resources and creates necessary backing Kubernetes resources (e.g. `Deployment`s, `Service`s, `ScaledObject`s, and so forth)
|
||||
- **Scaler** - communicates scaling-related metrics to KEDA. By default, the operator will install this for you as necessary.
|
||||
- **Interceptor** - a cluster-internal proxy that proxies incoming HTTP requests, communicating HTTP queue size metrics to the scaler, and holding requests in a temporary request queue when there are not yet any available app `Pod`s ready to serve. By default, the operator will install this for you as necessary.
|
||||
>There is [pending work](https://github.com/kedacore/http-add-on/issues/354) that may eventually make this component optional.
|
||||
|
@ -19,9 +19,9 @@ Before you install any of these components, you need to install KEDA. Below are
|
|||
>This document will rely on environment variables such as `${NAMESPACE}` to indicate a value you should customize and provide to the relevant command. In the below `helm install` command, `${NAMESPACE}` should be the namespace you'd like to install KEDA into.
|
||||
|
||||
```console
|
||||
$ helm repo add kedacore https://kedacore.github.io/charts
|
||||
$ helm repo update
|
||||
$ helm install keda kedacore/keda --namespace ${NAMESPACE} --create-namespace
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
helm repo update
|
||||
helm install keda kedacore/keda --namespace ${NAMESPACE} --create-namespace
|
||||
```
|
||||
|
||||
>The above command installs KEDA in cluster-global mode. Add `--set watchNamespace=<target namespace>` to install KEDA in namespaced mode.
|
||||
|
@ -49,7 +49,7 @@ There are a few values that you can pass to the above `helm install` command by
|
|||
>If you want to install the latest build of the HTTP Add-on, set `version` to `canary`:
|
||||
|
||||
```console
|
||||
$ helm install http-add-on kedacore/keda-add-ons-http --create-namespace --namespace ${NAMESPACE} --set images.tag=canary
|
||||
helm install http-add-on kedacore/keda-add-ons-http --create-namespace --namespace ${NAMESPACE} --set images.tag=canary
|
||||
```
|
||||
|
||||
For an exhaustive list of configuration options, see the official HTTP Add-on chart [values.yaml file](https://github.com/kedacore/charts/blob/master/http-add-on/values.yaml).
|
||||
|
@ -59,23 +59,30 @@ For an exhaustive list of configuration options, see the official HTTP Add-on ch
|
|||
Local clusters like [Microk8s](https://microk8s.io/) offer in-cluster image registries. These are popular tools to speed up and ease local development. If you use such a tool for local development, we recommend that you use and push your images to its local registry. When you do, you'll want to set your `images.*` variables to the address of the local registry. In the case of MicroK8s, that address is `localhost:32000` and the `helm install` command would look like the following:
|
||||
|
||||
```console
|
||||
$ helm repo add kedacore https://kedacore.github.io/charts
|
||||
$ helm repo update
|
||||
$ helm pull kedacore/keda-add-ons-http --untar --untardir ./charts
|
||||
$ helm upgrade kedahttp ./charts/keda-add-ons-http \
|
||||
--install \
|
||||
--namespace ${NAMESPACE} \
|
||||
--create-namespace \
|
||||
--set image=localhost:32000/keda-http-operator \
|
||||
--set images.scaler=localhost:32000/keda-http-scaler \
|
||||
--set images.interceptor=localhost:32000/keda-http-interceptor
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
helm repo update
|
||||
helm pull kedacore/keda-add-ons-http --untar --untardir ./charts
|
||||
helm upgrade kedahttp ./charts/keda-add-ons-http \
|
||||
--install \
|
||||
--namespace ${NAMESPACE} \
|
||||
--create-namespace \
|
||||
--set image=localhost:32000/keda-http-operator \
|
||||
--set images.scaler=localhost:32000/keda-http-scaler \
|
||||
--set images.interceptor=localhost:32000/keda-http-interceptor
|
||||
```
|
||||
|
||||
## Compatibility Table
|
||||
|
||||
| HTTP Add-On version | KEDA version | Kubernetes version |
|
||||
|---------------------|--------------|--------------------|
|
||||
| 0.5.0 | v2.9 | v1.23 - v1.25 |
|
||||
| HTTP Add-On version | KEDA version | Kubernetes version |
|
||||
|---------------------|-------------------|--------------------|
|
||||
| main | v2.16 | v1.30 - v1.32 |
|
||||
| 0.10.0 | v2.16 | v1.30 - v1.32 |
|
||||
| 0.9.0 | v2.16 | v1.29 - v1.31 |
|
||||
| 0.8.0 | v2.14 | v1.27 - v1.29 |
|
||||
| 0.7.0 | v2.13 | v1.27 - v1.29 |
|
||||
| 0.6.0 | v2.12 | v1.26 - v1.28 |
|
||||
| 0.5.1 | v2.10 | v1.24 - v1.26 |
|
||||
| 0.5.0 | v2.9 | v1.23 - v1.25 |
|
||||
|
||||
## Next Steps
|
||||
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
# Integrations
|
||||
|
||||
## Istio
|
||||
|
||||
### Configuration Steps
|
||||
|
||||
1. **Proxy Service in Virtual Service:**
|
||||
|
||||
- Within the Istio virtual service definition, add a proxy service as a route destination.
|
||||
- Set the host of this proxy service to `keda-add-ons-http-interceptor-proxy` (the KEDA HTTP Addon interceptor service).
|
||||
- Set the port to `8080` (the default interceptor port).
|
||||
|
||||
**Example yaml**
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: example
|
||||
namespace: default
|
||||
spec:
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: keda-add-ons-http-interceptor-proxy
|
||||
port: 8080
|
||||
```
|
||||
|
||||
2. **Namespace Alignment:**
|
||||
|
||||
- Ensure that both the KEDA HTTP Addon and the Istio virtual service are deployed within the same Kubernetes namespace. This ensures proper communication between the components.
|
||||
|
||||
### Behavior
|
||||
|
||||
- When a user makes a request, the Istio virtual service routes it to the KEDA HTTP Addon interceptor service.
|
||||
- The interceptor service captures request metrics and relays them to the KEDA scaler component.
|
||||
- Based on these metrics and scaling rules defined in the KEDA configuration, the KEDA scaler automatically scales the target workload (e.g., a deployment) up or down (including scaling to zero).
|
||||
|
||||
### Troubleshooting Tips
|
||||
|
||||
1. **Error: `context marked done while waiting for workload reach > 0 replicas`**
|
||||
|
||||
- This error indicates that the `KEDA_CONDITION_WAIT_TIMEOUT` value (default: 20 seconds) might be too low. The workload scaling process may not be complete within this timeframe.
|
||||
- To increase the timeout:
|
||||
- If using Helm, adjust the `interceptor.replicas.waitTimeout` parameter (see reference below).
|
||||
- Reference: [https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L139](https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L139)
|
||||
|
||||
2. **502 Errors with POST Requests:**
|
||||
|
||||
- You might encounter 502 errors during POST requests when the request is routed through the interceptor service. This could be due to insufficient timeout settings.
|
||||
- To adjust timeout parameters:
|
||||
- If using Helm, modify the following parameters (see reference below):
|
||||
- `KEDA_HTTP_CONNECT_TIMEOUT`
|
||||
- `KEDA_RESPONSE_HEADER_TIMEOUT`
|
||||
- `KEDA_HTTP_EXPECT_CONTINUE_TIMEOUT`
|
||||
- Reference: [https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L152](https://github.com/kedacore/charts/blob/main/http-add-on/values.yaml#L152)
|
||||
|
||||
3. **Immediate Scaling Down to Zero:**
|
||||
- If `minReplica` is set to 0 in the HTTPScaledObject, the application will immediately scale down to 0.
|
||||
- There's currently no built-in mechanism to delay this initial scaling.
|
||||
- A PR is in progress to add this support: [https://github.com/kedacore/keda/pull/5478](https://github.com/kedacore/keda/pull/5478)
|
||||
- As a workaround, keep `minReplica` initially as 1 and update it to 0 after the desired delay.
|
||||
|
||||
---
|
||||
|
||||
## Azure Front Door
|
||||
|
||||
### Configuration Steps
|
||||
|
||||
1. **Service Setup in Front Door:**
|
||||
- Set up Azure Front Door to route traffic to your AKS cluster.
|
||||
- Ensure that the `Origin Host` header matches the actual AKS host. Front Door enforces case-sensitive routing, so configure the `Origin Host` exactly as the AKS host name.
|
||||
|
||||
2. **KEDA HTTP Add-on Integration:**
|
||||
- Use an `HTTPScaledObject` to manage scaling based on incoming traffic.
|
||||
- Front Door should route traffic to the KEDA HTTP Add-on interceptor service in your AKS cluster.
|
||||
|
||||
3. **Case-Sensitive Hostnames:**
|
||||
- Be mindful that Azure Front Door treats the `Origin Host` header in a case-sensitive manner.
|
||||
- Ensure consistency between the AKS ingress hostname (e.g., `foo.bar.com`) and Front Door configuration.
|
||||
|
||||
### Troubleshooting Tips
|
||||
|
||||
- **404 Error for Hostnames with Different Case:**
|
||||
- Requests routed with inconsistent casing (e.g., `foo.Bar.com` vs. `foo.bar.com`) will trigger 404 errors. Make sure the `Origin Host` header matches the AKS ingress host exactly.
|
||||
- If you encounter errors like `PANIC=value method k8s.io/apimachinery/pkg/types.NamespacedName.MarshalLog called using nil *NamespacedName pointer`, verify the `Origin Host` header configuration.
|
||||
|
||||
### Expected Behavior
|
||||
|
||||
- Azure Front Door routes traffic to AKS based on a case-sensitive host header.
|
||||
- The KEDA HTTP Add-on scales the workload in response to traffic, based on predefined scaling rules.
|
||||
|
||||
|
||||
---
|
|
@ -0,0 +1,62 @@
|
|||
# Configuring metrics for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
### Exportable metrics:
|
||||
* **Pending request count** - the number of pending requests for a given host.
|
||||
* **Total request count** - the total number of requests for a given host with method, path and response code attributes.
|
||||
|
||||
There are currently 2 supported methods for exposing metrics from the interceptor proxy service - via a Prometheus compatible metrics endpoint or by pushing metrics to an OTEL HTTP collector.
|
||||
|
||||
### Configuring the Prometheus compatible metrics endpoint
|
||||
When configured, the interceptor proxy can expose metrics on a Prometheus compatible endpoint.
|
||||
|
||||
This endpoint can be enabled by setting the `OTEL_PROM_EXPORTER_ENABLED` environment variable to `true` on the interceptor deployment (`true` by default) and by setting `OTEL_PROM_EXPORTER_PORT` to an unused port for the endpoint to be made available on (`2223` by default).
|
||||
|
||||
### Configuring the OTEL HTTP exporter
|
||||
When configured, the interceptor proxy can export metrics to a OTEL HTTP collector.
|
||||
|
||||
The OTEL exporter can be enabled by setting the `OTEL_EXPORTER_OTLP_METRICS_ENABLED` environment variable to `true` on the interceptor deployment (`false` by default). When enabled the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable must also be configured so the exporter knows what collector to send the metrics to (e.g. http://opentelemetry-collector.open-telemetry-system:4318).
|
||||
|
||||
If you need to provide any headers such as authentication details in order to utilise your OTEL collector you can add them into the `OTEL_EXPORTER_OTLP_HEADERS` environment variable. The frequency at which the metrics are exported can be configured by setting `OTEL_METRIC_EXPORT_INTERVAL` to the number of seconds you require between each export interval (`30` by default).
|
||||
|
||||
# Configuring TLS for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
The interceptor proxy has the ability to run both a HTTP and HTTPS server simultaneously to allow you to scale workloads that use either protocol. By default, the interceptor proxy will only serve over HTTP, but this behavior can be changed by configuring the appropriate environment variables on the deployment.
|
||||
|
||||
The TLS server can be enabled by setting the environment variable `KEDA_HTTP_PROXY_TLS_ENABLED` to `true` on the interceptor deployment (`false` by default). The TLS server will start on port `8443` by default, but this can be configured by setting `KEDA_HTTP_PROXY_TLS_PORT` to your desired port number. The TLS server will require valid TLS certificates to start, the path to the certificates can be configured via the `KEDA_HTTP_PROXY_TLS_CERT_PATH` and `KEDA_HTTP_PROXY_TLS_KEY_PATH` environment variables (`/certs/tls.crt` and `/certs/tls.key` by default).
|
||||
|
||||
For setting multiple TLS certs, set `KEDA_HTTP_PROXY_TLS_CERT_STORE_PATHS` with comma-separated list of directories that will be recursively searched for any valid cert/key pairs. Currently, two naming patterns are supported
|
||||
* `XYZ.crt` + `XYZ.key` - this is a convention when using Kubernetes Secrets of type tls
|
||||
* `XYZ.pem` + `XYZ-key.pem`
|
||||
|
||||
To disable certificate chain verification, set `KEDA_HTTP_PROXY_TLS_SKIP_VERIFY` to `true`
|
||||
|
||||
The matching between certs and requests is performed during the TLS ClientHello message, where the SNI server name is compared to the SANs provided in each cert and the first matching cert will be used for the rest of the TLS handshake.
|
||||
# Configuring tracing for the KEDA HTTP Add-on interceptor proxy
|
||||
|
||||
### Supported Exporters:
|
||||
* **console** - The console exporter is useful for development and debugging tasks, and is the simplest to set up.
|
||||
* **http/protobuf** - To send trace data to an OTLP endpoint (like the collector or Jaeger >= v1.35.0) you’ll want to configure an OTLP exporter that sends to your endpoint.
|
||||
* **grpc** - To configure the exporter to send trace data over a gRPC connection to an OTLP endpoint (like the collector or Jaeger >= v1.35.0) you’ll want to configure an OTLP exporter that sends to your endpoint.
|
||||
|
||||
### Configuring tracing with console exporter
|
||||
|
||||
To enable tracing with the console exporter, the `OTEL_EXPORTER_OTLP_TRACES_ENABLED` environment variable should be set to `true` on the interceptor deployment. (`false` by default).
|
||||
Secondly set `OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` to `console` (`console` by default). Other protocols include (`http/protobuf` and `grpc`).
|
||||
Finally set `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` to `"http://localhost:4318/v1/traces"` (`"http://localhost:4318/v1/traces"` by default).
|
||||
|
||||
|
||||
### Configuring tracing with OTLP exporter
|
||||
When configured, the interceptor proxy can export metrics to a OTEL HTTP collector.
|
||||
|
||||
To enable tracing with otlp exporter, the `OTEL_EXPORTER_OTLP_TRACES_ENABLED` environment variable should be set to `true` on the interceptor deployment. (`false` by default).
|
||||
Secondly set `OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` to `otlphttp` (`console` by default). Other protocols include (`http/protobuf` and `grpc`)
|
||||
Finally set `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` to the collector to send the traces to (e.g. http://opentelemetry-collector.open-telemetry-system:4318/v1/traces) (`"http://localhost:4318/v1/traces"` by default).
|
||||
NOTE: full path is required to be set including <scheme><url><port><path>
|
||||
|
||||
|
||||
Optional variables
|
||||
`OTEL_EXPORTER_OTLP_HEADERS` - To pass any extra headers to the spans to utilise your OTEL collector e.g. authentication details (`"key1=value1,key2=value2"`)
|
||||
`OTEL_EXPORTER_OTLP_TRACES_INSECURE` - To send traces to the tracing via HTTP rather than HTTPS (`false` by default)
|
||||
`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - The batcher timeout in seconds to send batch of data points (`5` by default)
|
||||
|
||||
### Configuring Service Failover
|
|
@ -7,5 +7,7 @@ Here is an overview of detailed documentation:
|
|||
- [Design](design.md)
|
||||
- [Use-Cases](use_cases.md)
|
||||
- [Walkthrough](walkthrough.md)
|
||||
- [Operate](operate.md)
|
||||
- [Developing](developing.md)
|
||||
- [Integrations](integrations.md)
|
||||
- [FAQ](faq.md)
|
||||
|
|
|
@ -0,0 +1,136 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.10.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation will disable the ScaledObject generation and management while keeping the routing and metrics available. This is done by removing the current ScaledObject if it has already been created, allowing the use of user-managed ScaledObjects that point to the add-on scaler directly (supporting all the ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active request before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with sparse and random traffic could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive between themselves.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Request information is stored in memory; aggregating long periods (longer than 5 minutes) or too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although updating `window` and/or `granularity` is possible, the process just replaces all the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -0,0 +1,73 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.6.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`, and we'll focus on the `spec` field.
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment`
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it. Conversely, if your application has below this number of pending requests, the HTTP add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active request before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with sparse and random traffic could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
|
@ -0,0 +1,87 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.7.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`, and we'll focus on the `spec` field.
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it. Conversely, if your application has below this number of pending requests, the HTTP add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active request before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with sparse and random traffic could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
|
@ -0,0 +1,144 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.8.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation will disable the ScaledObject generation and management while keeping the routing and metrics available. This is done by removing the current ScaledObject if it has already been created, allowing the use of user-managed ScaledObjects that point to the add-on scaler directly (supporting all the ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `deployment` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
This is the name of the `Deployment` to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this `Deployment`. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the `Deployment` you gave in the `deployment` field.
|
||||
|
||||
### `targetPendingRequests` (DEPRECATED: removed as part of v0.9.0)
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the number of _pending_ (or in-progress) requests that your application needs to have before the HTTP Add-on will scale it. Conversely, if your application has below this number of pending requests, the HTTP add-on will scale it down.
|
||||
|
||||
For example, if you set this field to 100, the HTTP Add-on will scale your app up if it sees that there are 200 in-progress requests. On the other hand, it will scale down if it sees that there are only 20 in-progress requests. Note that it will _never_ scale your app to zero replicas unless there are _no_ requests in-progress. Even if you set this value to a very high number and only have a single in-progress request, your app will still have one replica.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active request before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with sparse and random traffic could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive between themselves.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Request information is stored in memory; aggregating long periods (longer than 5 minutes) or too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although updating `window` and/or `granularity` is possible, the process just replaces all the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -0,0 +1,136 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `v0.9.0` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation will disable the ScaledObject generation and management while keeping the routing and metrics available. This is done by removing the current ScaledObject if it has already been created, allowing the use of user-managed ScaledObjects that point to the add-on scaler directly (supporting all the ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active request before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with sparse and random traffic could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive between themselves.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Request information is stored in memory; aggregating long periods (longer than 5 minutes) or too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although updating `window` and/or `granularity` is possible, the process just replaces all the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -0,0 +1,136 @@
|
|||
# The `HTTPScaledObject`
|
||||
|
||||
>This document reflects the specification of the `HTTPScaledObject` resource for the `vX.X.X` version.
|
||||
|
||||
Each `HTTPScaledObject` looks approximately like the below:
|
||||
|
||||
```yaml
|
||||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric: # requestRate and concurrency are mutually exclusive
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
||||
concurrency:
|
||||
targetValue: 100
|
||||
```
|
||||
|
||||
This document is a narrated reference guide for the `HTTPScaledObject`.
|
||||
|
||||
## `httpscaledobject.keda.sh/skip-scaledobject-creation` annotation
|
||||
|
||||
This annotation will disable the ScaledObject generation and management while keeping the routing and metrics available. This is done by removing the current ScaledObject if it has already been created, allowing the use of user-managed ScaledObjects that point to the add-on scaler directly (supporting all the ScaledObject configurations and multiple triggers). You can read more about this [here](./../../walkthrough.md#integrating-http-add-on-scaler-with-other-keda-scalers)
|
||||
|
||||
|
||||
## `hosts`
|
||||
|
||||
These are the hosts to apply this scaling rule to. All incoming requests with one of these values in their `Host` header will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `pathPrefixes`
|
||||
|
||||
>Default: "/"
|
||||
|
||||
These are the paths to apply this scaling rule to. All incoming requests with one of these values as path prefix will be forwarded to the `Service` and port specified in the below `scaleTargetRef`, and that same `scaleTargetRef`'s workload will be scaled accordingly.
|
||||
|
||||
## `scaleTargetRef`
|
||||
|
||||
This is the primary and most important part of the `spec` because it describes:
|
||||
|
||||
1. The incoming host to apply this scaling rule to.
|
||||
2. What workload to scale.
|
||||
3. The service to which to route HTTP traffic.
|
||||
|
||||
### `name`
|
||||
|
||||
This is the name of the workload to scale. It must exist in the same namespace as this `HTTPScaledObject` and shouldn't be managed by any other autoscaling system. This means that there should not be any `ScaledObject` already created for this workload. The HTTP Add-on will manage a `ScaledObject` internally.
|
||||
|
||||
### `kind`
|
||||
|
||||
This is the kind of the workload to scale.
|
||||
|
||||
### `apiVersion`
|
||||
|
||||
This is the apiVersion of the workload to scale.
|
||||
|
||||
### `service`
|
||||
|
||||
This is the name of the service to route traffic to. The add-on will create autoscaling and routing components that route to this `Service`. It must exist in the same namespace as this `HTTPScaledObject` and should route to the same `Deployment` as you entered in the `deployment` field.
|
||||
|
||||
### `port`
|
||||
|
||||
This is the port to route to on the service that you specified in the `service` field. It should be exposed on the service and should route to a valid `containerPort` on the workload you gave.
|
||||
|
||||
### `portName`
|
||||
|
||||
Alternatively, the port can be referenced using its `name` as defined in the `Service`.
|
||||
|
||||
### `scaledownPeriod`
|
||||
|
||||
>Default: 300
|
||||
|
||||
The period to wait after the last reported active request before scaling the resource back to 0.
|
||||
|
||||
> Note: This time is measured on the KEDA side based on in-flight requests, so workloads with sparse and random traffic could have unexpected scale-to-0 cases. In those cases we recommend extending this period to ensure it doesn't happen.
|
||||
|
||||
|
||||
## `scalingMetric`
|
||||
|
||||
This is the second most important part of the `spec` because it describes how the workload has to scale. This section contains 2 nested sections (`requestRate` and `concurrency`) which are mutually exclusive between themselves.
|
||||
|
||||
### `requestRate`
|
||||
|
||||
This section enables scaling based on the request rate.
|
||||
|
||||
> **NOTE**: Request information is stored in memory; aggregating long periods (longer than 5 minutes) or too fine a granularity (less than 1 second) could produce performance issues or increased memory usage.
|
||||
|
||||
> **NOTE 2**: Although updating `window` and/or `granularity` is possible, the process just replaces all the stored request count information. This can produce unexpected scaling behaviours until the window is populated again.
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
||||
|
||||
#### `window`
|
||||
|
||||
>Default: "1m"
|
||||
|
||||
This value defines the aggregation window for the request rate calculation.
|
||||
|
||||
#### `granularity`
|
||||
|
||||
>Default: "1s"
|
||||
|
||||
This value defines the granularity of the aggregated requests for the request rate calculation.
|
||||
|
||||
### `concurrency`
|
||||
|
||||
This section enables scaling based on the request concurrency.
|
||||
|
||||
> **NOTE**: This is the only scaling behaviour before v0.8.0
|
||||
|
||||
#### `targetValue`
|
||||
|
||||
>Default: 100
|
||||
|
||||
This is the target value for the scaling configuration.
|
|
@ -19,11 +19,11 @@ Moving this application to Kubernetes may make sense for several reasons, but th
|
|||
|
||||
If the application _is_ being moved to Kubernetes, you would follow these steps to get it autoscaling and routing with KEDA-HTTP:
|
||||
|
||||
- Create a `Deployment` and `Service`
|
||||
- Create a workload and `Service`
|
||||
- [Install](./install.md) the HTTP Add-on
|
||||
- Create a single `HTTPScaledObject` in the same namespace as the `Deployment` and `Service` you created
|
||||
- Create a single `HTTPScaledObject` in the same namespace as the workload and `Service` you created
|
||||
|
||||
At that point, the operator will create the proper autoscaling and routing infrastructure behind the scenes and the application will be ready to scale.
|
||||
At that point, the operator will create the proper autoscaling and routing infrastructure behind the scenes and the application will be ready to scale. Any request received by the interceptor with the proper host will be routed to the proper backend.
|
||||
|
||||
## Current HTTP Server in Kubernetes
|
||||
|
||||
|
@ -36,6 +36,6 @@ In this case, the reasoning for adding the HTTP Add-on would be clear - adding a
|
|||
Getting the HTTP Add-on working can be done transparently and without downtime to the application:
|
||||
|
||||
- [Install](./install.md) the add-on. This step will have no effect on the running application.
|
||||
- Create a new `HTTPScaledObject`. This step activates autoscaling for the `Deployment` that you specify and the application will immediately start scaling up and down based on incoming traffic through the interceptor that was created.
|
||||
- Create a new `HTTPScaledObject`. This step activates autoscaling for the workload that you specify and the application will immediately start scaling up and down based on incoming traffic through the interceptor that was created.
|
||||
|
||||
[Go back to landing page](./)
|
||||
|
|
|
@ -11,10 +11,17 @@ If you haven't installed KEDA and the HTTP Add-on (this project), please do so f
|
|||
You'll need to install a `Deployment` and `Service` first. You'll tell the add-on to begin scaling it up and down after this step. We've provided a [Helm](https://helm.sh) chart in this repository that you can use to try it out. Use this command to create the resources you need.
|
||||
|
||||
```console
|
||||
$ helm install xkcd ./examples/xkcd -n ${NAMESPACE}
|
||||
helm install xkcd ./examples/xkcd -n ${NAMESPACE}
|
||||
```
|
||||
|
||||
You'll need to clone the repository to get access to this chart. If you have your own `Deployment` and `Service` installed, you can go right to creating an `HTTPScaledObject` in the next section.
|
||||
#### xkcd exposed with GatewayAPI
|
||||
Alternatively if you'd like to try the addon along with GatewayAPI, you can install first GatewayAPI CRDs and some GatewayAPI implementation, for example as described in a [section below](#installing-and-using-the-eg-gatewayapi) and install the application as with `httproute=true` which will deploy properly configured `HTTPRoute` too.
|
||||
|
||||
```console
|
||||
helm install xkcd ./examples/xkcd -n ${NAMESPACE} --set httproute=true
|
||||
```
|
||||
|
||||
You'll need to clone the repository to get access to this chart. If you have your own workload and `Service` installed, you can go right to creating an `HTTPScaledObject` in the next section.
|
||||
|
||||
>If you are running KEDA and the HTTP Add-on in cluster-global mode, you can install the XKCD chart in any namespace you choose. If you do so, make sure you add `--set ingressNamespace=${NAMESPACE}` to the above installation command.
|
||||
|
||||
|
@ -25,29 +32,27 @@ You'll need to clone the repository to get access to this chart. If you have you
|
|||
You interact with the operator via a CRD called `HTTPScaledObject`. This CRD object instructs interceptors to forward requests for a given host to your app's backing `Service`. To get an example app up and running, read the notes below and then run the subsequent command from the root of this repository.
|
||||
|
||||
```console
|
||||
$ kubectl create -n $NAMESPACE -f examples/v0.3.0/httpscaledobject.yaml
|
||||
kubectl apply -n $NAMESPACE -f examples/v0.10.0/httpscaledobject.yaml
|
||||
```
|
||||
|
||||
>If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](./ref/v0.3.0/http_scaled_object.md).
|
||||
>If you'd like to learn more about this object, please see the [`HTTPScaledObject` reference](./ref/v0.10.0/http_scaled_object.md).
|
||||
|
||||
## Testing Your Installation
|
||||
|
||||
You've now installed a web application and activated autoscaling by creating an `HTTPScaledObject` for it. For autoscaling to work properly, HTTP traffic needs to route through the `Service` that the add-on has set up. You can use `kubectl port-forward` to quickly test things out:
|
||||
|
||||
```console
|
||||
$ kubectl port-forward svc/keda-add-ons-http-interceptor-proxy -n ${NAMESPACE} 8080:80
|
||||
kubectl port-forward svc/keda-add-ons-http-interceptor-proxy -n ${NAMESPACE} 8080:8080
|
||||
```
|
||||
|
||||
### Routing to the Right `Service`
|
||||
|
||||
As said above, you need to route your HTTP traffic to the `Service` that the add-on has created. If you have existing systems - like an ingress controller - you'll need to anticipate the name of these created `Service`s. Each one will be named consistently like so, in the same namespace as the `HTTPScaledObject` and your application (i.e. `$NAMESPACE`):
|
||||
As said above, you need to route your HTTP traffic to the `Service` that the add-on has created during the installation. If you have existing systems - like an ingress controller - you'll need to anticipate the name of these created `Service`s. Each one will be named consistently like so, in the same namespace as the `HTTPScaledObject` and your application (i.e. `$NAMESPACE`):
|
||||
|
||||
```console
|
||||
keda-add-ons-http-interceptor-proxy
|
||||
```
|
||||
|
||||
>This is installed by the [Helm chart](https://github.com/kedacore/charts/tree/master/http-add-on) as a `ClusterIP` `Service` by default.
|
||||
|
||||
#### Installing and Using the [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/#using-helm) Ingress Controller
|
||||
|
||||
As mentioned above, the `Service` that the add-on creates will be inaccessible over the network from outside of your Kubernetes cluster.
|
||||
|
@ -64,10 +69,64 @@ helm install ingress-nginx ingress-nginx/ingress-nginx -n ${NAMESPACE}
|
|||
|
||||
An [`Ingress`](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource was already created as part of the [xkcd chart](../examples/xkcd/templates/ingress.yaml), so the installed NGINX ingress controller will initialize, detect the `Ingress`, and begin routing to the xkcd interceptor `Service`.
|
||||
|
||||
>NOTE: You may have to create an external service `type: ExternalName` pointing to the interceptor namespace and use it from `Ingress` manifest.
|
||||
|
||||
When you're ready, please run `kubectl get svc -n ${NAMESPACE}`, find the `ingress-nginx-controller` service, and copy and paste its `EXTERNAL-IP`. This is the IP address that your application will be running at on the public internet.
|
||||
|
||||
>Note: you should go further and set your DNS records appropriately and set up a TLS certificate for this IP address. Instructions to do that are out of scope of this document, though.
|
||||
|
||||
#### Installing and Using the [eg](https://gateway.envoyproxy.io/latest/install/install-helm/) GatewayAPI
|
||||
|
||||
Similarly to exposing your service with `Ingress`, you can expose your service with `HTTPRoute` as part of [GatewayAPI](https://github.com/kubernetes-sigs/gateway-api). The following steps describe how to install one of many GatewayAPI implementations - Envoy Gateway.
|
||||
You should install the `xkcd` helm chart with `--set httproute=true` as [explained above](#xkcd-exposed-with-gatewayapi).
|
||||
|
||||
The helm chart is publicly available and hosted on DockerHub
|
||||
```console
|
||||
helm install eg oci://docker.io/envoyproxy/gateway-helm --version v1.0.2 -n envoy-gateway-system --create-namespace
|
||||
```
|
||||
Before creating new `Gateway`, wait for Envoy Gateway to become available
|
||||
```console
|
||||
kubectl wait --timeout=5m -n envoy-gateway-system deployment/envoy-gateway --for=condition=Available
|
||||
```
|
||||
Create `GatewayClass` and `Gateway`
|
||||
```console
|
||||
cat << 'EOF' | kubectl apply -f -
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: GatewayClass
|
||||
metadata:
|
||||
name: eg
|
||||
spec:
|
||||
controllerName: gateway.envoyproxy.io/gatewayclass-controller
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: eg
|
||||
namespace: envoy-gateway-system
|
||||
spec:
|
||||
gatewayClassName: eg
|
||||
listeners:
|
||||
- name: http
|
||||
protocol: HTTP
|
||||
port: 80
|
||||
allowedRoutes:
|
||||
namespaces:
|
||||
from: All
|
||||
EOF
|
||||
```
|
||||
> 💡 Note the `ExternalName` type `Service` used to route traffic from `Ingress` defined in one namespace to the interceptor `Service` defined in another is not necessary with GatewayAPI.
|
||||
> The GatewayAPI defines [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/api-types/referencegrant/) to allow `HTTPRoutes` referencing `Services` and other types of backend from different `Namespaces`.
|
||||
|
||||
You can find the IP address needed for following the rest of the document with
|
||||
```console
|
||||
kubectl get gateway -n envoy-gateway-system
|
||||
```
|
||||
For example (your IP will likely differ)
|
||||
```
|
||||
NAME CLASS ADDRESS PROGRAMMED AGE
|
||||
eg eg 172.24.255.201 True 16s
|
||||
```
|
||||
|
||||
### Making an HTTP Request to your App
|
||||
|
||||
Now that you have your application running and your ingress configured, you can issue an HTTP request. To do so, you'll need to know the IP address to request. If you're using an ingress controller, that is the IP of the ingress controller's `Service`. If you're using a "raw" `Service` with `type: LoadBalancer`, that is the IP address of the `Service` itself.
|
||||
|
@ -75,9 +134,53 @@ Now that you have your application running and your ingress configured, you can
|
|||
Regardless, you can use the below `curl` command to make a request to your application:
|
||||
|
||||
```console
|
||||
curl -H "Host: myhost.com" <Your IP>
|
||||
curl -H "Host: myhost.com" <Your IP>/test
|
||||
```
|
||||
|
||||
>Note the `-H` flag above to specify the `Host` header. This is needed to tell the interceptor how to route the request. If you have a DNS name set up for the IP, you don't need this header.
|
||||
|
||||
You can also use a port-forward to the interceptor service for making the request:
|
||||
|
||||
```console
|
||||
kubectl port-forward svc/keda-add-ons-http-interceptor-proxy -n ${NAMESPACE} 8080:8080
|
||||
curl -H "Host: myhost.com" localhost:8080/test
|
||||
```
|
||||
|
||||
### Integrating HTTP Add-On Scaler with other KEDA scalers
|
||||
|
||||
For scenarios where you want to integrate the HTTP Add-On scaler with other KEDA scalers, you can set the `"httpscaledobject.keda.sh/skip-scaledobject-creation"` annotation to true on your `HTTPScaledObject`. The reconciler will then skip the KEDA core ScaledObject creation, which will allow you to create your own `ScaledObject` and add the HTTP scaler as one of your triggers.
|
||||
|
||||
> 💡 Ensure that your ScaledObject is created with a different name than the `HTTPScaledObject` to ensure your ScaledObject is not removed by the reconciler.
|
||||
|
||||
If you don't know how to set the external scaler in the ScaledObject, you can deploy first your HTTPScaledObject with no annotation set in order to obtain the latest trigger spec to use on your own managed ScaledObject.
|
||||
|
||||
1. Deploy your `HTTPScaledObject` with annotation set to false
|
||||
|
||||
```console
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "false"
|
||||
```
|
||||
|
||||
2. Take a copy of the currently generated external-push trigger spec on the generated ScaledObject.
|
||||
|
||||
For example:
|
||||
|
||||
```console
|
||||
triggers:
|
||||
- type: external-push
|
||||
metadata:
|
||||
httpScaledObject: YOUR_HTTPSCALEDOBJECT_NAME
|
||||
scalerAddress: keda-add-ons-http-external-scaler.keda:9090
|
||||
```
|
||||
|
||||
3. Apply the `"httpscaledobject.keda.sh/skip-scaledobject-creation"` annotation with `true` and apply the change. This will remove the originally created `ScaledObject` allowing you to create your own.
|
||||
|
||||
```console
|
||||
annotations:
|
||||
httpscaledobject.keda.sh/skip-scaledobject-creation: "true"
|
||||
```
|
||||
|
||||
4. Add the `external-push` trigger taken from step 2 to your own ScaledObject and apply this.
|
||||
|
||||
|
||||
[Go back to landing page](./)
|
||||
|
|
|
@ -0,0 +1,24 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -0,0 +1,16 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
deployment: xkcd
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -0,0 +1,18 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 5
|
||||
max: 10
|
|
@ -0,0 +1,24 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -0,0 +1,24 @@
|
|||
kind: HTTPScaledObject
|
||||
apiVersion: http.keda.sh/v1alpha1
|
||||
metadata:
|
||||
name: xkcd
|
||||
spec:
|
||||
hosts:
|
||||
- myhost.com
|
||||
pathPrefixes:
|
||||
- /test
|
||||
scaleTargetRef:
|
||||
name: xkcd
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: xkcd
|
||||
port: 8080
|
||||
replicas:
|
||||
min: 1
|
||||
max: 10
|
||||
scaledownPeriod: 300
|
||||
scalingMetric:
|
||||
requestRate:
|
||||
granularity: 1s
|
||||
targetValue: 100
|
||||
window: 1m
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}-proxy
|
||||
labels:
|
||||
{{- include "xkcd.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: ExternalName
|
||||
externalName: keda-add-ons-http-interceptor-proxy.keda
|
|
@ -0,0 +1,39 @@
|
|||
{{- if .Values.httproute }}
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: HTTPRoute
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
spec:
|
||||
parentRefs:
|
||||
- name: eg
|
||||
namespace: envoy-gateway-system
|
||||
hostnames:
|
||||
{{- range .Values.hosts }}
|
||||
- {{ . | toString }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- backendRefs:
|
||||
- kind: Service
|
||||
name: keda-add-ons-http-interceptor-proxy
|
||||
namespace: keda
|
||||
port: 8080
|
||||
matches:
|
||||
- path:
|
||||
type: PathPrefix
|
||||
value: /
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
kind: ReferenceGrant
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
namespace: keda
|
||||
spec:
|
||||
from:
|
||||
- group: gateway.networking.k8s.io
|
||||
kind: HTTPRoute
|
||||
namespace: {{ .Release.Namespace }}
|
||||
to:
|
||||
- group: ""
|
||||
kind: Service
|
||||
name: keda-add-ons-http-interceptor-proxy
|
||||
{{- end }}
|
|
@ -7,9 +7,17 @@ spec:
|
|||
hosts:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
targetPendingRequests: {{ .Values.targetPendingRequests }}
|
||||
{{- with .Values.pathPrefixes }}
|
||||
pathPrefixes:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
scalingMetric:
|
||||
concurrency:
|
||||
targetValue: {{ .Values.targetPendingRequests }}
|
||||
scaleTargetRef:
|
||||
deployment: {{ include "xkcd.fullname" . }}
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
service: {{ include "xkcd.fullname" . }}
|
||||
port: 8080
|
||||
replicas:
|
||||
|
|
|
@ -2,11 +2,10 @@ apiVersion: networking.k8s.io/v1
|
|||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ include "xkcd.fullname" . }}
|
||||
namespace: {{ .Values.ingressNamespace | default .Release.Namespace }}
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
kubernetes.io/ingress.class: nginx
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
{{- range .Values.hosts }}
|
||||
- host: {{ . | toString }}
|
||||
|
@ -16,7 +15,7 @@ spec:
|
|||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: keda-add-ons-http-interceptor-proxy
|
||||
name: {{ include "xkcd.fullname" $ }}-proxy
|
||||
port:
|
||||
number: 8080
|
||||
{{- end }}
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: "{{ include "xkcd.fullname" . }}-test-connection"
|
||||
labels:
|
||||
{{- include "xkcd.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
containers:
|
||||
- name: wget
|
||||
image: busybox
|
||||
command: ['wget']
|
||||
args: ['{{ include "xkcd.fullname" . }}:{{ .Values.service.port }}']
|
||||
restartPolicy: Never
|
|
@ -2,6 +2,9 @@ replicaCount: 1
|
|||
hosts:
|
||||
- "myhost.com"
|
||||
- "myhost2.com"
|
||||
pathPrefixes:
|
||||
- "/path1"
|
||||
- "/path2"
|
||||
targetPendingRequests: 200
|
||||
# This is the namespace that the ingress should be installed
|
||||
# into. It should be set to the same namespace as the
|
||||
|
|
199
go.mod
199
go.mod
|
@ -1,90 +1,149 @@
|
|||
module github.com/kedacore/http-add-on
|
||||
|
||||
go 1.19
|
||||
go 1.24.3
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.2.4
|
||||
github.com/go-logr/zapr v1.2.3
|
||||
github.com/golang/mock v1.7.0-rc.1.0.20220812172401-5b455625bd2c
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.0.0
|
||||
github.com/kedacore/keda/v2 v2.10.1-0.20230601160236-b5de66fe3857
|
||||
github.com/go-logr/logr v1.4.3
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0
|
||||
github.com/kedacore/keda/v2 v2.17.1
|
||||
github.com/kelseyhightower/envconfig v1.4.0
|
||||
github.com/onsi/ginkgo/v2 v2.9.4
|
||||
github.com/onsi/gomega v1.27.6
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/stretchr/testify v1.8.2
|
||||
github.com/tj/assert v0.0.3
|
||||
go.uber.org/zap v1.24.0
|
||||
golang.org/x/sync v0.2.0
|
||||
google.golang.org/grpc v1.54.0
|
||||
google.golang.org/protobuf v1.30.0
|
||||
k8s.io/api v0.27.1
|
||||
k8s.io/apimachinery v0.27.1
|
||||
k8s.io/client-go v0.27.1
|
||||
k8s.io/code-generator v0.27.1
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2
|
||||
sigs.k8s.io/controller-runtime v0.15.0-alpha.0
|
||||
github.com/onsi/ginkgo/v2 v2.23.4
|
||||
github.com/onsi/gomega v1.37.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.36.0
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.uber.org/mock v0.5.2
|
||||
golang.org/x/sync v0.14.0
|
||||
google.golang.org/grpc v1.72.2
|
||||
google.golang.org/protobuf v1.36.6
|
||||
k8s.io/api v0.32.2
|
||||
k8s.io/apimachinery v0.32.2
|
||||
k8s.io/client-go v1.5.2
|
||||
k8s.io/code-generator v0.32.2
|
||||
k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979
|
||||
sigs.k8s.io/controller-runtime v0.19.7
|
||||
sigs.k8s.io/gateway-api v1.2.1
|
||||
sigs.k8s.io/kustomize/kustomize/v5 v5.6.0
|
||||
)
|
||||
|
||||
replace (
|
||||
// pin k8s.io to v0.31.7 & sigs.k8s.io/controller-runtime to v0.19.7
|
||||
github.com/google/cel-go => github.com/google/cel-go v0.20.1
|
||||
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.21.1
|
||||
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/common => github.com/prometheus/common v0.63.0
|
||||
k8s.io/api => k8s.io/api v0.31.7
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.31.7
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.31.7
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.31.7
|
||||
k8s.io/client-go => k8s.io/client-go v0.31.7
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.31.7
|
||||
k8s.io/component-base => k8s.io/component-base v0.31.7
|
||||
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
|
||||
k8s.io/metrics => k8s.io/metrics v0.31.6
|
||||
k8s.io/utils => k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.19.6
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.uber.org/automaxprocs v1.6.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/expr-lang/expr v1.17.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 // indirect
|
||||
github.com/go-errors/errors v1.5.1 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.1 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/gnostic v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20230309165930-d61513b1440d // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.15 // indirect
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.15.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.10.0 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/net v0.9.0 // indirect
|
||||
golang.org/x/oauth2 v0.7.0 // indirect
|
||||
golang.org/x/sys v0.7.0 // indirect
|
||||
golang.org/x/term v0.7.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.8.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/prometheus/common v0.64.0
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/spf13/cobra v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.57.0
|
||||
go.opentelemetry.io/otel/metric v1.36.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.33.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.27.1 // indirect
|
||||
k8s.io/component-base v0.27.1 // indirect
|
||||
k8s.io/gengo v0.0.0-20230306165830-ab3349d207d4 // indirect
|
||||
k8s.io/klog/v2 v2.90.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230426210814-b0c0aaee3cc0 // indirect
|
||||
knative.dev/pkg v0.0.0-20230404101938-ee73c9355c9d // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.32.1 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
knative.dev/pkg v0.0.0-20250602175424-3c3a920206ea // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/cmd/config v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
519
go.sum
519
go.sum
|
@ -1,372 +1,309 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
|
||||
github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
|
||||
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
|
||||
github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/expr-lang/expr v1.17.4 h1:qhTVftZ2Z3WpOEXRHWErEl2xf1Kq011MnQmWgLq06CY=
|
||||
github.com/expr-lang/expr v1.17.4/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
|
||||
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
|
||||
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
|
||||
github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
|
||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
||||
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
|
||||
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.7.0-rc.1.0.20220812172401-5b455625bd2c h1:8AzxBXzXPCzl8EEsgWirPPDA5ru+bm5dVEV/KkpAKnE=
|
||||
github.com/golang/mock v1.7.0-rc.1.0.20220812172401-5b455625bd2c/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
|
||||
github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20230309165930-d61513b1440d h1:um9/pc7tKMINFfP1eE7Wv6PRGXlcCSJkVajF7KJw3uQ=
|
||||
github.com/google/pprof v0.0.0-20230309165930-d61513b1440d/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.0.0 h1:nq9lQ5I71Heg2lRb2/+szuIWKY3Y73d8YKyXyN91WzU=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.0.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw=
|
||||
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.0 h1:Lf+9eD8m5pncvHAOCQj49GSN6aQI8XGfI5OpXNkoWaA=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.0/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
|
||||
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kedacore/keda/v2 v2.10.1-0.20230601160236-b5de66fe3857 h1:Mvl+gPlyvf0mGp0ufBD0y1bqdVhQKr4M5LzDl4NQe5E=
|
||||
github.com/kedacore/keda/v2 v2.10.1-0.20230601160236-b5de66fe3857/go.mod h1:xaQBtg5rfDDnv6OKwlJLCK8UCsU95OqxDH70XoPp19k=
|
||||
github.com/kedacore/keda/v2 v2.17.1 h1:UomWibe5aO7COMUyF+jVM9fuENf4/wcSpiui65tF+d0=
|
||||
github.com/kedacore/keda/v2 v2.17.1/go.mod h1:yKJMF8zuLI2xXvZtgfcbW+V8k3VO4a4R/fucy3z5lC8=
|
||||
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
|
||||
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
|
||||
github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
|
||||
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
|
||||
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
|
||||
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
|
||||
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
|
||||
github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
|
||||
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
|
||||
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
|
||||
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
|
||||
github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
|
||||
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
|
||||
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.36.0 h1:xrAb/G80z/l5JL6XlmUMSD1i6W8vXkWrLfmkD3w/zZo=
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.36.0/go.mod h1:UREJtqioFu5awNaCR8aEx7MfJROFlAWb6lPaJFbHaG0=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.57.0 h1:AHh/lAP1BHrY5gBwk8ncc25FXWm/gmmY3BX258z5nuk=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.57.0/go.mod h1:QpFWz1QxqevfjwzYdbMb4Y1NnlJvqSGwyuU0B4iuc9c=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
|
||||
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
|
||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20221215174704-0915cd710c24 h1:6w3iSY8IIkp5OQtbYj8NeuKG1jS9d+kYaubXqsoOiQ8=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b h1:QoALfVG9rhQ/M7vYDScfPdWjGL9dlsVVM5VGh7aKoAA=
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
|
||||
golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
||||
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
|
||||
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
|
||||
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
|
||||
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||||
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
|
||||
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0=
|
||||
k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E=
|
||||
k8s.io/apiextensions-apiserver v0.27.1 h1:Hp7B3KxKHBZ/FxmVFVpaDiXI6CCSr49P1OJjxKO6o4g=
|
||||
k8s.io/apiextensions-apiserver v0.27.1/go.mod h1:8jEvRDtKjVtWmdkhOqE84EcNWJt/uwF8PC4627UZghY=
|
||||
k8s.io/apimachinery v0.27.1 h1:EGuZiLI95UQQcClhanryclaQE6xjg1Bts6/L3cD7zyc=
|
||||
k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM=
|
||||
k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8=
|
||||
k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA=
|
||||
k8s.io/code-generator v0.27.1 h1:GrfUeUrJ/RtPskIsnChcXOW6h0EGNqty0VxxQ9qYKlM=
|
||||
k8s.io/code-generator v0.27.1/go.mod h1:iWtpm0ZMG6Gc4daWfITDSIu+WFhFJArYDhj242zcbnY=
|
||||
k8s.io/component-base v0.27.1 h1:kEB8p8lzi4gCs5f2SPU242vOumHJ6EOsOnDM3tTuDTM=
|
||||
k8s.io/component-base v0.27.1/go.mod h1:UGEd8+gxE4YWoigz5/lb3af3Q24w98pDseXcXZjw+E0=
|
||||
k8s.io/gengo v0.0.0-20230306165830-ab3349d207d4 h1:aClvVG6GbX10ISHcc24J+tqbr0S7fEe1MWkFJ7cWWCI=
|
||||
k8s.io/gengo v0.0.0-20230306165830-ab3349d207d4/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
|
||||
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kube-openapi v0.0.0-20230426210814-b0c0aaee3cc0 h1:XET+pmtvzC9NYUnHIX8PUPDoxqMTtDCJMRfJpoUSWow=
|
||||
k8s.io/kube-openapi v0.0.0-20230426210814-b0c0aaee3cc0/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
knative.dev/pkg v0.0.0-20230404101938-ee73c9355c9d h1:mubqXUjYfnwNg3IGWYEj2YffXYIxg44Qn9GS5vPAjck=
|
||||
knative.dev/pkg v0.0.0-20230404101938-ee73c9355c9d/go.mod h1:EQk8+qkZ8fMtrDYOOb9e9xMQG29N+L54iXBCfNXRm90=
|
||||
sigs.k8s.io/controller-runtime v0.15.0-alpha.0 h1:ukmgReObs7FEUNBcn2NLxn/DiEQ8g1yC8YvpX0HGiyE=
|
||||
sigs.k8s.io/controller-runtime v0.15.0-alpha.0/go.mod h1:icJQ1mtZAutJ9iOzS2V2VJQCBVV2ir+xahBeTHCCZGs=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
k8s.io/api v0.31.7 h1:wSo59nXpVXmaB6hgNVJCrdnKtyYoutIgpNNBbROBd2U=
|
||||
k8s.io/api v0.31.7/go.mod h1:vLUha4nXRUGtQdayzsmjur0lQApK/sJSxyR/fwuujcU=
|
||||
k8s.io/apiextensions-apiserver v0.31.7 h1:FujQQl6iKuCF5nX4GIQy3ClvftU8MqadAyi9oQ6ZeAw=
|
||||
k8s.io/apiextensions-apiserver v0.31.7/go.mod h1:YmNzYECWFYy8n9R0oxtVAD9JYILZnZCNziYrpUQhKeI=
|
||||
k8s.io/apimachinery v0.31.7 h1:fpV8yLerIZFAkj0of66+i1ArPv/Btf9KO6Aulng7RRw=
|
||||
k8s.io/apimachinery v0.31.7/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.7 h1:2+LFJc6Xw6rhmpDbN1NSmhoFLWBh62cPG/P+IfaTSGY=
|
||||
k8s.io/client-go v0.31.7/go.mod h1:hrrMorBQ17LqzoKIxKg5cSWvmWl94EwA/MUF0Mkf+Zw=
|
||||
k8s.io/code-generator v0.31.7 h1:8BU7n+pK8td2600IiqH6EgxuiWbwVA1+uTOwIJ/nTUA=
|
||||
k8s.io/code-generator v0.31.7/go.mod h1:1oSRo6cJxwSCghcOFGsh53TKkUQ5ZgYoK7LBCFbhHDg=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
knative.dev/pkg v0.0.0-20250602175424-3c3a920206ea h1:ukJPq9MzFTEH/Sei5MSVnSE8+7NSCKixCDZPd6p4ohw=
|
||||
knative.dev/pkg v0.0.0-20250602175424-3c3a920206ea/go.mod h1:tFayQbi6t4+5HXuEGLOGvILW228Q7uaJp/FYEgbjJ3A=
|
||||
sigs.k8s.io/controller-runtime v0.19.6 h1:fuq53qTLQ7aJTA7aNsklNnu7eQtSFqJUomOyM+phPLk=
|
||||
sigs.k8s.io/controller-runtime v0.19.6/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
|
||||
sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM=
|
||||
sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ=
|
||||
sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o=
|
||||
sigs.k8s.io/kustomize/cmd/config v0.19.0 h1:D3uASwjHWHmNiEHu3pPJBJMBIsb+auFvHrHql3HAarU=
|
||||
sigs.k8s.io/kustomize/cmd/config v0.19.0/go.mod h1:29Vvdl26PidPLUDi7nfjYa/I0wHBkwCZp15Nlcc4y98=
|
||||
sigs.k8s.io/kustomize/kustomize/v5 v5.6.0 h1:MWtRRDWCwQEeW2rnJTqJMuV6Agy56P53SkbVoJpN7wA=
|
||||
sigs.k8s.io/kustomize/kustomize/v5 v5.6.0/go.mod h1:XuuZiQF7WdcvZzEYyNww9A0p3LazCKeJmCjeycN8e1I=
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA=
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016 h1:kXv6kKdoEtedwuqMmkqhbkgvYKeycVbC8+iPCP9j5kQ=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
@ -22,6 +22,7 @@ limitations under the License.
|
|||
package hack
|
||||
|
||||
import (
|
||||
_ "github.com/golang/mock/mockgen"
|
||||
_ "go.uber.org/mock/mockgen"
|
||||
_ "k8s.io/code-generator"
|
||||
_ "sigs.k8s.io/kustomize/kustomize/v5"
|
||||
)
|
||||
|
|
|
@ -24,18 +24,16 @@ SCRIPT_ROOT="$(dirname "${BASH_SOURCE[0]}")/.."
|
|||
OUTPUT_BASE="$(mktemp -d)"
|
||||
|
||||
GO_PACKAGE='github.com/kedacore/http-add-on'
|
||||
GEN_SUFFIX='operator/generated'
|
||||
API_SUFFIX='operator/apis'
|
||||
|
||||
. "${CODEGEN_PKG}/generate-groups.sh" \
|
||||
'client,informer,lister' \
|
||||
"${GO_PACKAGE}/${GEN_SUFFIX}" \
|
||||
"${GO_PACKAGE}/${API_SUFFIX}" \
|
||||
'http:v1alpha1' \
|
||||
--go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
|
||||
--output-base "${OUTPUT_BASE}"
|
||||
source "${CODEGEN_PKG}/kube_codegen.sh"
|
||||
|
||||
rm -fR "${SCRIPT_ROOT}/${GEN_SUFFIX}"
|
||||
mv -nT "${OUTPUT_BASE}/${GO_PACKAGE}/${GEN_SUFFIX}" "${SCRIPT_ROOT}/${GEN_SUFFIX}"
|
||||
kube::codegen::gen_helpers \
|
||||
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
|
||||
"${SCRIPT_ROOT}/operator/apis"
|
||||
|
||||
rm -fR "${OUTPUT_BASE}"
|
||||
kube::codegen::gen_client \
|
||||
--with-watch \
|
||||
--output-dir "${SCRIPT_ROOT}/operator/generated" \
|
||||
--output-pkg "github.com/kedacore/http-add-on/operator/generated" \
|
||||
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
|
||||
"${SCRIPT_ROOT}/operator/apis"
|
||||
|
|
|
@ -25,7 +25,7 @@ GEN='operator/generated'
|
|||
CPY='hack/boilerplate.go.txt'
|
||||
PKG='mock'
|
||||
|
||||
MOCKGEN_PKG="${MOCKGEN_PKG:-$(go list -f '{{ .Dir }}' -m github.com/golang/mock 2>/dev/null)/mockgen}"
|
||||
MOCKGEN_PKG="${MOCKGEN_PKG:-$(go list -f '{{ .Dir }}' -m go.uber.org/mock 2>/dev/null)/mockgen}"
|
||||
MOCKGEN="${OUTPUT}/mockgen"
|
||||
go build -o "${MOCKGEN}" "${MOCKGEN_PKG}"
|
||||
|
||||
|
|
|
@ -0,0 +1,73 @@
|
|||
#!/bin/bash
|
||||
|
||||
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
||||
|
||||
# Define filename
|
||||
filename="$SCRIPT_ROOT/CHANGELOG.md"
|
||||
|
||||
# Check if file exists
|
||||
if [[ ! -f "$filename" ]]; then
|
||||
echo "Error: $filename does not exist."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Storing the version to be checked
|
||||
mapfile -t versions < <(awk '/## History/{flag=1;next}/## /{flag=0}flag' "$filename" | grep -o '\[[^]]*\]' | grep -v "v1." | sed 's/[][]//g')
|
||||
|
||||
# Define a function to extract and sort sections
|
||||
function extract_and_check() {
|
||||
local section=$1
|
||||
local content_block=$2
|
||||
local content=$(awk "/### $section/{flag=1;next}/### /{flag=0}flag" <<< "$content_block" | grep '^- \*\*')
|
||||
|
||||
# Skip if content does not exist
|
||||
if [[ -z "$content" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Separate and sort the **General**: lines
|
||||
local sorted_general_lines=$(echo "$content" | grep '^- \*\*General\*\*:' | LC_ALL=en_US sort --ignore-case)
|
||||
|
||||
# Sort the remaining lines
|
||||
local sorted_content=$(echo "$content" | grep -v '^- \*\*General\*\*:' | LC_ALL=en_US sort --ignore-case)
|
||||
|
||||
# Check if sorted_general_lines is not empty, then concatenate
|
||||
if [[ -n "$sorted_general_lines" ]]; then
|
||||
sorted_content=$(printf "%s\n%s" "$sorted_general_lines" "$sorted_content")
|
||||
fi
|
||||
|
||||
# Check pattern and throw error if wrong pattern found
|
||||
while IFS= read -r line; do
|
||||
echo "Error: Wrong pattern found in section: $section , line: $line"
|
||||
exit 1
|
||||
done < <(grep -Pv '^(-\s\*\*[^*]+\*\*: .*\(\[#([\d|TODO]+)\]\(https:\/\/github\.com\/kedacore\/(http-add-on|charts|governance)\/(pull|issues|discussions)\/\2\)(?:\|\[#(\d+)\]\(https:\/\/github\.com\/kedacore\/(http-add-on|charts|governance)\/(pull|issues|discussions)\/(?:\5)\)){0,}\))$' <<< "$content")
|
||||
|
||||
if [ "$content" != "$sorted_content" ]; then
|
||||
echo "Error: Section: $section is not sorted correctly. Correct order:"
|
||||
echo "$sorted_content"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# Extract release sections, including "Unreleased", and check them
|
||||
for version in "${versions[@]}"; do
|
||||
release_content=$(awk "/## $version/{flag=1;next}/## v[0-9\.]+/{flag=0}flag" "$filename")
|
||||
|
||||
|
||||
if [[ -z "$release_content" ]]; then
|
||||
echo "No content found for $version Skipping."
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "Checking section: $version"
|
||||
|
||||
# Separate content into different sections and check sorting for each release
|
||||
extract_and_check "New" "$release_content"
|
||||
extract_and_check "Experimental" "$release_content"
|
||||
extract_and_check "Improvements" "$release_content"
|
||||
extract_and_check "Fixes" "$release_content"
|
||||
extract_and_check "Deprecations" "$release_content"
|
||||
extract_and_check "Other" "$release_content"
|
||||
|
||||
done
|
|
@ -44,6 +44,6 @@ if [[ $ret -eq 0 ]]
|
|||
then
|
||||
echo "${DIFFROOT} up to date."
|
||||
else
|
||||
echo "${DIFFROOT} is out of date. Please run '${SCRIPT_ROOT}/hack/update-codegen.sh'"
|
||||
echo "${DIFFROOT} is out of date. Please run 'make codegen'"
|
||||
exit 1
|
||||
fi
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM --platform=${BUILDPLATFORM} ghcr.io/kedacore/build-tools:1.20.4 as builder
|
||||
FROM --platform=${BUILDPLATFORM} ghcr.io/kedacore/keda-tools:1.24.3 as builder
|
||||
WORKDIR /workspace
|
||||
COPY go.* .
|
||||
RUN go mod download
|
||||
|
|
|
@ -0,0 +1,23 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"github.com/kelseyhightower/envconfig"
|
||||
)
|
||||
|
||||
// Metrics is the configuration for configuring metrics in the interceptor.
|
||||
type Metrics struct {
|
||||
// Sets whether or not to enable the Prometheus metrics exporter
|
||||
OtelPrometheusExporterEnabled bool `envconfig:"OTEL_PROM_EXPORTER_ENABLED" default:"true"`
|
||||
// Sets the port which the Prometheus compatible metrics endpoint should be served on
|
||||
OtelPrometheusExporterPort int `envconfig:"OTEL_PROM_EXPORTER_PORT" default:"2223"`
|
||||
// Sets whether or not to enable the OTEL metrics exporter
|
||||
OtelHTTPExporterEnabled bool `envconfig:"OTEL_EXPORTER_OTLP_METRICS_ENABLED" default:"false"`
|
||||
}
|
||||
|
||||
// Parse parses standard configs using envconfig and returns a pointer to the
|
||||
// newly created config. Returns nil and a non-nil error if parsing failed
|
||||
func MustParseMetrics() *Metrics {
|
||||
ret := new(Metrics)
|
||||
envconfig.MustProcess("", ret)
|
||||
return ret
|
||||
}
|
|
@ -24,11 +24,31 @@ type Serving struct {
|
|||
// ConfigMapCacheRsyncPeriod is the time interval
|
||||
// for the configmap informer to rsync the local cache.
|
||||
ConfigMapCacheRsyncPeriod time.Duration `envconfig:"KEDA_HTTP_SCALER_CONFIG_MAP_INFORMER_RSYNC_PERIOD" default:"60m"`
|
||||
// The interceptor has an internal process that periodically fetches the state
|
||||
// Deprecated: The interceptor has an internal process that periodically fetches the state
|
||||
// of deployment that is running the servers it forwards to.
|
||||
//
|
||||
// This is the interval (in milliseconds) representing how often to do a fetch
|
||||
DeploymentCachePollIntervalMS int `envconfig:"KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS" default:"250"`
|
||||
// The interceptor has an internal process that periodically fetches the state
|
||||
// of endpoints that is running the servers it forwards to.
|
||||
//
|
||||
// This is the interval (in milliseconds) representing how often to do a fetch
|
||||
EndpointsCachePollIntervalMS int `envconfig:"KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS" default:"250"`
|
||||
// ProxyTLSEnabled is a flag to specify whether the interceptor proxy should
|
||||
// be running using a TLS enabled server
|
||||
ProxyTLSEnabled bool `envconfig:"KEDA_HTTP_PROXY_TLS_ENABLED" default:"false"`
|
||||
// TLSCertPath is the path to read the certificate file from for the TLS server
|
||||
TLSCertPath string `envconfig:"KEDA_HTTP_PROXY_TLS_CERT_PATH" default:"/certs/tls.crt"`
|
||||
// TLSKeyPath is the path to read the private key file from for the TLS server
|
||||
TLSKeyPath string `envconfig:"KEDA_HTTP_PROXY_TLS_KEY_PATH" default:"/certs/tls.key"`
|
||||
// TLSCertStorePaths is a comma separated list of paths to read the certificate/key pairs for the TLS server
|
||||
TLSCertStorePaths string `envconfig:"KEDA_HTTP_PROXY_TLS_CERT_STORE_PATHS" default:""`
|
||||
// TLSSkipVerify is a boolean flag to specify whether the interceptor should skip TLS verification for upstreams
|
||||
TLSSkipVerify bool `envconfig:"KEDA_HTTP_PROXY_TLS_SKIP_VERIFY" default:"false"`
|
||||
// TLSPort is the port that the server should serve on if TLS is enabled
|
||||
TLSPort int `envconfig:"KEDA_HTTP_PROXY_TLS_PORT" default:"8443"`
|
||||
// ProfilingAddr if not empty, pprof will be available on this address, assuming host:port here
|
||||
ProfilingAddr string `envconfig:"PROFILING_BIND_ADDRESS" default:""`
|
||||
}
|
||||
|
||||
// Parse parses standard configs using envconfig and returns a pointer to the
|
||||
|
|
|
@ -16,9 +16,9 @@ type Timeouts struct {
|
|||
// ResponseHeaderTimeout is how long to wait between when the HTTP request
|
||||
// is sent to the backing app and when response headers need to arrive
|
||||
ResponseHeader time.Duration `envconfig:"KEDA_RESPONSE_HEADER_TIMEOUT" default:"500ms"`
|
||||
// DeploymentReplicas is how long to wait for the backing deployment
|
||||
// WorkloadReplicas is how long to wait for the backing workload
|
||||
// to have 1 or more replicas before connecting and sending the HTTP request.
|
||||
DeploymentReplicas time.Duration `envconfig:"KEDA_CONDITION_WAIT_TIMEOUT" default:"1500ms"`
|
||||
WorkloadReplicas time.Duration `envconfig:"KEDA_CONDITION_WAIT_TIMEOUT" default:"1500ms"`
|
||||
// ForceHTTP2 toggles whether to try to force HTTP2 for all requests
|
||||
ForceHTTP2 bool `envconfig:"KEDA_HTTP_FORCE_HTTP2" default:"false"`
|
||||
// MaxIdleConns is the max number of connections that can be idle in the
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"github.com/kelseyhightower/envconfig"
|
||||
)
|
||||
|
||||
// Tracing is the configuration for configuring tracing through the interceptor.
|
||||
type Tracing struct {
|
||||
// States whether tracing should be enabled, False by default
|
||||
Enabled bool `envconfig:"OTEL_EXPORTER_OTLP_TRACES_ENABLED" default:"false"`
|
||||
// Sets what tracing export to use, must be one of: console,http/protobuf, grpc
|
||||
Exporter string `envconfig:"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL" default:"console"`
|
||||
}
|
||||
|
||||
// Parse parses standard configs using envconfig and returns a pointer to the
|
||||
// newly created config. Returns nil and a non-nil error if parsing failed
|
||||
func MustParseTracing() *Tracing {
|
||||
ret := new(Tracing)
|
||||
envconfig.MustProcess("", ret)
|
||||
return ret
|
||||
}
|
|
@ -2,16 +2,36 @@ package config
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
func Validate(srvCfg Serving, timeoutsCfg Timeouts) error {
|
||||
deplCachePollInterval := time.Duration(srvCfg.DeploymentCachePollIntervalMS) * time.Millisecond
|
||||
if timeoutsCfg.DeploymentReplicas < deplCachePollInterval {
|
||||
func Validate(srvCfg *Serving, timeoutsCfg Timeouts, lggr logr.Logger) error {
|
||||
// TODO(jorturfer): delete this for v0.9.0
|
||||
_, deploymentEnvExist := os.LookupEnv("KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS")
|
||||
_, endpointsEnvExist := os.LookupEnv("KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS")
|
||||
if deploymentEnvExist && endpointsEnvExist {
|
||||
return fmt.Errorf(
|
||||
"deployment replicas timeout (%s) should not be less than the Deployment Cache Poll Interval (%s)",
|
||||
timeoutsCfg.DeploymentReplicas,
|
||||
deplCachePollInterval,
|
||||
"%s and %s are mutual exclusive",
|
||||
"KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS",
|
||||
"KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS",
|
||||
)
|
||||
}
|
||||
if deploymentEnvExist && !endpointsEnvExist {
|
||||
srvCfg.EndpointsCachePollIntervalMS = srvCfg.DeploymentCachePollIntervalMS
|
||||
srvCfg.DeploymentCachePollIntervalMS = 0
|
||||
lggr.Info("WARNING: KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS has been deprecated in favor of KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS and wil be removed for v0.9.0")
|
||||
}
|
||||
// END TODO
|
||||
|
||||
endpointsCachePollInterval := time.Duration(srvCfg.EndpointsCachePollIntervalMS) * time.Millisecond
|
||||
if timeoutsCfg.WorkloadReplicas < endpointsCachePollInterval {
|
||||
return fmt.Errorf(
|
||||
"workload replicas timeout (%s) should not be less than the Endpoints Cache Poll Interval (%s)",
|
||||
timeoutsCfg.WorkloadReplicas,
|
||||
endpointsCachePollInterval,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -5,65 +5,69 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/kedacore/http-add-on/pkg/k8s"
|
||||
)
|
||||
|
||||
// forwardWaitFunc is a function that waits for a condition
|
||||
// before proceeding to serve the request.
|
||||
type forwardWaitFunc func(context.Context, string, string) (int, error)
|
||||
type forwardWaitFunc func(context.Context, string, string) (bool, error)
|
||||
|
||||
func deploymentCanServe(depl appsv1.Deployment) bool {
|
||||
return depl.Status.ReadyReplicas > 0
|
||||
func workloadActiveEndpoints(endpoints v1.Endpoints) int {
|
||||
total := 0
|
||||
for _, subset := range endpoints.Subsets {
|
||||
total += len(subset.Addresses)
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
func newDeployReplicasForwardWaitFunc(
|
||||
func newWorkloadReplicasForwardWaitFunc(
|
||||
lggr logr.Logger,
|
||||
deployCache k8s.DeploymentCache,
|
||||
endpointCache k8s.EndpointsCache,
|
||||
) forwardWaitFunc {
|
||||
return func(ctx context.Context, deployNS, deployName string) (int, error) {
|
||||
return func(ctx context.Context, endpointNS, endpointName string) (bool, error) {
|
||||
// get a watcher & its result channel before querying the
|
||||
// deployment cache, to ensure we don't miss events
|
||||
watcher, err := deployCache.Watch(deployNS, deployName)
|
||||
// endpoints cache, to ensure we don't miss events
|
||||
watcher, err := endpointCache.Watch(endpointNS, endpointName)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return false, err
|
||||
}
|
||||
eventCh := watcher.ResultChan()
|
||||
defer watcher.Stop()
|
||||
|
||||
deployment, err := deployCache.Get(deployNS, deployName)
|
||||
endpoints, err := endpointCache.Get(endpointNS, endpointName)
|
||||
if err != nil {
|
||||
// if we didn't get the initial deployment state, bail out
|
||||
return 0, fmt.Errorf(
|
||||
"error getting state for deployment %s/%s (%s)",
|
||||
deployNS,
|
||||
deployName,
|
||||
// if we didn't get the initial endpoints state, bail out
|
||||
return false, fmt.Errorf(
|
||||
"error getting state for endpoints %s/%s: %w",
|
||||
endpointNS,
|
||||
endpointName,
|
||||
err,
|
||||
)
|
||||
}
|
||||
// if there is 1 or more replica, we're done waiting
|
||||
if deploymentCanServe(deployment) {
|
||||
return int(deployment.Status.ReadyReplicas), nil
|
||||
// if there is 1 or more active endpoints, we're done waiting
|
||||
activeEndpoints := workloadActiveEndpoints(endpoints)
|
||||
if activeEndpoints > 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-eventCh:
|
||||
deployment, ok := event.Object.(*appsv1.Deployment)
|
||||
endpoints, ok := event.Object.(*v1.Endpoints)
|
||||
if !ok {
|
||||
lggr.Info(
|
||||
"Didn't get a deployment back in event",
|
||||
"Didn't get a endpoints back in event",
|
||||
)
|
||||
} else if deploymentCanServe(*deployment) {
|
||||
return 0, nil
|
||||
} else if activeEndpoints := workloadActiveEndpoints(*endpoints); activeEndpoints > 0 {
|
||||
return true, nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
// otherwise, if the context is marked done before
|
||||
// we're done waiting, fail.
|
||||
return 0, fmt.Errorf(
|
||||
"context marked done while waiting for deployment %s to reach > 0 replicas (%w)",
|
||||
deployName,
|
||||
return false, fmt.Errorf(
|
||||
"context marked done while waiting for workload reach > 0 replicas: %w",
|
||||
ctx.Err(),
|
||||
)
|
||||
}
|
||||
|
|
|
@ -8,8 +8,7 @@ import (
|
|||
"github.com/go-logr/logr"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/sync/errgroup"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
|
||||
|
@ -17,70 +16,55 @@ import (
|
|||
)
|
||||
|
||||
// Test to make sure the wait function returns a nil error if there is immediately
|
||||
// one replica on the target deployment
|
||||
// one active endpoint on the target deployment
|
||||
func TestForwardWaitFuncOneReplica(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
const waitFuncWait = 1 * time.Second
|
||||
r := require.New(t)
|
||||
const ns = "testNS"
|
||||
const deployName = "TestForwardingHandlerDeploy"
|
||||
cache := k8s.NewFakeDeploymentCache()
|
||||
cache.AddDeployment(*newDeployment(
|
||||
ns,
|
||||
deployName,
|
||||
"myimage",
|
||||
[]int32{123},
|
||||
nil,
|
||||
map[string]string{},
|
||||
corev1.PullAlways,
|
||||
))
|
||||
const endpointsName = "TestForwardingHandler"
|
||||
endpoints := *newEndpoint(ns, endpointsName)
|
||||
cache := k8s.NewFakeEndpointsCache()
|
||||
cache.Set(endpoints)
|
||||
r.NoError(cache.SetSubsets(ns, endpointsName, 1))
|
||||
|
||||
ctx, done := context.WithTimeout(ctx, waitFuncWait)
|
||||
defer done()
|
||||
group, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
waitFunc := newDeployReplicasForwardWaitFunc(
|
||||
waitFunc := newWorkloadReplicasForwardWaitFunc(
|
||||
logr.Discard(),
|
||||
cache,
|
||||
)
|
||||
|
||||
group.Go(func() error {
|
||||
_, err := waitFunc(ctx, ns, deployName)
|
||||
_, err := waitFunc(ctx, ns, endpointsName)
|
||||
return err
|
||||
})
|
||||
r.NoError(group.Wait(), "wait function failed, but it shouldn't have")
|
||||
}
|
||||
|
||||
// Test to make sure the wait function returns an error if there are no replicas, and that doesn't change
|
||||
// Test to make sure the wait function returns an error if there are active endpoints, and that doesn't change
|
||||
// within a timeout
|
||||
func TestForwardWaitFuncNoReplicas(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
const waitFuncWait = 1 * time.Second
|
||||
r := require.New(t)
|
||||
const ns = "testNS"
|
||||
const deployName = "TestForwardingHandlerHoldsDeployment"
|
||||
deployment := newDeployment(
|
||||
ns,
|
||||
deployName,
|
||||
"myimage",
|
||||
[]int32{123},
|
||||
nil,
|
||||
map[string]string{},
|
||||
corev1.PullAlways,
|
||||
)
|
||||
deployment.Status.ReadyReplicas = 0
|
||||
cache := k8s.NewFakeDeploymentCache()
|
||||
cache.AddDeployment(*deployment)
|
||||
const endpointsName = "TestForwardWaitFuncNoReplicas"
|
||||
endpoints := *newEndpoint(ns, endpointsName)
|
||||
cache := k8s.NewFakeEndpointsCache()
|
||||
cache.Set(endpoints)
|
||||
|
||||
ctx, done := context.WithTimeout(ctx, waitFuncWait)
|
||||
defer done()
|
||||
waitFunc := newDeployReplicasForwardWaitFunc(
|
||||
waitFunc := newWorkloadReplicasForwardWaitFunc(
|
||||
logr.Discard(),
|
||||
cache,
|
||||
)
|
||||
|
||||
_, err := waitFunc(ctx, ns, deployName)
|
||||
_, err := waitFunc(ctx, ns, endpointsName)
|
||||
r.Error(err)
|
||||
}
|
||||
|
||||
|
@ -90,100 +74,58 @@ func TestWaitFuncWaitsUntilReplicas(t *testing.T) {
|
|||
totalWaitDur := 500 * time.Millisecond
|
||||
|
||||
const ns = "testNS"
|
||||
const deployName = "TestForwardingHandlerHoldsDeployment"
|
||||
deployment := newDeployment(
|
||||
ns,
|
||||
deployName,
|
||||
"myimage",
|
||||
[]int32{123},
|
||||
nil,
|
||||
map[string]string{},
|
||||
corev1.PullAlways,
|
||||
)
|
||||
deployment.Spec.Replicas = k8s.Int32P(0)
|
||||
cache := k8s.NewFakeDeploymentCache()
|
||||
cache.AddDeployment(*deployment)
|
||||
const endpointsName = "TestForwardingHandlerHolds"
|
||||
|
||||
endpoints := *newEndpoint(ns, endpointsName)
|
||||
cache := k8s.NewFakeEndpointsCache()
|
||||
cache.Set(endpoints)
|
||||
// create a watcher first so that the goroutine
|
||||
// can later fetch it and send a message on it
|
||||
_, err := cache.Watch(ns, deployName)
|
||||
_, err := cache.Watch(ns, endpointsName)
|
||||
r.NoError(err)
|
||||
|
||||
ctx, done := context.WithTimeout(ctx, totalWaitDur)
|
||||
waitFunc := newDeployReplicasForwardWaitFunc(
|
||||
waitFunc := newWorkloadReplicasForwardWaitFunc(
|
||||
logr.Discard(),
|
||||
cache,
|
||||
)
|
||||
|
||||
// this channel will be closed immediately after the replicas were increased
|
||||
replicasIncreasedCh := make(chan struct{})
|
||||
// this channel will be closed immediately after the active endpoints were increased
|
||||
activeEndpointsIncreasedCh := make(chan struct{})
|
||||
go func() {
|
||||
time.Sleep(totalWaitDur / 2)
|
||||
watcher := cache.GetWatcher(ns, deployName)
|
||||
watcher := cache.GetWatcher(ns, endpointsName)
|
||||
r.NotNil(watcher, "watcher was not found")
|
||||
modifiedDeployment := deployment.DeepCopy()
|
||||
modifiedDeployment.Spec.Replicas = k8s.Int32P(1)
|
||||
watcher.Action(watch.Modified, modifiedDeployment)
|
||||
close(replicasIncreasedCh)
|
||||
modifiedEndpoints := endpoints.DeepCopy()
|
||||
modifiedEndpoints.Subsets = []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{IP: "1.2.3.4"},
|
||||
},
|
||||
},
|
||||
}
|
||||
watcher.Action(watch.Modified, modifiedEndpoints)
|
||||
close(activeEndpointsIncreasedCh)
|
||||
}()
|
||||
_, err = waitFunc(ctx, ns, deployName)
|
||||
_, err = waitFunc(ctx, ns, endpointsName)
|
||||
r.NoError(err)
|
||||
done()
|
||||
}
|
||||
|
||||
// newDeployment creates a new deployment object
|
||||
// newEndpoint creates a new endpoints object
|
||||
// with the given name and the given image. This does not actually create
|
||||
// the deployment in the cluster, it just creates the deployment object
|
||||
// the endpoints in the cluster, it just creates the endpoints object
|
||||
// in memory
|
||||
func newDeployment(
|
||||
func newEndpoint(
|
||||
namespace,
|
||||
name,
|
||||
image string,
|
||||
ports []int32,
|
||||
env []corev1.EnvVar,
|
||||
labels map[string]string,
|
||||
pullPolicy corev1.PullPolicy,
|
||||
) *appsv1.Deployment {
|
||||
containerPorts := make([]corev1.ContainerPort, len(ports))
|
||||
for i, port := range ports {
|
||||
containerPorts[i] = corev1.ContainerPort{
|
||||
ContainerPort: port,
|
||||
}
|
||||
}
|
||||
deployment := &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
},
|
||||
name string,
|
||||
) *v1.Endpoints {
|
||||
endpoints := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
},
|
||||
Replicas: k8s.Int32P(1),
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: image,
|
||||
Name: name,
|
||||
ImagePullPolicy: pullPolicy,
|
||||
Ports: containerPorts,
|
||||
Env: env,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: appsv1.DeploymentStatus{
|
||||
ReadyReplicas: 1,
|
||||
},
|
||||
}
|
||||
|
||||
return deployment
|
||||
return endpoints
|
||||
}
|
||||
|
|
|
@ -5,6 +5,12 @@ import (
|
|||
"net/http"
|
||||
"net/http/httputil"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/propagation"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/kedacore/http-add-on/interceptor/config"
|
||||
"github.com/kedacore/http-add-on/pkg/util"
|
||||
)
|
||||
|
||||
|
@ -13,12 +19,16 @@ var (
|
|||
)
|
||||
|
||||
type Upstream struct {
|
||||
roundTripper http.RoundTripper
|
||||
roundTripper http.RoundTripper
|
||||
tracingCfg *config.Tracing
|
||||
shouldFailover bool
|
||||
}
|
||||
|
||||
func NewUpstream(roundTripper http.RoundTripper) *Upstream {
|
||||
func NewUpstream(roundTripper http.RoundTripper, tracingCfg *config.Tracing, shouldFailover bool) *Upstream {
|
||||
return &Upstream{
|
||||
roundTripper: roundTripper,
|
||||
roundTripper: roundTripper,
|
||||
tracingCfg: tracingCfg,
|
||||
shouldFailover: shouldFailover,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -28,7 +38,26 @@ func (uh *Upstream) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
r = util.RequestWithLoggerWithName(r, "UpstreamHandler")
|
||||
ctx := r.Context()
|
||||
|
||||
if uh.tracingCfg.Enabled {
|
||||
p := otel.GetTextMapPropagator()
|
||||
ctx = p.Extract(ctx, propagation.HeaderCarrier(r.Header))
|
||||
|
||||
p.Inject(ctx, propagation.HeaderCarrier(w.Header()))
|
||||
|
||||
span := trace.SpanFromContext(ctx)
|
||||
defer span.End()
|
||||
|
||||
serviceValAttr := attribute.String("service", "keda-http-interceptor-proxy-upstream")
|
||||
coldStartValAttr := attribute.String("cold-start", w.Header().Get("X-KEDA-HTTP-Cold-Start"))
|
||||
|
||||
span.SetAttributes(serviceValAttr, coldStartValAttr)
|
||||
}
|
||||
|
||||
stream := util.StreamFromContext(ctx)
|
||||
if uh.shouldFailover {
|
||||
stream = util.FailoverStreamFromContext(ctx)
|
||||
}
|
||||
|
||||
if stream == nil {
|
||||
sh := NewStatic(http.StatusInternalServerError, errNilStream)
|
||||
sh.ServeHTTP(w, r)
|
||||
|
@ -37,11 +66,13 @@ func (uh *Upstream) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
proxy := httputil.NewSingleHostReverseProxy(stream)
|
||||
superDirector := proxy.Director
|
||||
proxy.Transport = uh.roundTripper
|
||||
proxy.Director = func(req *http.Request) {
|
||||
superDirector(req)
|
||||
req.URL = stream
|
||||
req.Host = stream.Host
|
||||
req.URL.Path = r.URL.Path
|
||||
req.URL.RawPath = r.URL.RawPath
|
||||
req.URL.RawQuery = r.URL.RawQuery
|
||||
// delete the incoming X-Forwarded-For header so the proxy
|
||||
// puts its own in. This is also important to prevent IP spoofing
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
@ -9,13 +11,227 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/sdk/trace"
|
||||
"go.opentelemetry.io/otel/sdk/trace/tracetest"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/kedacore/http-add-on/interceptor/config"
|
||||
"github.com/kedacore/http-add-on/interceptor/tracing"
|
||||
kedanet "github.com/kedacore/http-add-on/pkg/net"
|
||||
"github.com/kedacore/http-add-on/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
traceID = "a8419b25ec2051e5"
|
||||
fullW3CLengthTraceID = "29b3290dc5a93f2618b17502ccb2a728"
|
||||
spanID = "97337bce1bc3e368"
|
||||
parentSpanID = "2890e7e08fc6592b"
|
||||
sampled = "1"
|
||||
w3cPadding = "0000000000000000"
|
||||
)
|
||||
|
||||
func TestB3MultiPropagation(t *testing.T) {
|
||||
// Given
|
||||
r := require.New(t)
|
||||
|
||||
microservice, microserviceURL, closeServer := startMicroservice(t)
|
||||
defer closeServer()
|
||||
|
||||
exporter, tracerProvider := setupOTelSDKForTesting()
|
||||
instrumentedServeHTTP := withAutoInstrumentation(serveHTTP)
|
||||
|
||||
request, responseWriter := createRequestAndResponse("GET", microserviceURL)
|
||||
|
||||
request.Header.Set("X-B3-Traceid", traceID)
|
||||
request.Header.Set("X-B3-Spanid", spanID)
|
||||
request.Header.Set("X-B3-Parentspanid", parentSpanID)
|
||||
request.Header.Set("X-B3-Sampled", sampled)
|
||||
|
||||
defer func(traceProvider *trace.TracerProvider, ctx context.Context) {
|
||||
_ = traceProvider.Shutdown(ctx)
|
||||
}(tracerProvider, request.Context())
|
||||
|
||||
// When
|
||||
instrumentedServeHTTP.ServeHTTP(responseWriter, request)
|
||||
|
||||
// Then
|
||||
receivedRequest := microservice.IncomingRequests()[0]
|
||||
receivedHeaders := receivedRequest.Header
|
||||
|
||||
r.Equal(receivedHeaders.Get("X-B3-Parentspanid"), parentSpanID)
|
||||
r.Equal(receivedHeaders.Get("X-B3-Traceid"), traceID)
|
||||
r.Equal(receivedHeaders.Get("X-B3-Spanid"), spanID)
|
||||
r.Equal(receivedHeaders.Get("X-B3-Sampled"), sampled)
|
||||
|
||||
r.NotContains(receivedHeaders, "Traceparent")
|
||||
r.NotContains(receivedHeaders, "B3")
|
||||
r.NotContains(receivedHeaders, "b3")
|
||||
|
||||
_ = tracerProvider.ForceFlush(request.Context())
|
||||
|
||||
exportedSpans := exporter.GetSpans()
|
||||
if len(exportedSpans) != 1 {
|
||||
t.Fatalf("Expected 1 Span, got %d", len(exportedSpans))
|
||||
}
|
||||
sc := exportedSpans[0].SpanContext
|
||||
r.Equal(w3cPadding+traceID, sc.TraceID().String())
|
||||
r.NotEqual(sc.SpanID().String(), spanID)
|
||||
}
|
||||
|
||||
func TestW3CAndB3MultiPropagation(t *testing.T) {
|
||||
// Given
|
||||
r := require.New(t)
|
||||
|
||||
microservice, microserviceURL, closeServer := startMicroservice(t)
|
||||
defer closeServer()
|
||||
|
||||
exporter, tracerProvider := setupOTelSDKForTesting()
|
||||
instrumentedServeHTTP := withAutoInstrumentation(serveHTTP)
|
||||
|
||||
request, responseWriter := createRequestAndResponse("GET", microserviceURL)
|
||||
|
||||
request.Header.Set("X-B3-Traceid", traceID)
|
||||
request.Header.Set("X-B3-Spanid", spanID)
|
||||
request.Header.Set("X-B3-Parentspanid", parentSpanID)
|
||||
request.Header.Set("X-B3-Sampled", sampled)
|
||||
request.Header.Set("Traceparent", w3cPadding+traceID)
|
||||
|
||||
defer func(traceProvider *trace.TracerProvider, ctx context.Context) {
|
||||
_ = traceProvider.Shutdown(ctx)
|
||||
}(tracerProvider, request.Context())
|
||||
|
||||
// When
|
||||
instrumentedServeHTTP.ServeHTTP(responseWriter, request)
|
||||
|
||||
// Then
|
||||
receivedRequest := microservice.IncomingRequests()[0]
|
||||
receivedHeaders := receivedRequest.Header
|
||||
|
||||
r.Equal(receivedHeaders.Get("X-B3-Parentspanid"), parentSpanID)
|
||||
r.Equal(receivedHeaders.Get("X-B3-Traceid"), traceID)
|
||||
r.Equal(receivedHeaders.Get("X-B3-Spanid"), spanID)
|
||||
r.Equal(receivedHeaders.Get("X-B3-Sampled"), sampled)
|
||||
r.Equal(receivedHeaders.Get("Traceparent"), w3cPadding+traceID)
|
||||
|
||||
r.NotContains(receivedHeaders, "B3")
|
||||
r.NotContains(receivedHeaders, "b3")
|
||||
|
||||
_ = tracerProvider.ForceFlush(request.Context())
|
||||
|
||||
exportedSpans := exporter.GetSpans()
|
||||
if len(exportedSpans) != 1 {
|
||||
t.Fatalf("Expected 1 Span, got %d", len(exportedSpans))
|
||||
}
|
||||
sc := exportedSpans[0].SpanContext
|
||||
r.Equal(w3cPadding+traceID, sc.TraceID().String())
|
||||
r.NotEqual(sc.SpanID().String(), spanID)
|
||||
}
|
||||
|
||||
func TestW3CPropagation(t *testing.T) {
|
||||
// Given
|
||||
r := require.New(t)
|
||||
|
||||
microservice, microserviceURL, closeServer := startMicroservice(t)
|
||||
defer closeServer()
|
||||
|
||||
exporter, tracerProvider := setupOTelSDKForTesting()
|
||||
instrumentedServeHTTP := withAutoInstrumentation(serveHTTP)
|
||||
|
||||
request, responseWriter := createRequestAndResponse("GET", microserviceURL)
|
||||
|
||||
traceParent := fmt.Sprintf("00-%s-%s-01", fullW3CLengthTraceID, spanID)
|
||||
request.Header.Set("Traceparent", traceParent)
|
||||
|
||||
defer func(traceProvider *trace.TracerProvider, ctx context.Context) {
|
||||
_ = traceProvider.Shutdown(ctx)
|
||||
}(tracerProvider, request.Context())
|
||||
|
||||
// When
|
||||
instrumentedServeHTTP.ServeHTTP(responseWriter, request)
|
||||
|
||||
// Then
|
||||
receivedRequest := microservice.IncomingRequests()[0]
|
||||
receivedHeaders := receivedRequest.Header
|
||||
|
||||
r.Equal(receivedHeaders.Get("Traceparent"), traceParent)
|
||||
|
||||
r.NotContains(receivedHeaders, "B3")
|
||||
r.NotContains(receivedHeaders, "b3")
|
||||
r.NotContains(receivedHeaders, "X-B3-Parentspanid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Traceid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Spanid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Sampled")
|
||||
|
||||
_ = tracerProvider.ForceFlush(request.Context())
|
||||
|
||||
exportedSpans := exporter.GetSpans()
|
||||
if len(exportedSpans) != 1 {
|
||||
t.Fatalf("Expected 1 Span, got %d", len(exportedSpans))
|
||||
}
|
||||
sc := exportedSpans[0].SpanContext
|
||||
r.Equal(fullW3CLengthTraceID, sc.TraceID().String())
|
||||
r.Equal(true, sc.IsSampled())
|
||||
r.NotEqual(sc.SpanID().String(), spanID)
|
||||
}
|
||||
|
||||
func TestPropagationWhenNoHeaders(t *testing.T) {
|
||||
// Given
|
||||
r := require.New(t)
|
||||
|
||||
microservice, microserviceURL, closeServer := startMicroservice(t)
|
||||
defer closeServer()
|
||||
|
||||
exporter, tracerProvider := setupOTelSDKForTesting()
|
||||
instrumentedServeHTTP := withAutoInstrumentation(serveHTTP)
|
||||
|
||||
request, responseWriter := createRequestAndResponse("GET", microserviceURL)
|
||||
|
||||
defer func(traceProvider *trace.TracerProvider, ctx context.Context) {
|
||||
_ = traceProvider.Shutdown(ctx)
|
||||
}(tracerProvider, request.Context())
|
||||
|
||||
// When
|
||||
instrumentedServeHTTP.ServeHTTP(responseWriter, request)
|
||||
|
||||
// Then
|
||||
receivedRequest := microservice.IncomingRequests()[0]
|
||||
receivedHeaders := receivedRequest.Header
|
||||
|
||||
r.NotContains(receivedHeaders, "Traceparent")
|
||||
r.NotContains(receivedHeaders, "B3")
|
||||
r.NotContains(receivedHeaders, "b3")
|
||||
r.NotContains(receivedHeaders, "X-B3-Parentspanid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Traceid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Spanid")
|
||||
r.NotContains(receivedHeaders, "X-B3-Sampled")
|
||||
|
||||
_ = tracerProvider.ForceFlush(request.Context())
|
||||
|
||||
exportedSpans := exporter.GetSpans()
|
||||
if len(exportedSpans) != 1 {
|
||||
t.Fatalf("Expected 1 Span, got %d", len(exportedSpans))
|
||||
}
|
||||
sc := exportedSpans[0].SpanContext
|
||||
r.NotEmpty(sc.SpanID())
|
||||
r.NotEmpty(sc.TraceID())
|
||||
|
||||
hasServiceAttribute := false
|
||||
hasColdStartAttribute := false
|
||||
for _, attribute := range exportedSpans[0].Attributes {
|
||||
if attribute.Key == "service" && attribute.Value.AsString() == "keda-http-interceptor-proxy-upstream" {
|
||||
hasServiceAttribute = true
|
||||
}
|
||||
|
||||
if attribute.Key == "cold-start" {
|
||||
hasColdStartAttribute = true
|
||||
}
|
||||
}
|
||||
r.True(hasServiceAttribute)
|
||||
r.True(hasColdStartAttribute)
|
||||
}
|
||||
|
||||
func TestForwarderSuccess(t *testing.T) {
|
||||
r := require.New(t)
|
||||
// this channel will be closed after the request was received, but
|
||||
|
@ -43,7 +259,7 @@ func TestForwarderSuccess(t *testing.T) {
|
|||
timeouts := defaultTimeouts()
|
||||
dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
|
||||
rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
|
||||
uh := NewUpstream(rt)
|
||||
uh := NewUpstream(rt, &config.Tracing{}, false)
|
||||
uh.ServeHTTP(res, req)
|
||||
|
||||
r.True(
|
||||
|
@ -88,7 +304,7 @@ func TestForwarderHeaderTimeout(t *testing.T) {
|
|||
r.NoError(err)
|
||||
req = util.RequestWithStream(req, originURL)
|
||||
rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
|
||||
uh := NewUpstream(rt)
|
||||
uh := NewUpstream(rt, &config.Tracing{}, false)
|
||||
uh.ServeHTTP(res, req)
|
||||
|
||||
forwardedRequests := hdl.IncomingRequests()
|
||||
|
@ -138,7 +354,7 @@ func TestForwarderWaitsForSlowOrigin(t *testing.T) {
|
|||
r.NoError(err)
|
||||
req = util.RequestWithStream(req, originURL)
|
||||
rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
|
||||
uh := NewUpstream(rt)
|
||||
uh := NewUpstream(rt, &config.Tracing{}, false)
|
||||
uh.ServeHTTP(res, req)
|
||||
// wait for the goroutine above to finish, with a little cusion
|
||||
ensureSignalBeforeTimeout(originWaitCh, originDelay*2)
|
||||
|
@ -161,7 +377,7 @@ func TestForwarderConnectionRetryAndTimeout(t *testing.T) {
|
|||
r.NoError(err)
|
||||
req = util.RequestWithStream(req, noSuchURL)
|
||||
rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
|
||||
uh := NewUpstream(rt)
|
||||
uh := NewUpstream(rt, &config.Tracing{}, false)
|
||||
|
||||
start := time.Now()
|
||||
uh.ServeHTTP(res, req)
|
||||
|
@ -217,7 +433,7 @@ func TestForwardRequestRedirectAndHeaders(t *testing.T) {
|
|||
r.NoError(err)
|
||||
req = util.RequestWithStream(req, srvURL)
|
||||
rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
|
||||
uh := NewUpstream(rt)
|
||||
uh := NewUpstream(rt, &config.Tracing{}, false)
|
||||
uh.ServeHTTP(res, req)
|
||||
r.Equal(301, res.Code)
|
||||
r.Equal("abc123.com", res.Header().Get("Location"))
|
||||
|
@ -238,10 +454,10 @@ func newRoundTripper(
|
|||
|
||||
func defaultTimeouts() config.Timeouts {
|
||||
return config.Timeouts{
|
||||
Connect: 100 * time.Millisecond,
|
||||
KeepAlive: 100 * time.Millisecond,
|
||||
ResponseHeader: 500 * time.Millisecond,
|
||||
DeploymentReplicas: 1 * time.Second,
|
||||
Connect: 100 * time.Millisecond,
|
||||
KeepAlive: 100 * time.Millisecond,
|
||||
ResponseHeader: 500 * time.Millisecond,
|
||||
WorkloadReplicas: 1 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -281,3 +497,56 @@ func ensureSignalBeforeTimeout(signalCh <-chan struct{}, timeout time.Duration)
|
|||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func serveHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
timeouts := defaultTimeouts()
|
||||
dialCtxFunc := retryDialContextFunc(timeouts, timeouts.DefaultBackoff())
|
||||
rt := newRoundTripper(dialCtxFunc, timeouts.ResponseHeader)
|
||||
upstream := NewUpstream(rt, &config.Tracing{Enabled: true}, false)
|
||||
|
||||
upstream.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func setupOTelSDKForTesting() (*tracetest.InMemoryExporter, *trace.TracerProvider) {
|
||||
exporter := tracetest.NewInMemoryExporter()
|
||||
traceProvider := trace.NewTracerProvider(trace.WithBatcher(exporter, trace.WithBatchTimeout(time.Second)))
|
||||
otel.SetTracerProvider(traceProvider)
|
||||
prop := tracing.NewPropagator()
|
||||
otel.SetTextMapPropagator(prop)
|
||||
return exporter, traceProvider
|
||||
}
|
||||
|
||||
func startMicroservice(t *testing.T) (*kedanet.TestHTTPHandlerWrapper, *url.URL, func()) {
|
||||
assert := require.New(t)
|
||||
requestReceiveChannel := make(chan struct{})
|
||||
|
||||
const respCode = 200
|
||||
const respBody = "Success Response"
|
||||
microservice := kedanet.NewTestHTTPHandlerWrapper(
|
||||
http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
close(requestReceiveChannel)
|
||||
w.WriteHeader(respCode)
|
||||
_, err := w.Write([]byte(respBody))
|
||||
assert.NoError(err)
|
||||
}),
|
||||
)
|
||||
server := httptest.NewServer(microservice)
|
||||
|
||||
url, err := url.Parse(server.URL)
|
||||
assert.NoError(err)
|
||||
|
||||
return microservice, url, func() {
|
||||
server.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func createRequestAndResponse(method string, url *url.URL) (*http.Request, http.ResponseWriter) {
|
||||
ctx := util.ContextWithStream(context.Background(), url)
|
||||
request, _ := http.NewRequestWithContext(ctx, method, url.String(), nil)
|
||||
recorder := httptest.NewRecorder()
|
||||
return request, recorder
|
||||
}
|
||||
|
||||
func withAutoInstrumentation(sut func(w http.ResponseWriter, r *http.Request)) http.Handler {
|
||||
return otelhttp.NewHandler(http.HandlerFunc(sut), "SystemUnderTest")
|
||||
}
|
||||
|
|
|
@ -2,165 +2,239 @@ package main
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/sync/errgroup"
|
||||
k8sinformers "k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
|
||||
"github.com/kedacore/http-add-on/interceptor/config"
|
||||
"github.com/kedacore/http-add-on/interceptor/handler"
|
||||
"github.com/kedacore/http-add-on/interceptor/metrics"
|
||||
"github.com/kedacore/http-add-on/interceptor/middleware"
|
||||
"github.com/kedacore/http-add-on/interceptor/tracing"
|
||||
clientset "github.com/kedacore/http-add-on/operator/generated/clientset/versioned"
|
||||
informers "github.com/kedacore/http-add-on/operator/generated/informers/externalversions"
|
||||
"github.com/kedacore/http-add-on/pkg/build"
|
||||
kedahttp "github.com/kedacore/http-add-on/pkg/http"
|
||||
"github.com/kedacore/http-add-on/pkg/k8s"
|
||||
pkglog "github.com/kedacore/http-add-on/pkg/log"
|
||||
kedanet "github.com/kedacore/http-add-on/pkg/net"
|
||||
"github.com/kedacore/http-add-on/pkg/queue"
|
||||
"github.com/kedacore/http-add-on/pkg/routing"
|
||||
"github.com/kedacore/http-add-on/pkg/util"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
var (
|
||||
setupLog = ctrl.Log.WithName("setup")
|
||||
)
|
||||
|
||||
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups=http.keda.sh,resources=httpscaledobjects,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch
|
||||
|
||||
func main() {
|
||||
lggr, err := pkglog.NewZapr()
|
||||
if err != nil {
|
||||
fmt.Println("Error building logger", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer os.Exit(1)
|
||||
timeoutCfg := config.MustParseTimeouts()
|
||||
servingCfg := config.MustParseServing()
|
||||
if err := config.Validate(*servingCfg, *timeoutCfg); err != nil {
|
||||
lggr.Error(err, "invalid configuration")
|
||||
os.Exit(1)
|
||||
metricsCfg := config.MustParseMetrics()
|
||||
tracingCfg := config.MustParseTracing()
|
||||
|
||||
opts := zap.Options{
|
||||
Development: true,
|
||||
}
|
||||
ctx := util.ContextWithLogger(context.Background(), lggr)
|
||||
ctx, ctxDone := context.WithCancel(ctx)
|
||||
lggr.Info(
|
||||
opts.BindFlags(flag.CommandLine)
|
||||
flag.Parse()
|
||||
|
||||
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
|
||||
|
||||
if err := config.Validate(servingCfg, *timeoutCfg, ctrl.Log); err != nil {
|
||||
setupLog.Error(err, "invalid configuration")
|
||||
runtime.Goexit()
|
||||
}
|
||||
|
||||
setupLog.Info(
|
||||
"starting interceptor",
|
||||
"timeoutConfig",
|
||||
timeoutCfg,
|
||||
"servingConfig",
|
||||
servingCfg,
|
||||
"metricsConfig",
|
||||
metricsCfg,
|
||||
)
|
||||
|
||||
proxyPort := servingCfg.ProxyPort
|
||||
adminPort := servingCfg.AdminPort
|
||||
proxyTLSEnabled := servingCfg.ProxyTLSEnabled
|
||||
profilingAddr := servingCfg.ProfilingAddr
|
||||
|
||||
// setup the configured metrics collectors
|
||||
metrics.NewMetricsCollectors(metricsCfg)
|
||||
|
||||
cfg := ctrl.GetConfigOrDie()
|
||||
|
||||
cl, err := kubernetes.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
lggr.Error(err, "creating new Kubernetes ClientSet")
|
||||
os.Exit(1)
|
||||
setupLog.Error(err, "creating new Kubernetes ClientSet")
|
||||
runtime.Goexit()
|
||||
}
|
||||
deployCache := k8s.NewInformerBackedDeploymentCache(
|
||||
lggr,
|
||||
cl,
|
||||
time.Millisecond*time.Duration(servingCfg.DeploymentCachePollIntervalMS),
|
||||
)
|
||||
|
||||
k8sSharedInformerFactory := k8sinformers.NewSharedInformerFactory(cl, time.Millisecond*time.Duration(servingCfg.EndpointsCachePollIntervalMS))
|
||||
svcCache := k8s.NewInformerBackedServiceCache(ctrl.Log, cl, k8sSharedInformerFactory)
|
||||
endpointsCache := k8s.NewInformerBackedEndpointsCache(ctrl.Log, cl, time.Millisecond*time.Duration(servingCfg.EndpointsCachePollIntervalMS))
|
||||
if err != nil {
|
||||
lggr.Error(err, "creating new deployment cache")
|
||||
os.Exit(1)
|
||||
setupLog.Error(err, "creating new endpoints cache")
|
||||
runtime.Goexit()
|
||||
}
|
||||
waitFunc := newDeployReplicasForwardWaitFunc(lggr, deployCache)
|
||||
waitFunc := newWorkloadReplicasForwardWaitFunc(ctrl.Log, endpointsCache)
|
||||
|
||||
httpCl, err := clientset.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
lggr.Error(err, "creating new HTTP ClientSet")
|
||||
os.Exit(1)
|
||||
setupLog.Error(err, "creating new HTTP ClientSet")
|
||||
runtime.Goexit()
|
||||
}
|
||||
|
||||
queues := queue.NewMemory()
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(httpCl, servingCfg.ConfigMapCacheRsyncPeriod)
|
||||
routingTable, err := routing.NewTable(sharedInformerFactory, servingCfg.WatchNamespace)
|
||||
routingTable, err := routing.NewTable(sharedInformerFactory, servingCfg.WatchNamespace, queues)
|
||||
if err != nil {
|
||||
lggr.Error(err, "fetching routing table")
|
||||
os.Exit(1)
|
||||
setupLog.Error(err, "fetching routing table")
|
||||
runtime.Goexit()
|
||||
}
|
||||
|
||||
q := queue.NewMemory()
|
||||
setupLog.Info("Interceptor starting")
|
||||
|
||||
lggr.Info("Interceptor starting")
|
||||
ctx := ctrl.SetupSignalHandler()
|
||||
ctx = util.ContextWithLogger(ctx, ctrl.Log)
|
||||
|
||||
errGrp, ctx := errgroup.WithContext(ctx)
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
// start the deployment cache updater
|
||||
errGrp.Go(func() error {
|
||||
defer ctxDone()
|
||||
err := deployCache.Start(ctx)
|
||||
lggr.Error(err, "deployment cache watcher failed")
|
||||
return err
|
||||
if tracingCfg.Enabled {
|
||||
shutdown, err := tracing.SetupOTelSDK(ctx, tracingCfg)
|
||||
|
||||
if err != nil {
|
||||
setupLog.Error(err, "Error setting up tracer")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = errors.Join(err, shutdown(context.Background()))
|
||||
}()
|
||||
}
|
||||
|
||||
// start the endpoints cache updater
|
||||
eg.Go(func() error {
|
||||
setupLog.Info("starting the endpoints cache")
|
||||
|
||||
endpointsCache.Start(ctx)
|
||||
k8sSharedInformerFactory.Start(ctx.Done())
|
||||
return nil
|
||||
})
|
||||
|
||||
// start the update loop that updates the routing table from
|
||||
// the ConfigMap that the operator updates as HTTPScaledObjects
|
||||
// enter and exit the system
|
||||
errGrp.Go(func() error {
|
||||
defer ctxDone()
|
||||
err := routingTable.Start(ctx)
|
||||
lggr.Error(err, "config map routing table updater failed")
|
||||
return err
|
||||
eg.Go(func() error {
|
||||
setupLog.Info("starting the routing table")
|
||||
|
||||
if err := routingTable.Start(ctx); !util.IsIgnoredErr(err) {
|
||||
setupLog.Error(err, "routing table failed")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// start the administrative server. this is the server
|
||||
// that serves the queue size API
|
||||
errGrp.Go(func() error {
|
||||
defer ctxDone()
|
||||
lggr.Info(
|
||||
"starting the admin server",
|
||||
"port",
|
||||
adminPort,
|
||||
)
|
||||
err := runAdminServer(
|
||||
ctx,
|
||||
lggr,
|
||||
q,
|
||||
adminPort,
|
||||
)
|
||||
lggr.Error(err, "admin server failed")
|
||||
return err
|
||||
eg.Go(func() error {
|
||||
setupLog.Info("starting the admin server", "port", adminPort)
|
||||
|
||||
if err := runAdminServer(ctx, ctrl.Log, queues, adminPort); !util.IsIgnoredErr(err) {
|
||||
setupLog.Error(err, "admin server failed")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// start the proxy server. this is the server that
|
||||
if metricsCfg.OtelPrometheusExporterEnabled {
|
||||
// start the prometheus compatible metrics server
|
||||
// serves a prometheus compatible metrics endpoint on the configured port
|
||||
eg.Go(func() error {
|
||||
if err := runMetricsServer(ctx, ctrl.Log, metricsCfg); !util.IsIgnoredErr(err) {
|
||||
setupLog.Error(err, "could not start the Prometheus metrics server")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// start the proxy servers. This is the server that
|
||||
// accepts, holds and forwards user requests
|
||||
errGrp.Go(func() error {
|
||||
defer ctxDone()
|
||||
lggr.Info(
|
||||
"starting the proxy server",
|
||||
"port",
|
||||
proxyPort,
|
||||
)
|
||||
err := runProxyServer(
|
||||
ctx,
|
||||
lggr,
|
||||
q,
|
||||
waitFunc,
|
||||
routingTable,
|
||||
timeoutCfg,
|
||||
proxyPort,
|
||||
)
|
||||
lggr.Error(err, "proxy server failed")
|
||||
return err
|
||||
})
|
||||
build.PrintComponentInfo(lggr, "Interceptor")
|
||||
// start a proxy server with TLS
|
||||
if proxyTLSEnabled {
|
||||
eg.Go(func() error {
|
||||
proxyTLSConfig := map[string]interface{}{"certificatePath": servingCfg.TLSCertPath, "keyPath": servingCfg.TLSKeyPath, "certstorePaths": servingCfg.TLSCertStorePaths, "skipVerify": servingCfg.TLSSkipVerify}
|
||||
proxyTLSPort := servingCfg.TLSPort
|
||||
k8sSharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
// errGrp.Wait() should hang forever for healthy admin and proxy servers.
|
||||
// if it returns an error, log and exit immediately.
|
||||
waitErr := errGrp.Wait()
|
||||
lggr.Error(waitErr, "error with interceptor")
|
||||
os.Exit(1)
|
||||
setupLog.Info("starting the proxy server with TLS enabled", "port", proxyTLSPort)
|
||||
|
||||
if err := runProxyServer(ctx, ctrl.Log, queues, waitFunc, routingTable, svcCache, timeoutCfg, proxyTLSPort, proxyTLSEnabled, proxyTLSConfig, tracingCfg); !util.IsIgnoredErr(err) {
|
||||
setupLog.Error(err, "tls proxy server failed")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// start a proxy server without TLS.
|
||||
eg.Go(func() error {
|
||||
k8sSharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
setupLog.Info("starting the proxy server with TLS disabled", "port", proxyPort)
|
||||
|
||||
k8sSharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
if err := runProxyServer(ctx, ctrl.Log, queues, waitFunc, routingTable, svcCache, timeoutCfg, proxyPort, false, nil, tracingCfg); !util.IsIgnoredErr(err) {
|
||||
setupLog.Error(err, "proxy server failed")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if len(profilingAddr) > 0 {
|
||||
eg.Go(func() error {
|
||||
setupLog.Info("enabling pprof for profiling", "address", profilingAddr)
|
||||
return http.ListenAndServe(profilingAddr, nil)
|
||||
})
|
||||
}
|
||||
|
||||
build.PrintComponentInfo(ctrl.Log, "Interceptor")
|
||||
|
||||
if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) {
|
||||
setupLog.Error(err, "fatal error")
|
||||
runtime.Goexit()
|
||||
}
|
||||
|
||||
setupLog.Info("Bye!")
|
||||
}
|
||||
|
||||
func runAdminServer(
|
||||
|
@ -179,7 +253,148 @@ func runAdminServer(
|
|||
|
||||
addr := fmt.Sprintf("0.0.0.0:%d", port)
|
||||
lggr.Info("admin server starting", "address", addr)
|
||||
return kedahttp.ServeContext(ctx, addr, adminServer)
|
||||
return kedahttp.ServeContext(ctx, addr, adminServer, nil)
|
||||
}
|
||||
|
||||
func runMetricsServer(
|
||||
ctx context.Context,
|
||||
lggr logr.Logger,
|
||||
metricsCfg *config.Metrics,
|
||||
) error {
|
||||
lggr.Info("starting the prometheus metrics server", "port", metricsCfg.OtelPrometheusExporterPort, "path", "/metrics")
|
||||
addr := fmt.Sprintf("0.0.0.0:%d", metricsCfg.OtelPrometheusExporterPort)
|
||||
return kedahttp.ServeContext(ctx, addr, promhttp.Handler(), nil)
|
||||
}
|
||||
|
||||
// addCert adds a certificate to the map of certificates based on the certificate's SANs
|
||||
func addCert(m map[string]tls.Certificate, certPath, keyPath string, logger logr.Logger) (*tls.Certificate, error) {
|
||||
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading certificate and key: %w", err)
|
||||
}
|
||||
if cert.Leaf == nil {
|
||||
if len(cert.Certificate) == 0 {
|
||||
return nil, fmt.Errorf("no certificate found in certificate chain")
|
||||
}
|
||||
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing certificate: %w", err)
|
||||
}
|
||||
}
|
||||
for _, d := range cert.Leaf.DNSNames {
|
||||
logger.Info("adding certificate", "dns", d)
|
||||
m[d] = cert
|
||||
}
|
||||
for _, ip := range cert.Leaf.IPAddresses {
|
||||
logger.Info("adding certificate", "ip", ip.String())
|
||||
m[ip.String()] = cert
|
||||
}
|
||||
for _, uri := range cert.Leaf.URIs {
|
||||
logger.Info("adding certificate", "uri", uri.String())
|
||||
m[uri.String()] = cert
|
||||
}
|
||||
return &cert, nil
|
||||
}
|
||||
|
||||
func defaultCertPool(logger logr.Logger) *x509.CertPool {
|
||||
systemCAs, err := x509.SystemCertPool()
|
||||
if err == nil {
|
||||
return systemCAs
|
||||
}
|
||||
|
||||
logger.Info("error loading system CA pool, using empty pool", "error", err)
|
||||
return x509.NewCertPool()
|
||||
}
|
||||
|
||||
// getTLSConfig creates a TLS config from KEDA_HTTP_PROXY_TLS_CERT_PATH, KEDA_HTTP_PROXY_TLS_KEY_PATH and KEDA_HTTP_PROXY_TLS_CERTSTORE_PATHS
|
||||
// The matching between request and certificate is performed by comparing TLS/SNI server name with x509 SANs
|
||||
func getTLSConfig(tlsConfig map[string]interface{}, logger logr.Logger) (*tls.Config, error) {
|
||||
certPath, _ := tlsConfig["certificatePath"].(string)
|
||||
keyPath, _ := tlsConfig["keyPath"].(string)
|
||||
certStorePaths, _ := tlsConfig["certstorePaths"].(string)
|
||||
insecureSkipVerify, _ := tlsConfig["skipVerify"].(bool)
|
||||
|
||||
servingTLS := &tls.Config{
|
||||
RootCAs: defaultCertPool(logger),
|
||||
InsecureSkipVerify: insecureSkipVerify,
|
||||
}
|
||||
var defaultCert *tls.Certificate
|
||||
|
||||
uriDomainsToCerts := make(map[string]tls.Certificate)
|
||||
if certPath != "" && keyPath != "" {
|
||||
cert, err := addCert(uriDomainsToCerts, certPath, keyPath, logger)
|
||||
if err != nil {
|
||||
return servingTLS, fmt.Errorf("error adding certificate and key: %w", err)
|
||||
}
|
||||
defaultCert = cert
|
||||
rawCert, err := os.ReadFile(certPath)
|
||||
if err != nil {
|
||||
return servingTLS, fmt.Errorf("error reading certificate: %w", err)
|
||||
}
|
||||
servingTLS.RootCAs.AppendCertsFromPEM(rawCert)
|
||||
}
|
||||
|
||||
if certStorePaths != "" {
|
||||
certFiles := make(map[string]string)
|
||||
keyFiles := make(map[string]string)
|
||||
dirPaths := strings.Split(certStorePaths, ",")
|
||||
for _, dir := range dirPaths {
|
||||
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
switch {
|
||||
case strings.HasSuffix(path, "-key.pem"):
|
||||
certID := path[:len(path)-8]
|
||||
keyFiles[certID] = path
|
||||
case strings.HasSuffix(path, ".pem"):
|
||||
certID := path[:len(path)-4]
|
||||
certFiles[certID] = path
|
||||
case strings.HasSuffix(path, ".key"):
|
||||
certID := path[:len(path)-4]
|
||||
keyFiles[certID] = path
|
||||
case strings.HasSuffix(path, ".crt"):
|
||||
certID := path[:len(path)-4]
|
||||
certFiles[certID] = path
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return servingTLS, fmt.Errorf("error walking certificate store: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for certID, certPath := range certFiles {
|
||||
logger.Info("adding certificate", "certID", certID, "certPath", certPath)
|
||||
keyPath, ok := keyFiles[certID]
|
||||
if !ok {
|
||||
return servingTLS, fmt.Errorf("no key found for certificate %s", certPath)
|
||||
}
|
||||
if _, err := addCert(uriDomainsToCerts, certPath, keyPath, logger); err != nil {
|
||||
return servingTLS, fmt.Errorf("error adding certificate %s: %w", certPath, err)
|
||||
}
|
||||
rawCert, err := os.ReadFile(certPath)
|
||||
if err != nil {
|
||||
return servingTLS, fmt.Errorf("error reading certificate: %w", err)
|
||||
}
|
||||
servingTLS.RootCAs.AppendCertsFromPEM(rawCert)
|
||||
}
|
||||
}
|
||||
|
||||
servingTLS.GetCertificate = func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
if cert, ok := uriDomainsToCerts[hello.ServerName]; ok {
|
||||
return &cert, nil
|
||||
}
|
||||
if defaultCert != nil {
|
||||
return defaultCert, nil
|
||||
}
|
||||
return nil, fmt.Errorf("no certificate found for %s", hello.ServerName)
|
||||
}
|
||||
servingTLS.Certificates = maps.Values(uriDomainsToCerts)
|
||||
return servingTLS, nil
|
||||
}
|
||||
|
||||
func runProxyServer(
|
||||
|
@ -188,8 +403,12 @@ func runProxyServer(
|
|||
q queue.Counter,
|
||||
waitFunc forwardWaitFunc,
|
||||
routingTable routing.Table,
|
||||
svcCache k8s.ServiceCache,
|
||||
timeouts *config.Timeouts,
|
||||
port int,
|
||||
tlsEnabled bool,
|
||||
tlsConfig map[string]interface{},
|
||||
tracingConfig *config.Tracing,
|
||||
) error {
|
||||
dialer := kedanet.NewNetDialer(timeouts.Connect, timeouts.KeepAlive)
|
||||
dialContextFunc := kedanet.DialContextWithRetry(dialer, timeouts.DefaultBackoff())
|
||||
|
@ -199,12 +418,31 @@ func runProxyServer(
|
|||
})
|
||||
go probeHandler.Start(ctx)
|
||||
|
||||
var tlsCfg *tls.Config
|
||||
if tlsEnabled {
|
||||
cfg, err := getTLSConfig(tlsConfig, logger)
|
||||
if err != nil {
|
||||
logger.Error(fmt.Errorf("error creating certGetter for proxy server"), "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
tlsCfg = cfg
|
||||
}
|
||||
|
||||
var upstreamHandler http.Handler
|
||||
forwardingTLSCfg := &tls.Config{}
|
||||
if tlsCfg != nil {
|
||||
forwardingTLSCfg.RootCAs = tlsCfg.RootCAs
|
||||
forwardingTLSCfg.Certificates = tlsCfg.Certificates
|
||||
forwardingTLSCfg.InsecureSkipVerify = tlsCfg.InsecureSkipVerify
|
||||
}
|
||||
|
||||
upstreamHandler = newForwardingHandler(
|
||||
logger,
|
||||
dialContextFunc,
|
||||
waitFunc,
|
||||
newForwardingConfigFromTimeouts(timeouts),
|
||||
forwardingTLSCfg,
|
||||
tracingConfig,
|
||||
)
|
||||
upstreamHandler = middleware.NewCountingMiddleware(
|
||||
q,
|
||||
|
@ -216,13 +454,27 @@ func runProxyServer(
|
|||
routingTable,
|
||||
probeHandler,
|
||||
upstreamHandler,
|
||||
svcCache,
|
||||
tlsEnabled,
|
||||
)
|
||||
|
||||
if tracingConfig.Enabled {
|
||||
rootHandler = otelhttp.NewHandler(rootHandler, "keda-http-interceptor")
|
||||
}
|
||||
|
||||
rootHandler = middleware.NewLogging(
|
||||
logger,
|
||||
rootHandler,
|
||||
)
|
||||
|
||||
rootHandler = middleware.NewMetrics(
|
||||
rootHandler,
|
||||
)
|
||||
|
||||
addr := fmt.Sprintf("0.0.0.0:%d", port)
|
||||
logger.Info("proxy server starting", "address", addr)
|
||||
return kedahttp.ServeContext(ctx, addr, rootHandler)
|
||||
if tlsEnabled {
|
||||
return kedahttp.ServeContext(ctx, addr, rootHandler, tlsCfg)
|
||||
}
|
||||
return kedahttp.ServeContext(ctx, addr, rootHandler, nil)
|
||||
}
|
||||
|
|
|
@ -2,8 +2,11 @@ package main
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -13,12 +16,15 @@ import (
|
|||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/kedacore/http-add-on/interceptor/config"
|
||||
"github.com/kedacore/http-add-on/interceptor/tracing"
|
||||
"github.com/kedacore/http-add-on/pkg/k8s"
|
||||
kedanet "github.com/kedacore/http-add-on/pkg/net"
|
||||
"github.com/kedacore/http-add-on/pkg/queue"
|
||||
routingtest "github.com/kedacore/http-add-on/pkg/routing/test"
|
||||
)
|
||||
|
||||
const falseStr = "false"
|
||||
|
||||
func TestRunProxyServerCountMiddleware(t *testing.T) {
|
||||
const (
|
||||
port = 8080
|
||||
|
@ -47,6 +53,7 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
|
|||
originURL,
|
||||
originPort,
|
||||
"testdepl",
|
||||
"testservice",
|
||||
)
|
||||
namespacedName := k8s.NamespacedNameFromObject(httpso).String()
|
||||
|
||||
|
@ -57,13 +64,23 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
|
|||
// server
|
||||
routingTable := routingtest.NewTable()
|
||||
routingTable.Memory[host] = httpso
|
||||
svcCache := k8s.NewFakeServiceCache()
|
||||
|
||||
timeouts := &config.Timeouts{}
|
||||
waiterCh := make(chan struct{})
|
||||
waitFunc := func(_ context.Context, _, _ string) (int, error) {
|
||||
waitFunc := func(_ context.Context, _, _ string) (bool, error) {
|
||||
<-waiterCh
|
||||
return 1, nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
tracingCfg := config.Tracing{Enabled: true, Exporter: "otlphttp"}
|
||||
|
||||
_, err = tracing.SetupOTelSDK(ctx, &tracingCfg)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println(err, "Error setting up tracer")
|
||||
}
|
||||
|
||||
g.Go(func() error {
|
||||
return runProxyServer(
|
||||
ctx,
|
||||
|
@ -71,8 +88,12 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
|
|||
q,
|
||||
waitFunc,
|
||||
routingTable,
|
||||
svcCache,
|
||||
timeouts,
|
||||
port,
|
||||
false,
|
||||
map[string]interface{}{},
|
||||
&tracingCfg,
|
||||
)
|
||||
})
|
||||
// wait for server to start
|
||||
|
@ -101,6 +122,10 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
|
|||
resp.StatusCode,
|
||||
)
|
||||
}
|
||||
if _, ok := resp.Header["Traceparent"]; !ok {
|
||||
return fmt.Errorf("expected Traceparent header to exist, but the header wasn't found")
|
||||
}
|
||||
|
||||
if resp.Header.Get("X-KEDA-HTTP-Cold-Start") != "false" {
|
||||
return fmt.Errorf("expected X-KEDA-HTTP-Cold-Start false, but got %s", resp.Header.Get("X-KEDA-HTTP-Cold-Start"))
|
||||
}
|
||||
|
@ -110,7 +135,7 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
|
|||
select {
|
||||
case hostAndCount := <-q.ResizedCh:
|
||||
r.Equal(namespacedName, hostAndCount.Host)
|
||||
r.Equal(+1, hostAndCount.Count)
|
||||
r.Equal(1, hostAndCount.Count)
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
r.Fail("timeout waiting for +1 queue resize")
|
||||
}
|
||||
|
@ -125,7 +150,7 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
|
|||
select {
|
||||
case hostAndCount := <-q.ResizedCh:
|
||||
r.Equal(namespacedName, hostAndCount.Host)
|
||||
r.Equal(-1, hostAndCount.Count)
|
||||
r.Equal(1, hostAndCount.Count)
|
||||
case <-time.After(2 * time.Second):
|
||||
r.Fail("timeout waiting for -1 queue resize")
|
||||
}
|
||||
|
@ -141,7 +166,306 @@ func TestRunProxyServerCountMiddleware(t *testing.T) {
|
|||
"couldn't find host %s in the queue",
|
||||
host,
|
||||
)
|
||||
r.Equal(0, counts[namespacedName])
|
||||
r.Equal(0, counts[namespacedName].Concurrency)
|
||||
|
||||
done()
|
||||
r.Error(g.Wait())
|
||||
}
|
||||
|
||||
func TestRunProxyServerWithTLSCountMiddleware(t *testing.T) {
|
||||
const (
|
||||
port = 8443
|
||||
host = "samplehost"
|
||||
)
|
||||
r := require.New(t)
|
||||
ctx, done := context.WithCancel(
|
||||
context.Background(),
|
||||
)
|
||||
defer done()
|
||||
|
||||
originHdl := kedanet.NewTestHTTPHandlerWrapper(
|
||||
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}),
|
||||
)
|
||||
originSrv, originURL, err := kedanet.StartTestServer(originHdl)
|
||||
r.NoError(err)
|
||||
defer originSrv.Close()
|
||||
originPort, err := strconv.Atoi(originURL.Port())
|
||||
r.NoError(err)
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
q := queue.NewFakeCounter()
|
||||
|
||||
httpso := targetFromURL(
|
||||
originURL,
|
||||
originPort,
|
||||
"testdepl",
|
||||
"testsvc",
|
||||
)
|
||||
namespacedName := k8s.NamespacedNameFromObject(httpso).String()
|
||||
|
||||
// set up a fake host that we can spoof
|
||||
// when we later send request to the proxy,
|
||||
// so that the proxy calculates a URL for that
|
||||
// host that points to the (above) fake origin
|
||||
// server
|
||||
routingTable := routingtest.NewTable()
|
||||
routingTable.Memory[host] = httpso
|
||||
svcCache := k8s.NewFakeServiceCache()
|
||||
|
||||
timeouts := &config.Timeouts{}
|
||||
waiterCh := make(chan struct{})
|
||||
waitFunc := func(_ context.Context, _, _ string) (bool, error) {
|
||||
<-waiterCh
|
||||
return false, nil
|
||||
}
|
||||
tracingCfg := config.Tracing{Enabled: true, Exporter: "otlphttp"}
|
||||
|
||||
g.Go(func() error {
|
||||
return runProxyServer(
|
||||
ctx,
|
||||
logr.Discard(),
|
||||
q,
|
||||
waitFunc,
|
||||
routingTable,
|
||||
svcCache,
|
||||
timeouts,
|
||||
port,
|
||||
true,
|
||||
map[string]interface{}{"certificatePath": "../certs/tls.crt", "keyPath": "../certs/tls.key", "skipVerify": true},
|
||||
&tracingCfg,
|
||||
)
|
||||
})
|
||||
|
||||
// wait for server to start
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// make an HTTPs request in the background
|
||||
g.Go(func() error {
|
||||
f, err := os.ReadFile("../certs/RootCA.pem")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to find RootCA for test, please run tests via `make test`")
|
||||
}
|
||||
rootCAs, _ := x509.SystemCertPool()
|
||||
rootCAs.AppendCertsFromPEM(f)
|
||||
|
||||
http.DefaultClient.Transport = &http.Transport{
|
||||
TLSClientConfig: &tls.Config{RootCAs: rootCAs},
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(
|
||||
"GET",
|
||||
fmt.Sprintf(
|
||||
"https://localhost:%d", port,
|
||||
), nil,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Host = host
|
||||
// Allow us to use our self made certs
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf(
|
||||
"unexpected status code: %d",
|
||||
resp.StatusCode,
|
||||
)
|
||||
}
|
||||
if resp.Header.Get("X-KEDA-HTTP-Cold-Start") != falseStr {
|
||||
return fmt.Errorf("expected X-KEDA-HTTP-Cold-Start false, but got %s", resp.Header.Get("X-KEDA-HTTP-Cold-Start"))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
select {
|
||||
case hostAndCount := <-q.ResizedCh:
|
||||
r.Equal(namespacedName, hostAndCount.Host)
|
||||
r.Equal(1, hostAndCount.Count)
|
||||
case <-time.After(2000 * time.Millisecond):
|
||||
r.Fail("timeout waiting for +1 queue resize")
|
||||
}
|
||||
|
||||
// tell the wait func to proceed
|
||||
select {
|
||||
case waiterCh <- struct{}{}:
|
||||
case <-time.After(5 * time.Second):
|
||||
r.Fail("timeout producing on waiterCh")
|
||||
}
|
||||
|
||||
select {
|
||||
case hostAndCount := <-q.ResizedCh:
|
||||
r.Equal(namespacedName, hostAndCount.Host)
|
||||
r.Equal(1, hostAndCount.Count)
|
||||
case <-time.After(2 * time.Second):
|
||||
r.Fail("timeout waiting for -1 queue resize")
|
||||
}
|
||||
|
||||
// check the queue to make sure all counts are at 0
|
||||
countsPtr, err := q.Current()
|
||||
r.NoError(err)
|
||||
counts := countsPtr.Counts
|
||||
r.Equal(1, len(counts))
|
||||
_, foundHost := counts[namespacedName]
|
||||
r.True(
|
||||
foundHost,
|
||||
"couldn't find host %s in the queue",
|
||||
host,
|
||||
)
|
||||
r.Equal(0, counts[namespacedName].Concurrency)
|
||||
|
||||
done()
|
||||
r.Error(g.Wait())
|
||||
}
|
||||
|
||||
func TestRunProxyServerWithMultipleCertsTLSCountMiddleware(t *testing.T) {
|
||||
const (
|
||||
port = 8443
|
||||
host = "samplehost"
|
||||
)
|
||||
r := require.New(t)
|
||||
ctx, done := context.WithCancel(
|
||||
context.Background(),
|
||||
)
|
||||
defer done()
|
||||
|
||||
originHdl := kedanet.NewTestHTTPHandlerWrapper(
|
||||
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}),
|
||||
)
|
||||
originSrv, originURL, err := kedanet.StartTestServer(originHdl)
|
||||
r.NoError(err)
|
||||
defer originSrv.Close()
|
||||
originPort, err := strconv.Atoi(originURL.Port())
|
||||
r.NoError(err)
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
q := queue.NewFakeCounter()
|
||||
|
||||
httpso := targetFromURL(
|
||||
originURL,
|
||||
originPort,
|
||||
"testdepl",
|
||||
"testsvc",
|
||||
)
|
||||
namespacedName := k8s.NamespacedNameFromObject(httpso).String()
|
||||
|
||||
// set up a fake host that we can spoof
|
||||
// when we later send request to the proxy,
|
||||
// so that the proxy calculates a URL for that
|
||||
// host that points to the (above) fake origin
|
||||
// server
|
||||
routingTable := routingtest.NewTable()
|
||||
routingTable.Memory[host] = httpso
|
||||
svcCache := k8s.NewFakeServiceCache()
|
||||
|
||||
timeouts := &config.Timeouts{}
|
||||
waiterCh := make(chan struct{})
|
||||
waitFunc := func(_ context.Context, _, _ string) (bool, error) {
|
||||
<-waiterCh
|
||||
return false, nil
|
||||
}
|
||||
|
||||
tracingCfg := config.Tracing{Enabled: true, Exporter: "otlphttp"}
|
||||
|
||||
g.Go(func() error {
|
||||
return runProxyServer(
|
||||
ctx,
|
||||
logr.Discard(),
|
||||
q,
|
||||
waitFunc,
|
||||
routingTable,
|
||||
svcCache,
|
||||
timeouts,
|
||||
port,
|
||||
true,
|
||||
map[string]interface{}{"certstorePaths": "../certs"},
|
||||
&tracingCfg,
|
||||
)
|
||||
})
|
||||
|
||||
// wait for server to start
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// make an HTTPs request in the background
|
||||
g.Go(func() error {
|
||||
f, err := os.ReadFile("../certs/RootCA.pem")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to find RootCA for test, please run tests via `make test`")
|
||||
}
|
||||
rootCAs, _ := x509.SystemCertPool()
|
||||
rootCAs.AppendCertsFromPEM(f)
|
||||
|
||||
http.DefaultClient.Transport = &http.Transport{
|
||||
TLSClientConfig: &tls.Config{RootCAs: rootCAs},
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(
|
||||
"GET",
|
||||
fmt.Sprintf(
|
||||
"https://localhost:%d", port,
|
||||
), nil,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Host = host
|
||||
// Allow us to use our self made certs
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf(
|
||||
"unexpected status code: %d",
|
||||
resp.StatusCode,
|
||||
)
|
||||
}
|
||||
if resp.Header.Get("X-KEDA-HTTP-Cold-Start") != falseStr {
|
||||
return fmt.Errorf("expected X-KEDA-HTTP-Cold-Start false, but got %s", resp.Header.Get("X-KEDA-HTTP-Cold-Start"))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
select {
|
||||
case hostAndCount := <-q.ResizedCh:
|
||||
r.Equal(namespacedName, hostAndCount.Host)
|
||||
r.Equal(1, hostAndCount.Count)
|
||||
case <-time.After(2000 * time.Millisecond):
|
||||
r.Fail("timeout waiting for +1 queue resize")
|
||||
}
|
||||
|
||||
// tell the wait func to proceed
|
||||
select {
|
||||
case waiterCh <- struct{}{}:
|
||||
case <-time.After(5 * time.Second):
|
||||
r.Fail("timeout producing on waiterCh")
|
||||
}
|
||||
|
||||
select {
|
||||
case hostAndCount := <-q.ResizedCh:
|
||||
r.Equal(namespacedName, hostAndCount.Host)
|
||||
r.Equal(1, hostAndCount.Count)
|
||||
case <-time.After(2 * time.Second):
|
||||
r.Fail("timeout waiting for -1 queue resize")
|
||||
}
|
||||
|
||||
// check the queue to make sure all counts are at 0
|
||||
countsPtr, err := q.Current()
|
||||
r.NoError(err)
|
||||
counts := countsPtr.Counts
|
||||
r.Equal(1, len(counts))
|
||||
_, foundHost := counts[namespacedName]
|
||||
r.True(
|
||||
foundHost,
|
||||
"couldn't find host %s in the queue",
|
||||
host,
|
||||
)
|
||||
r.Equal(0, counts[namespacedName].Concurrency)
|
||||
|
||||
done()
|
||||
r.Error(g.Wait())
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/kedacore/http-add-on/interceptor/config"
|
||||
)
|
||||
|
||||
// collectors holds every metrics backend enabled via NewMetricsCollectors;
// the package-level Record* helpers fan requests out to each entry.
var (
	collectors []Collector
)

// meterName identifies the interceptor proxy meter within the OTel SDK.
const meterName = "keda-interceptor-proxy"

// Collector is implemented by each metrics backend (Prometheus exposition,
// OTLP HTTP push) that records interceptor request metrics.
type Collector interface {
	RecordRequestCount(method string, path string, responseCode int, host string)
	RecordPendingRequestCount(host string, value int64)
}
|
||||
|
||||
func NewMetricsCollectors(metricsConfig *config.Metrics) {
|
||||
if metricsConfig.OtelPrometheusExporterEnabled {
|
||||
promometrics := NewPrometheusMetrics()
|
||||
collectors = append(collectors, promometrics)
|
||||
}
|
||||
|
||||
if metricsConfig.OtelHTTPExporterEnabled {
|
||||
otelhttpmetrics := NewOtelMetrics()
|
||||
collectors = append(collectors, otelhttpmetrics)
|
||||
}
|
||||
}
|
||||
|
||||
func RecordRequestCount(method string, path string, responseCode int, host string) {
|
||||
for _, collector := range collectors {
|
||||
collector.RecordRequestCount(method, path, responseCode, host)
|
||||
}
|
||||
}
|
||||
|
||||
func RecordPendingRequestCount(host string, value int64) {
|
||||
for _, collector := range collectors {
|
||||
collector.RecordPendingRequestCount(host, value)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,86 @@
|
|||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
api "go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
|
||||
|
||||
"github.com/kedacore/http-add-on/pkg/build"
|
||||
)
|
||||
|
||||
// OtelMetrics is a Collector that records interceptor request metrics and
// (by default) exports them over OTLP HTTP.
type OtelMetrics struct {
	meter                 api.Meter
	requestCounter        api.Int64Counter
	pendingRequestCounter api.Int64UpDownCounter
}
|
||||
|
||||
func NewOtelMetrics(options ...metric.Option) *OtelMetrics {
|
||||
ctx := context.Background()
|
||||
|
||||
exporter, err := otlpmetrichttp.New(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("could not create otelmetrichttp exporter: %v", err)
|
||||
}
|
||||
|
||||
if options == nil {
|
||||
res := resource.NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.ServiceNameKey.String("interceptor-proxy"),
|
||||
semconv.ServiceVersionKey.String(build.Version()),
|
||||
)
|
||||
|
||||
options = []metric.Option{
|
||||
metric.WithReader(metric.NewPeriodicReader(exporter)),
|
||||
metric.WithResource(res),
|
||||
}
|
||||
}
|
||||
|
||||
provider := metric.NewMeterProvider(options...)
|
||||
meter := provider.Meter(meterName)
|
||||
|
||||
reqCounter, err := meter.Int64Counter("interceptor_request_count", api.WithDescription("a counter of requests processed by the interceptor proxy"))
|
||||
if err != nil {
|
||||
log.Fatalf("could not create new otelhttpmetric request counter: %v", err)
|
||||
}
|
||||
|
||||
pendingRequestCounter, err := meter.Int64UpDownCounter("interceptor_pending_request_count", api.WithDescription("a count of requests pending forwarding by the interceptor proxy"))
|
||||
if err != nil {
|
||||
log.Fatalf("could not create new otelhttpmetric pending request counter: %v", err)
|
||||
}
|
||||
|
||||
return &OtelMetrics{
|
||||
meter: meter,
|
||||
requestCounter: reqCounter,
|
||||
pendingRequestCounter: pendingRequestCounter,
|
||||
}
|
||||
}
|
||||
|
||||
func (om *OtelMetrics) RecordRequestCount(method string, path string, responseCode int, host string) {
|
||||
ctx := context.Background()
|
||||
opt := api.WithAttributeSet(
|
||||
attribute.NewSet(
|
||||
attribute.Key("method").String(method),
|
||||
attribute.Key("path").String(path),
|
||||
attribute.Key("code").Int(responseCode),
|
||||
attribute.Key("host").String(host),
|
||||
),
|
||||
)
|
||||
om.requestCounter.Add(ctx, 1, opt)
|
||||
}
|
||||
|
||||
func (om *OtelMetrics) RecordPendingRequestCount(host string, value int64) {
|
||||
ctx := context.Background()
|
||||
opt := api.WithAttributeSet(
|
||||
attribute.NewSet(
|
||||
attribute.Key("host").String(host),
|
||||
),
|
||||
)
|
||||
|
||||
om.pendingRequestCounter.Add(ctx, value, opt)
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
var (
	// testOtel is the collector under test; it is wired to testReader so
	// the tests can pull metric data on demand instead of exporting it.
	testOtel   *OtelMetrics
	testReader metric.Reader
)

func init() {
	// use a manual reader so each test can drive Collect() itself
	testReader = metric.NewManualReader()
	options := metric.WithReader(testReader)
	testOtel = NewOtelMetrics(options)
}
|
||||
|
||||
func TestRequestCounter(t *testing.T) {
|
||||
testOtel.RecordRequestCount("GET", "/test", 200, "test-host-1")
|
||||
got := metricdata.ResourceMetrics{}
|
||||
err := testReader.Collect(context.Background(), &got)
|
||||
|
||||
assert.Nil(t, err)
|
||||
scopeMetrics := got.ScopeMetrics[0]
|
||||
assert.NotEqual(t, len(scopeMetrics.Metrics), 0)
|
||||
|
||||
metricInfo := retrieveMetric(scopeMetrics.Metrics, "interceptor_request_count")
|
||||
data := metricInfo.Data.(metricdata.Sum[int64]).DataPoints[0]
|
||||
assert.Equal(t, data.Value, int64(1))
|
||||
}
|
||||
|
||||
func TestPendingRequestCounter(t *testing.T) {
|
||||
testOtel.RecordPendingRequestCount("test-host", 5)
|
||||
got := metricdata.ResourceMetrics{}
|
||||
err := testReader.Collect(context.Background(), &got)
|
||||
|
||||
assert.Nil(t, err)
|
||||
scopeMetrics := got.ScopeMetrics[0]
|
||||
assert.NotEqual(t, len(scopeMetrics.Metrics), 0)
|
||||
|
||||
metricInfo := retrieveMetric(scopeMetrics.Metrics, "interceptor_pending_request_count")
|
||||
data := metricInfo.Data.(metricdata.Sum[int64]).DataPoints[0]
|
||||
assert.Equal(t, data.Value, int64(5))
|
||||
}
|
||||
|
||||
func retrieveMetric(metrics []metricdata.Metrics, metricname string) *metricdata.Metrics {
|
||||
for _, m := range metrics {
|
||||
if m.Name == metricname {
|
||||
return &m
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,86 @@
|
|||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/prometheus"
|
||||
api "go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
|
||||
|
||||
"github.com/kedacore/http-add-on/pkg/build"
|
||||
)
|
||||
|
||||
// PrometheusMetrics is a Collector that records interceptor request metrics
// and exposes them through a Prometheus exporter.
type PrometheusMetrics struct {
	meter                 api.Meter
	requestCounter        api.Int64Counter
	pendingRequestCounter api.Int64UpDownCounter
}
|
||||
|
||||
func NewPrometheusMetrics(options ...prometheus.Option) *PrometheusMetrics {
|
||||
var exporter *prometheus.Exporter
|
||||
var err error
|
||||
if options == nil {
|
||||
exporter, err = prometheus.New()
|
||||
} else {
|
||||
exporter, err = prometheus.New(options...)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("could not create Prometheus exporter: %v", err)
|
||||
}
|
||||
|
||||
res := resource.NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.ServiceNameKey.String("interceptor-proxy"),
|
||||
semconv.ServiceVersionKey.String(build.Version()),
|
||||
)
|
||||
|
||||
provider := metric.NewMeterProvider(
|
||||
metric.WithReader(exporter),
|
||||
metric.WithResource(res),
|
||||
)
|
||||
meter := provider.Meter(meterName)
|
||||
|
||||
reqCounter, err := meter.Int64Counter("interceptor_request_count", api.WithDescription("a counter of requests processed by the interceptor proxy"))
|
||||
if err != nil {
|
||||
log.Fatalf("could not create new Prometheus request counter: %v", err)
|
||||
}
|
||||
|
||||
pendingRequestCounter, err := meter.Int64UpDownCounter("interceptor_pending_request_count", api.WithDescription("a count of requests pending forwarding by the interceptor proxy"))
|
||||
if err != nil {
|
||||
log.Fatalf("could not create new Prometheus pending request counter: %v", err)
|
||||
}
|
||||
|
||||
return &PrometheusMetrics{
|
||||
meter: meter,
|
||||
requestCounter: reqCounter,
|
||||
pendingRequestCounter: pendingRequestCounter,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PrometheusMetrics) RecordRequestCount(method string, path string, responseCode int, host string) {
|
||||
ctx := context.Background()
|
||||
opt := api.WithAttributeSet(
|
||||
attribute.NewSet(
|
||||
attribute.Key("method").String(method),
|
||||
attribute.Key("path").String(path),
|
||||
attribute.Key("code").Int(responseCode),
|
||||
attribute.Key("host").String(host),
|
||||
),
|
||||
)
|
||||
p.requestCounter.Add(ctx, 1, opt)
|
||||
}
|
||||
|
||||
func (p *PrometheusMetrics) RecordPendingRequestCount(host string, value int64) {
|
||||
ctx := context.Background()
|
||||
opt := api.WithAttributeSet(
|
||||
attribute.NewSet(
|
||||
attribute.Key("host").String(host),
|
||||
),
|
||||
)
|
||||
|
||||
p.pendingRequestCounter.Add(ctx, value, opt)
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
package metrics
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
promexporter "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
)
|
||||
|
||||
// TestPromRequestCountMetric registers the collector against an isolated
// registry, records two requests (one 500, one 200), and compares the
// scraped output against the expected Prometheus exposition text.
func TestPromRequestCountMetric(t *testing.T) {
	testRegistry := prometheus.NewRegistry()
	options := []promexporter.Option{promexporter.WithRegisterer(testRegistry)}
	testPrometheus := NewPrometheusMetrics(options...)
	expectedOutput := `
# HELP interceptor_request_count_total a counter of requests processed by the interceptor proxy
# TYPE interceptor_request_count_total counter
interceptor_request_count_total{code="500",host="test-host",method="post",otel_scope_name="keda-interceptor-proxy",otel_scope_version="",path="/test"} 1
interceptor_request_count_total{code="200",host="test-host",method="post",otel_scope_name="keda-interceptor-proxy",otel_scope_version="",path="/test"} 1
# HELP otel_scope_info Instrumentation Scope metadata
# TYPE otel_scope_info gauge
otel_scope_info{otel_scope_name="keda-interceptor-proxy",otel_scope_version=""} 1
# HELP target_info Target metadata
# TYPE target_info gauge
target_info{"service.name"="interceptor-proxy","service.version"="main"} 1
`
	expectedOutputReader := strings.NewReader(expectedOutput)
	testPrometheus.RecordRequestCount("post", "/test", 500, "test-host")
	testPrometheus.RecordRequestCount("post", "/test", 200, "test-host")
	err := testutil.CollectAndCompare(testRegistry, expectedOutputReader)
	assert.Nil(t, err)
}
|
||||
|
||||
// TestPromPendingRequestCountMetric registers the collector against an
// isolated registry, records a pending-request delta of 10, and compares
// the scraped output against the expected Prometheus exposition text.
func TestPromPendingRequestCountMetric(t *testing.T) {
	testRegistry := prometheus.NewRegistry()
	options := []promexporter.Option{promexporter.WithRegisterer(testRegistry)}
	testPrometheus := NewPrometheusMetrics(options...)
	expectedOutput := `
# HELP interceptor_pending_request_count a count of requests pending forwarding by the interceptor proxy
# TYPE interceptor_pending_request_count gauge
interceptor_pending_request_count{host="test-host",otel_scope_name="keda-interceptor-proxy",otel_scope_version=""} 10
# HELP otel_scope_info Instrumentation Scope metadata
# TYPE otel_scope_info gauge
otel_scope_info{otel_scope_name="keda-interceptor-proxy",otel_scope_version=""} 1
# HELP target_info Target metadata
# TYPE target_info gauge
target_info{"service.name"="interceptor-proxy","service.version"="main"} 1
`
	expectedOutputReader := strings.NewReader(expectedOutput)
	testPrometheus.RecordPendingRequestCount("test-host", 10)
	err := testutil.CollectAndCompare(testRegistry, expectedOutputReader)
	assert.Nil(t, err)
}
|
|
@ -6,6 +6,7 @@ import (
|
|||
|
||||
"github.com/go-logr/logr"
|
||||
|
||||
"github.com/kedacore/http-add-on/interceptor/metrics"
|
||||
"github.com/kedacore/http-add-on/pkg/k8s"
|
||||
"github.com/kedacore/http-add-on/pkg/queue"
|
||||
"github.com/kedacore/http-add-on/pkg/util"
|
||||
|
@ -62,21 +63,25 @@ func (cm *Counting) count(ctx context.Context, signaler util.Signaler) {
|
|||
}
|
||||
|
||||
func (cm *Counting) inc(logger logr.Logger, key string) bool {
|
||||
if err := cm.queueCounter.Resize(key, +1); err != nil {
|
||||
if err := cm.queueCounter.Increase(key, 1); err != nil {
|
||||
logger.Error(err, "error incrementing queue counter", "key", key)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
metrics.RecordPendingRequestCount(key, int64(1))
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (cm *Counting) dec(logger logr.Logger, key string) bool {
|
||||
if err := cm.queueCounter.Resize(key, -1); err != nil {
|
||||
if err := cm.queueCounter.Decrease(key, 1); err != nil {
|
||||
logger.Error(err, "error decrementing queue counter", "key", key)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
metrics.RecordPendingRequestCount(key, int64(-1))
|
||||
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/sync/errgroup"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
httpv1alpha1 "github.com/kedacore/http-add-on/operator/apis/http/v1alpha1"
|
||||
"github.com/kedacore/http-add-on/pkg/k8s"
|
||||
|
@ -34,11 +34,11 @@ func TestCountMiddleware(t *testing.T) {
|
|||
},
|
||||
Spec: httpv1alpha1.HTTPScaledObjectSpec{
|
||||
ScaleTargetRef: httpv1alpha1.ScaleTargetRef{
|
||||
Deployment: "testdepl",
|
||||
Service: ":",
|
||||
Port: 8080,
|
||||
Name: "testdepl",
|
||||
Service: "testservice",
|
||||
Port: 8080,
|
||||
},
|
||||
TargetPendingRequests: pointer.Int32(123),
|
||||
TargetPendingRequests: ptr.To[int32](123),
|
||||
},
|
||||
}
|
||||
namespacedName := k8s.NamespacedNameFromObject(httpso).String()
|
||||
|
@ -56,10 +56,9 @@ func TestCountMiddleware(t *testing.T) {
|
|||
|
||||
ctx := context.Background()
|
||||
|
||||
// for a valid request, we expect the queue to be resized twice.
|
||||
// for a valid request, we expect the queue to be modified twice.
|
||||
// once to mark a pending HTTP request, then a second time to remove it.
|
||||
// by the end of both sends, resize1 + resize2 should be 0,
|
||||
// or in other words, the queue size should be back to zero
|
||||
// by the end of both sends, increase1 + decrease1 should be 2
|
||||
|
||||
// run middleware with the host in the request
|
||||
req, err := http.NewRequest("GET", "/something", nil)
|
||||
|
@ -70,7 +69,7 @@ func TestCountMiddleware(t *testing.T) {
|
|||
req = req.WithContext(reqCtx)
|
||||
req.Host = uri.Host
|
||||
|
||||
agg, respRecorder := expectResizes(
|
||||
agg, respRecorder := expectUpdates(
|
||||
ctx,
|
||||
t,
|
||||
2,
|
||||
|
@ -86,10 +85,10 @@ func TestCountMiddleware(t *testing.T) {
|
|||
)
|
||||
r.Equal(http.StatusOK, respRecorder.Code)
|
||||
r.Equal(http.StatusText(respRecorder.Code), respRecorder.Body.String())
|
||||
r.Equal(0, agg)
|
||||
r.Equal(2, agg)
|
||||
}
|
||||
|
||||
// expectResizes creates a new httptest.ResponseRecorder, then passes req through
|
||||
// expectUpdates creates a new httptest.ResponseRecorder, then passes req through
|
||||
// the middleware. every time the middleware calls fakeCounter.Resize(), it calls
|
||||
// resizeCheckFn with t and the queue.HostCount that represents the resize call
|
||||
// that was made. it also maintains an aggregate delta of the counts passed to
|
||||
|
@ -98,7 +97,7 @@ func TestCountMiddleware(t *testing.T) {
|
|||
//
|
||||
// this function returns the aggregate and the httptest.ResponseRecorder that was
|
||||
// created and used with the middleware
|
||||
func expectResizes(
|
||||
func expectUpdates(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
nResizes int,
|
||||
|
|
|
@ -31,7 +31,7 @@ var _ http.Handler = (*Logging)(nil)
|
|||
|
||||
func (lm *Logging) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
r = util.RequestWithLogger(r, lm.logger.WithName("LoggingMiddleware"))
|
||||
w = newLoggingResponseWriter(w)
|
||||
w = newResponseWriter(w)
|
||||
|
||||
var sw util.Stopwatch
|
||||
defer lm.logAsync(w, r, &sw)
|
||||
|
@ -50,9 +50,9 @@ func (lm *Logging) log(w http.ResponseWriter, r *http.Request, sw *util.Stopwatc
|
|||
ctx := r.Context()
|
||||
logger := util.LoggerFromContext(ctx)
|
||||
|
||||
lrw := w.(*loggingResponseWriter)
|
||||
lrw := w.(*responseWriter)
|
||||
if lrw == nil {
|
||||
lrw = newLoggingResponseWriter(w)
|
||||
lrw = newResponseWriter(w)
|
||||
}
|
||||
|
||||
timestamp := sw.StartTime().Format(CombinedLogTimeFormat)
|
||||
|
|
|
@ -1,45 +0,0 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type loggingResponseWriter struct {
|
||||
downstreamResponseWriter http.ResponseWriter
|
||||
bytesWritten int
|
||||
statusCode int
|
||||
}
|
||||
|
||||
func newLoggingResponseWriter(downstreamResponseWriter http.ResponseWriter) *loggingResponseWriter {
|
||||
return &loggingResponseWriter{
|
||||
downstreamResponseWriter: downstreamResponseWriter,
|
||||
}
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) BytesWritten() int {
|
||||
return lrw.bytesWritten
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) StatusCode() int {
|
||||
return lrw.statusCode
|
||||
}
|
||||
|
||||
var _ http.ResponseWriter = (*loggingResponseWriter)(nil)
|
||||
|
||||
func (lrw *loggingResponseWriter) Header() http.Header {
|
||||
return lrw.downstreamResponseWriter.Header()
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) Write(bytes []byte) (int, error) {
|
||||
n, err := lrw.downstreamResponseWriter.Write(bytes)
|
||||
|
||||
lrw.bytesWritten += n
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) WriteHeader(statusCode int) {
|
||||
lrw.downstreamResponseWriter.WriteHeader(statusCode)
|
||||
|
||||
lrw.statusCode = statusCode
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue