Compare commits


No commits in common. "main" and "v1.42.0" have entirely different histories.

236 changed files with 3323 additions and 10304 deletions

.ci/after-success.sh Executable file

@@ -0,0 +1,4 @@
#!/bin/bash
echo "Uploading code coverage results"
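# Fetch Codecov's bash uploader and run it to publish the coverage report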
bash <(curl -s https://codecov.io/bash)


@@ -5,11 +5,7 @@ if [[ -z $OPERATOR_VERSION ]]; then
exit 1
fi
JAEGER_VERSION=$(echo $JAEGER_VERSION | tr -d '"')
JAEGER_AGENT_VERSION=$(echo $JAEGER_AGENT_VERSION | tr -d '"')
PREVIOUS_VERSION=$(grep operator= versions.txt | awk -F= '{print $2}')
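# The operator version currently in versions.txt is the previous release; it becomes the "replaces:" target below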
@@ -24,11 +20,11 @@ sed "s~replaces: jaeger-operator.v.*~replaces: jaeger-operator.v${PREVIOUS_VERSI
sed -i "s~all-in-one:.*~all-in-one:${JAEGER_VERSION}~gi" examples/all-in-one-with-options.yaml
# statefulset-manual-sidecar
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_AGENT_VERSION}~gi" examples/statefulset-manual-sidecar.yaml
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_VERSION}~gi" examples/statefulset-manual-sidecar.yaml
# operator-with-tracing
sed -i "s~jaeger-operator:.*~jaeger-operator:${OPERATOR_VERSION}~gi" examples/operator-with-tracing.yaml
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_AGENT_VERSION}~gi" examples/operator-with-tracing.yaml
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_VERSION}~gi" examples/operator-with-tracing.yaml
# tracegen
sed -i "s~jaeger-tracegen:.*~jaeger-tracegen:${JAEGER_VERSION}~gi" examples/tracegen.yaml

.ci/upload-test-coverage.sh Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
bash <(curl -s https://codecov.io/bash)


@@ -1,54 +1,11 @@
version: 2
updates:
- package-ecosystem: docker
directory: "/"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- docker
- ok-to-test
- package-ecosystem: docker
directory: "/tests"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- docker
- ok-to-test
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- go
- ok-to-test
groups:
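# "groups" batches related bumps into a single PR per group instead of one PR per dependency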
golang-org-x:
patterns:
- "golang.org/x/*"
opentelemetry:
patterns:
- "go.opentelemetry.io/*"
prometheus:
patterns:
- "github.com/prometheus-operator/prometheus-operator"
- "github.com/prometheus-operator/prometheus-operator/*"
- "github.com/prometheus/prometheus"
- "github.com/prometheus/prometheus/*"
- "github.com/prometheus/client_go"
- "github.com/prometheus/client_go/*"
kubernetes:
patterns:
- "k8s.io/*"
- "sigs.k8s.io/*"
- package-ecosystem: "github-actions"
directory: "/"
@@ -56,7 +13,3 @@ updates:
interval: "daily"
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- github_actions
- ok-to-test


@@ -10,22 +10,19 @@ on:
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
basic-checks:
runs-on: ubuntu-20.04
env:
USER: jaegertracing
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
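# main pins third-party actions to full commit SHAs (tag noted in the trailing comment) as a supply-chain hardening measure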
- name: Set up Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
uses: actions/setup-go@v3
with:
go-version: "1.22"
go-version: 1.18
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: "install kubebuilder"
run: ./hack/install/install-kubebuilder.sh
@@ -37,6 +34,6 @@ jobs:
run: make install-tools ci
- name: "upload test coverage report"
uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574 # v5.4.0
with:
token: ${{ secrets.CODECOV_TOKEN }}
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
run: ./.ci/upload-test-coverage.sh


@@ -10,15 +10,8 @@ on:
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
codeql-analyze:
permissions:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
name: CodeQL Analyze
runs-on: ubuntu-latest
@@ -29,24 +22,19 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "Set up Go"
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: "go.mod"
uses: actions/checkout@v3
# Disable CodeQL for tests
# https://github.com/github/codeql/issues/4786
- run: rm -rf ./tests
- name: Initialize CodeQL
uses: github/codeql-action/init@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/init@v2
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/analyze@v2

.github/workflows/e2e-elasticsearch.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: Elasticsearch E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-elasticsearch-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.25"
name: Run Elasticsearch E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
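# the repo-local composite action below sets up the e2e tooling and runs the selected suite against the requested Kube version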
- uses: ./hack/actions/e2e
with:
testsuite_name: elasticsearch
kube_version: ${{ matrix.kube-version }}

.github/workflows/e2e-examples.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: Examples E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-examples-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.25"
name: Run examples E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: examples
kube_version: ${{ matrix.kube-version }}

.github/workflows/e2e-generate.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: Generate E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-generate-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.25"
name: Run generate E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: generate
kube_version: ${{ matrix.kube-version }}

.github/workflows/e2e-miscellaneous.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: Miscellaneous E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-miscellaneous-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.25"
name: Run miscellaneous E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: miscellaneous
kube_version: ${{ matrix.kube-version }}

.github/workflows/e2e-sidecar.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: Sidecar E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-sidecar-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.25"
name: Run sidecar E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: sidecar
kube_version: ${{ matrix.kube-version }}

.github/workflows/e2e-streaming.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: Streaming E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-streaming-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.25"
name: Run streaming E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: streaming
kube_version: ${{ matrix.kube-version }}

.github/workflows/e2e-ui.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: UI E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-allinone-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.25"
name: UI E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: ui
kube_version: ${{ matrix.kube-version }}

.github/workflows/e2e-upgrade.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: Upgrade E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-upgrade-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.25"
name: Run upgrade E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: upgrade
kube_version: ${{ matrix.kube-version }}


@@ -1,84 +0,0 @@
name: E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
permissions:
contents: read
jobs:
e2e:
name: "Run ${{ matrix.testsuite.label }} E2E tests (${{ matrix.kube-version }})"
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
kube-version:
- "1.19"
- "1.30"
testsuite:
- { name: "elasticsearch", label: "Elasticsearch" }
- { name: "examples", label: "Examples" }
- { name: "generate", label: "Generate" }
- { name: "miscellaneous", label: "Miscellaneous" }
- { name: "sidecar", label: "Sidecar" }
- { name: "streaming", label: "Streaming" }
- { name: "ui", label: "UI" }
- { name: "upgrade", label: "Upgrade" }
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: "Set up Go"
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: "1.22"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
with:
install: true
- name: Cache Docker layers
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
with:
path: /tmp/.buildx-cache
key: e2e-${{ github.sha }}
restore-keys: |
e2e-
- name: "Install KIND"
run: ./hack/install/install-kind.sh
shell: bash
- name: "Install KUTTL"
run: ./hack/install/install-kuttl.sh
shell: bash
- name: "Install gomplate"
run: ./hack/install/install-gomplate.sh
shell: bash
- name: "Install dependencies"
run: make install-tools
shell: bash
- name: "Run ${{ matrix.testsuite.label }} E2E test suite on Kube ${{ matrix.kube-version }}"
env:
VERBOSE: "true"
KUBE_VERSION: "${{ matrix.kube-version }}"
DOCKER_BUILD_OPTIONS: "--cache-from type=local,src=/tmp/.buildx-cache --cache-to type=local,dest=/tmp/.buildx-cache-new,mode=max --load"
run: make run-e2e-tests-${{ matrix.testsuite.name }}
shell: bash
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
shell: bash
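Each matrix cell above ultimately reduces to a single make invocation. A rough local equivalent, assuming Docker is available and the hack/install scripts have provisioned KIND, KUTTL and gomplate (suite and version below are illustrative):

```bash
# Approximation of one matrix cell from the workflow above
make install-tools
KUBE_VERSION=1.30 VERBOSE=true make run-e2e-tests-sidecar
```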


@@ -1,54 +0,0 @@
name: Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '45 13 * * 1'
push:
branches: [ "main" ]
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
# Uncomment the permissions below if installing in a private repository.
# contents: read
# actions: read
steps:
- name: "Checkout code"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
with:
results_file: results.sarif
results_format: sarif
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
with:
sarif_file: results.sarif


@@ -6,18 +6,15 @@ on:
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
publish:
runs-on: ubuntu-latest
env:
USER: jaegertracing
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- uses: actions/checkout@v3
- uses: docker/setup-qemu-action@v2.1.0
- uses: docker/setup-buildx-action@v2.4.0
- name: "publishes the images"
env:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}


@@ -6,25 +6,26 @@ on:
- 'v*'
jobs:
release:
unit-tests:
runs-on: ubuntu-20.04
env:
USER: jaegertracing
steps:
- name: Set up Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
uses: actions/setup-go@v3
with:
go-version: "1.22"
go-version: 1.18
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v3
- name: "install kubebuilder"
run: ./hack/install/install-kubebuilder.sh
- name: "install kustomize"
run: ./hack/install/install-kustomize.sh
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- uses: docker/setup-qemu-action@v2.1.0
- uses: docker/setup-buildx-action@v2.4.0
- name: "generate release resources"
run: make release-artifacts USER=jaegertracing


@@ -10,16 +10,13 @@ on:
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
operator-sdk-scorecard:
name: "Operator-SDK Scorecard"
runs-on: ubuntu-latest
steps:
- name: "Check out code"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v3
- name: "Install KIND"
run: ./hack/install/install-kind.sh
- name: "Install KUTTL"


@@ -1,3 +1,26 @@
run:
go: '1.18'
timeout: 10m
linters-settings:
goimports:
local-prefixes: github.com/jaegertracing/jaeger-operator
gosimple:
go: "1.18"
linters:
enable:
- depguard
- gofmt
- gofumpt
- goimports
- gosec
- govet
- misspell
- bidichk
disable:
- errcheck
issues:
# Excluding configuration per-path, per-linter, per-text and per-source
exclude-rules:
@@ -5,29 +28,3 @@ issues:
- path: _test\.go
linters:
- gosec
- linters:
- staticcheck
text: "SA1019:"
linters-settings:
goimports:
local-prefixes: github.com/jaegertracing/jaeger-operator
gosimple:
go: "1.22"
linters:
enable:
- bidichk
- errorlint
- gofumpt
- goimports
- gosec
- govet
- misspell
- testifylint
disable:
- errcheck
run:
go: '1.22'
timeout: 10m


@@ -1,110 +1,5 @@
Changes by Version
==================
## v1.65.0 (2025-01-22)
* Pin agent version to 1.62.0 ([#2790](https://github.com/jaegertracing/jaeger-operator/pull/2790), [@rubenvp8510](https://github.com/rubenvp8510))
* Added compatibility for Jaeger Operator v1.61.x and v1.62.x ([#2725](https://github.com/jaegertracing/jaeger-operator/pull/2725), [@mooneeb](https://github.com/mooneeb))
## v1.62.0 (2024-10-10)
* TRACING-4238 | Fix gateway 502 timeout ([#2694](https://github.com/jaegertracing/jaeger-operator/pull/2694), [@pavolloffay](https://github.com/pavolloffay))
* feat: added missing test for elasticsearch reconciler ([#2662](https://github.com/jaegertracing/jaeger-operator/pull/2662), [@Ankit152](https://github.com/Ankit152))
## v1.61.0 (2024-09-16)
* Bump google.golang.org/grpc from 1.66.0 to 1.66.1 ([#2675](https://github.com/jaegertracing/jaeger-operator/pull/2675), [@dependabot[bot]](https://github.com/apps/dependabot))
* Bump google.golang.org/grpc from 1.65.0 to 1.66.0 ([#2670](https://github.com/jaegertracing/jaeger-operator/pull/2670), [@dependabot[bot]](https://github.com/apps/dependabot))
* Bump the opentelemetry group with 9 updates ([#2668](https://github.com/jaegertracing/jaeger-operator/pull/2668), [@dependabot[bot]](https://github.com/apps/dependabot))
## v1.60.0 (2024-08-13)
* Fix Golang version in go.mod ([#2652](https://github.com/jaegertracing/jaeger-operator/pull/2652), [@iblancasa](https://github.com/iblancasa))
## v1.60.0 (2024-08-09)
* Test on k8s 1.30 ([#2647](https://github.com/jaegertracing/jaeger-operator/pull/2647), [@pavolloffay](https://github.com/pavolloffay))
* Bump go to 1.22 and controller-gen to 1.14 ([#2646](https://github.com/jaegertracing/jaeger-operator/pull/2646), [@pavolloffay](https://github.com/pavolloffay))
## v1.59.0 (2024-08-06)
* Update compatibility matrix for v1.57.x ([#2594](https://github.com/jaegertracing/jaeger-operator/pull/2594), [@mooneeb](https://github.com/mooneeb))
* imagePullSecrets is not set for agent DaemonSet ([#2563](https://github.com/jaegertracing/jaeger-operator/pull/2563), [@antoniomerlin](https://github.com/antoniomerlin))
## v1.57.0 (2024-05-06)
## v1.55.0 (2024-03-22)
* Add server URL to JaegerMetricsStorageSpec ([#2481](https://github.com/jaegertracing/jaeger-operator/pull/2481), [@antoniomerlin](https://github.com/antoniomerlin))
* Use the host set in the Ingress field for the OpenShift Route ([#2409](https://github.com/jaegertracing/jaeger-operator/pull/2409), [@iblancasa](https://github.com/iblancasa))
* Add minimum Kubernetes and OpenShift versions ([#2492](https://github.com/jaegertracing/jaeger-operator/pull/2492), [@andreasgerstmayr](https://github.com/andreasgerstmayr))
## v1.54.0 (2024-02-14)
* apis/v1: add jaeger agent deprecation warning ([#2471](https://github.com/jaegertracing/jaeger-operator/pull/2471), [@frzifus](https://github.com/frzifus))
## V1.53.0 (2024-01-17)
* Choose the newer autoscaling version by default ([#2374](https://github.com/jaegertracing/jaeger-operator/pull/2374), [@iblancasa](https://github.com/iblancasa))
* Upgrade operator-sdk to 1.32.0 ([#2388](https://github.com/jaegertracing/jaeger-operator/pull/2388), [@iblancasa](https://github.com/iblancasa))
* Fix containerImage field and remove statement about failing CI ([#2386](https://github.com/jaegertracing/jaeger-operator/pull/2386), [@iblancasa](https://github.com/iblancasa))
* Fix injection: prefer jaeger in the same namespace ([#2383](https://github.com/jaegertracing/jaeger-operator/pull/2383), [@pavolloffay](https://github.com/pavolloffay))
## v1.52.0 (2023-12-07)
* Add missing container security context settings and tests ([#2354](https://github.com/jaegertracing/jaeger-operator/pull/2354), [@tingeltangelthomas](https://github.com/tingeltangelthomas))
## v1.51.0 (2023-11-17)
* Support configuring images via RELATED_IMAGE_ environment variables ([#2355](https://github.com/jaegertracing/jaeger-operator/pull/2355), [@andreasgerstmayr](https://github.com/andreasgerstmayr))
* Regenerate ES certificates when they are close to 1 day from expiry ([#2356](https://github.com/jaegertracing/jaeger-operator/pull/2356), [@rubenvp8510](https://github.com/rubenvp8510))
* Bump actions/checkout from 3 to 4 ([#2316](https://github.com/jaegertracing/jaeger-operator/pull/2316), [@dependabot[bot]](https://github.com/apps/dependabot))
* bump grpc to 1.58.3 ([#2346](https://github.com/jaegertracing/jaeger-operator/pull/2346), [@rubenvp8510](https://github.com/rubenvp8510))
* Bump golang version to 1.21 ([#2347](https://github.com/jaegertracing/jaeger-operator/pull/2347), [@rubenvp8510](https://github.com/rubenvp8510))
* Ensure oauth-proxy ImageStream is detected eventually ([#2340](https://github.com/jaegertracing/jaeger-operator/pull/2340), [@bverschueren](https://github.com/bverschueren))
* Check if envFrom has ConfigMapRef set ([#2342](https://github.com/jaegertracing/jaeger-operator/pull/2342), [@edwardecook](https://github.com/edwardecook))
* Bump golang.org/x/net from 0.13.0 to 0.17.0 ([#2343](https://github.com/jaegertracing/jaeger-operator/pull/2343), [@dependabot[bot]](https://github.com/apps/dependabot))
* Fix issue related to new encoding in oauth-proxy image ([#2345](https://github.com/jaegertracing/jaeger-operator/pull/2345), [@iblancasa](https://github.com/iblancasa))
* Always generate new oauth-proxy password ([#2333](https://github.com/jaegertracing/jaeger-operator/pull/2333), [@pavolloffay](https://github.com/pavolloffay))
* Add v1.48.x and v1.49.x to the support map ([#2332](https://github.com/jaegertracing/jaeger-operator/pull/2332), [@ishaqkhattana](https://github.com/ishaqkhattana))
* Pass proxy env vars to operands ([#2330](https://github.com/jaegertracing/jaeger-operator/pull/2330), [@pavolloffay](https://github.com/pavolloffay))
* Protect auth delegator behind a mutex ([#2318](https://github.com/jaegertracing/jaeger-operator/pull/2318), [@iblancasa](https://github.com/iblancasa))
## v1.49.1 (2023-09-07)
* fix: protect the kafka-provision setting behind a mutex ([#2308](https://github.com/jaegertracing/jaeger-operator/pull/2308), [@iblancasa](https://github.com/iblancasa))
## v1.48.1 (2023-09-04)
* Use base image that does not require subscription (centos 9 stream) ([#2313](https://github.com/jaegertracing/jaeger-operator/pull/2313), [@pavolloffay](https://github.com/pavolloffay))
* Update go dependencies to Kubernetes 0.28.1 ([#2301](https://github.com/jaegertracing/jaeger-operator/pull/2301), [@pavolloffay](https://github.com/pavolloffay))
* Protect the ESProvisioning setting behind a mutex ([#2287](https://github.com/jaegertracing/jaeger-operator/pull/2287), [@iblancasa](https://github.com/iblancasa))
## v1.48.0 (2023-08-28)
* Remove the TokenReview after checking we can create it ([#2286](https://github.com/jaegertracing/jaeger-operator/pull/2286), [@iblancasa](https://github.com/iblancasa))
* Fix apiVersion and kind are missing in jaeger-operator generate output ([#2281](https://github.com/jaegertracing/jaeger-operator/pull/2281), [@hiteshwani29](https://github.com/hiteshwani29))
* Fix custom labels for the deployable components in production strategy ([#2277](https://github.com/jaegertracing/jaeger-operator/pull/2277), [@hiteshwani29](https://github.com/hiteshwani29))
* Ensure the OAuth Proxy image detection is run after the platform detection ([#2280](https://github.com/jaegertracing/jaeger-operator/pull/2280), [@iblancasa](https://github.com/iblancasa))
* Added changes to respect env variable set from envFrom configMaps ([#2272](https://github.com/jaegertracing/jaeger-operator/pull/2272), [@hiteshwani29](https://github.com/hiteshwani29))
* Refactor the autodetect module to reduce the number of writes/reads in viper configuration ([#2274](https://github.com/jaegertracing/jaeger-operator/pull/2274), [@iblancasa](https://github.com/iblancasa))
## v1.47.0 (2023-07-12)
* Expose admin ports for agent, collector, and query Deployments via the equivalent Service ([#2262](https://github.com/jaegertracing/jaeger-operator/pull/2262), [@thomaspaulin](https://github.com/thomaspaulin))
* update otel sdk to v1.16.0/v0.39.0 ([#2261](https://github.com/jaegertracing/jaeger-operator/pull/2261), [@frzifus](https://github.com/frzifus))
* Extended compatibility matrix ([#2255](https://github.com/jaegertracing/jaeger-operator/pull/2255), [@shazib-summar](https://github.com/shazib-summar))
* Add support for Kubernetes 1.27 ([#2235](https://github.com/jaegertracing/jaeger-operator/pull/2235), [@iblancasa](https://github.com/iblancasa))
* Jaeger Collector Config: `Lifecycle` and `TerminationGracePeriodSeconds` ([#2242](https://github.com/jaegertracing/jaeger-operator/pull/2242), [@taj-p](https://github.com/taj-p))
## v1.46.0 (2023-06-16)
* Missing exposed port 16685 in query deployments ([#2239](https://github.com/jaegertracing/jaeger-operator/pull/2239), [@iblancasa](https://github.com/iblancasa))
* Use Golang 1.20 ([#2205](https://github.com/jaegertracing/jaeger-operator/pull/2205), [@iblancasa](https://github.com/iblancasa))
* [BugFix] Properly set imagePullPolicy and containerSecurityContext for EsIndexCleaner cronjob container ([#2224](https://github.com/jaegertracing/jaeger-operator/pull/2224), [@michalschott](https://github.com/michalschott))
* Remove resource limitation for the operator pod ([#2221](https://github.com/jaegertracing/jaeger-operator/pull/2221), [@iblancasa](https://github.com/iblancasa))
* Add PriorityClass for AllInOne strategy ([#2218](https://github.com/jaegertracing/jaeger-operator/pull/2218), [@sonofgibs](https://github.com/sonofgibs))
## v1.45.0 (2023-05-16)
## v1.44.0 (2023-04-13)
* Feat: add `NodeSelector` to jaeger collector, query, and ingestor ([#2200](https://github.com/jaegertracing/jaeger-operator/pull/2200), [@AhmedGrati](https://github.com/AhmedGrati))
## v1.43.0 (2023-02-07)
* update operator-sdk to 1.27.0 ([#2178](https://github.com/jaegertracing/jaeger-operator/pull/2178), [@iblancasa](https://github.com/iblancasa))
* Support JaegerCommonSpec in JaegerCassandraCreateSchemaSpec ([#2176](https://github.com/jaegertracing/jaeger-operator/pull/2176), [@haanhvu](https://github.com/haanhvu))
## v1.42.0 (2023-02-07)
* Upgrade Kafka Operator default version to 0.32.0 ([#2150](https://github.com/jaegertracing/jaeger-operator/pull/2150), [@iblancasa](https://github.com/iblancasa))
* Upgrade Kind, Kind images and add Kubernetes 1.26 ([#2161](https://github.com/jaegertracing/jaeger-operator/pull/2161), [@iblancasa](https://github.com/iblancasa))
1.41.1 (2023-01-23)
-------------------


@@ -1,34 +1,18 @@
The following table shows the compatibility of Jaeger Operator with three different components: Kubernetes, Strimzi Operator, and Cert-Manager.
The following table shows the compatibility of the Jaeger Operator with different components; in this particular case we show Kubernetes and Strimzi Operator compatibility.
| Jaeger Operator | Kubernetes | Strimzi Operator | Cert-Manager |
|-----------------|----------------|--------------------|--------------|
| v1.62.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.61.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.60.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.59.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.58.x | skipped | skipped | skipped |
| v1.57.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.56.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.55.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.54.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.53.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.52.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.51.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.50.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.49.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.48.x | v1.19 to v1.27 | v0.32 | v1.6.1 |
| v1.47.x | v1.19 to v1.27 | v0.32 | v1.6.1 |
| v1.46.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.45.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.44.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.43.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.42.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.41.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.40.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.39.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.38.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.37.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.36.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.35.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
|-----------------|-----------------|--------------------|--------------|
| v1.34.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.33.x | v1.19 to v1.23 | v0.23 | v1.6.1 |
| v1.32.x | v1.19 to v1.21 | v0.23 | v1.6.1 |
| v1.31.x | v1.19 to v1.21 | v0.23 | v1.6.1 |
| v1.30.x | v1.19 to v1.21 | v0.23 | |
| v1.29.x | v1.19 to v1.21 | v0.23 | |
| v1.28.x | v1.19 to v1.21 | v0.23 | |
| v1.27.x | v1.19 to v1.21 | v0.23 | |
| v1.26.x | v1.19 to v1.21 | v0.23 | |
| v1.25.x | v1.19 to v1.21 | v0.23 | |
| v1.24.x | v1.19 to v1.21 | v0.23 | |
| v1.23.x | v1.19 to v1.21 | v0.19, v0.20 | |
| v1.22.x | v1.18 to v1.20 | v0.19 | |


@@ -183,8 +183,9 @@ difference are:
* You need to log in to your Kubernetes cluster before running the E2E tests
* You need to provide the `USE_KIND_CLUSTER=false` parameter when calling `make`
For instance, to run the `examples` E2E test suite in OpenShift, the command is:
```sh
$ make run-e2e-tests USE_KIND_CLUSTER=false
$ make run-e2e-tests-examples USE_KIND_CLUSTER=false
```
### Developing new E2E tests


@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22@sha256:f43c6f049f04cbbaeb28f0aad3eea15274a7d0a7899a617d0037aec48d7ab010 as builder
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.18 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
@@ -21,7 +21,6 @@ COPY pkg/ pkg/
COPY versions.txt versions.txt
ARG JAEGER_VERSION
ARG JAEGER_AGENT_VERSION
ARG VERSION_PKG
ARG VERSION
ARG VERSION_DATE
@@ -33,17 +32,17 @@ ARG VERSION_DATE
# see last part of https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
ARG TARGETARCH
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -ldflags="-X ${VERSION_PKG}.version=${VERSION} -X ${VERSION_PKG}.buildDate=${VERSION_DATE} -X ${VERSION_PKG}.defaultJaeger=${JAEGER_VERSION} -X ${VERSION_PKG}.defaultAgent=${JAEGER_AGENT_VERSION}" -a -o jaeger-operator main.go
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -ldflags="-X ${VERSION_PKG}.version=${VERSION} -X ${VERSION_PKG}.buildDate=${VERSION_DATE} -X ${VERSION_PKG}.defaultJaeger=${JAEGER_VERSION}" -a -o jaeger-operator main.go
FROM quay.io/centos/centos:stream9
FROM registry.access.redhat.com/ubi8/ubi
ENV USER_UID=1001 \
USER_NAME=jaeger-operator
RUN INSTALL_PKGS="openssl" && \
dnf install -y $INSTALL_PKGS && \
yum install -y $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
dnf clean all && \
yum clean all && \
mkdir /tmp/_working_dir && \
chmod og+w /tmp/_working_dir


@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22@sha256:f43c6f049f04cbbaeb28f0aad3eea15274a7d0a7899a617d0037aec48d7ab010 as builder
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.18 as builder
WORKDIR /workspace
@@ -27,7 +27,7 @@ RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./index -a ./tests/assert-
# Use the curl container image to ensure we have curl installed. Also, it is a
# minimal container image
FROM curlimages/curl@sha256:94e9e444bcba979c2ea12e27ae39bee4cd10bc7041a472c4727a558e213744e6
FROM curlimages/curl:7.81.0
WORKDIR /
COPY --from=builder /workspace/reporter .
COPY --from=builder /workspace/reporter-otlp .


@@ -29,11 +29,6 @@ BUNDLE_IMG ?= ${IMG_PREFIX}/${OPERATOR_NAME}-bundle:$(addprefix v,${VERSION})
OUTPUT_BINARY ?= "$(BIN_DIR)/jaeger-operator"
VERSION_PKG ?= "github.com/jaegertracing/jaeger-operator/pkg/version"
export JAEGER_VERSION ?= "$(shell grep jaeger= versions.txt | awk -F= '{print $$2}')"
# The agent was removed in Jaeger 1.62.0, and newer versions of Jaeger no longer distribute the agent images.
# For that reason the last version of the agent is 1.62.0; it is pinned here so we can update Jaeger while keeping
# the latest available agent image.
export JAEGER_AGENT_VERSION ?= "1.62.0"
# Kafka and Kafka Operator variables
STORAGE_NAMESPACE ?= "${shell kubectl get sa default -o jsonpath='{.metadata.namespace}' || oc project -q}"
KAFKA_NAMESPACE ?= "kafka"
@@ -56,10 +51,7 @@ CERTMANAGER_VERSION ?= 1.6.1
CMCTL ?= $(LOCALBIN)/cmctl
# Operator SDK
OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk
OPERATOR_SDK_VERSION ?= 1.32.0
# Minimum Kubernetes and OpenShift versions
MIN_KUBERNETES_VERSION ?= 1.19.0
MIN_OPENSHIFT_VERSION ?= 4.12
OPERATOR_SDK_VERSION ?= 1.23.0
# Use a KIND cluster for the E2E tests
USE_KIND_CLUSTER ?= true
# Is Jaeger Operator installed via OLM?
@@ -86,13 +78,13 @@ else
GOBIN=$(shell go env GOBIN)
endif
LD_FLAGS ?= "-X $(VERSION_PKG).version=$(VERSION) -X $(VERSION_PKG).buildDate=$(VERSION_DATE) -X $(VERSION_PKG).defaultJaeger=$(JAEGER_VERSION) -X $(VERSION_PKG).defaultAgent=$(JAEGER_AGENT_VERSION)"
LD_FLAGS ?= "-X $(VERSION_PKG).version=$(VERSION) -X $(VERSION_PKG).buildDate=$(VERSION_DATE) -X $(VERSION_PKG).defaultJaeger=$(JAEGER_VERSION)"
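# The -X link flags stamp version metadata into $(VERSION_PKG) so the built binary can report it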
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST ?= $(LOCALBIN)/setup-envtest
ENVTEST_K8S_VERSION = 1.30
ENVTEST_K8S_VERSION = 1.26
# Options for KIND version to use
export KUBE_VERSION ?= 1.30
export KUBE_VERSION ?= 1.26
KIND_CONFIG ?= kind-$(KUBE_VERSION).yaml
SCORECARD_TEST_IMG ?= quay.io/operator-framework/scorecard-test:v$(OPERATOR_SDK_VERSION)
@@ -136,7 +128,7 @@ ensure-generate-is-noop: set-image-controller generate bundle
.PHONY: format
format: install-tools
format:
$(ECHO) Formatting code...
$(VECHO)./.ci/format.sh
@@ -157,11 +149,11 @@ build: format
.PHONY: docker
docker:
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=JAEGER_AGENT_VERSION=${JAEGER_AGENT_VERSION} --build-arg=TARGETARCH=$(GOARCH) --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} -t "$(IMG)" . ${DOCKER_BUILD_OPTIONS}
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=TARGETARCH=$(GOARCH) --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} -t "$(IMG)" . ${DOCKER_BUILD_OPTIONS}
.PHONY: dockerx
dockerx:
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker buildx build --push --progress=plain --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=JAEGER_AGENT_VERSION=${JAEGER_AGENT_VERSION} --build-arg=GOPROXY=${GOPROXY} --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} --platform=$(PLATFORMS) $(IMAGE_TAGS) .
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker buildx build --push --progress=plain --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=GOPROXY=${GOPROXY} --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} --platform=$(PLATFORMS) $(IMAGE_TAGS) .
.PHONY: push
push:
@@ -175,7 +167,7 @@ endif
.PHONY: unit-tests
unit-tests: envtest
@echo Running unit tests...
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -p 1 ${GOTEST_OPTS} ./... -cover -coverprofile=cover.out -ldflags $(LD_FLAGS)
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test -p 1 ${GOTEST_OPTS} ./... -cover -coverprofile=cover.out -ldflags $(LD_FLAGS)
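# KUBEBUILDER_ASSETS points the tests at the kube-apiserver/etcd binaries provisioned by setup-envtest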
.PHONY: set-node-os-linux
set-node-os-linux:
@@ -357,16 +349,10 @@ $(ENVTEST): $(LOCALBIN)
.PHONY: bundle
bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files.
$(SED) -i "s#containerImage: quay.io/jaegertracing/jaeger-operator:$(OPERATOR_VERSION)#containerImage: quay.io/jaegertracing/jaeger-operator:$(VERSION)#g" config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
$(SED) -i 's/minKubeVersion: .*/minKubeVersion: $(MIN_KUBERNETES_VERSION)/' config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
$(SED) -i 's/com.redhat.openshift.versions=.*/com.redhat.openshift.versions=v$(MIN_OPENSHIFT_VERSION)/' bundle.Dockerfile
$(SED) -i 's/com.redhat.openshift.versions: .*/com.redhat.openshift.versions: v$(MIN_OPENSHIFT_VERSION)/' bundle/metadata/annotations.yaml
$(OPERATOR_SDK) generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --manifests --version $(VERSION) $(BUNDLE_METADATA_OPTS)
$(OPERATOR_SDK) bundle validate ./bundle
./hack/ignore-createdAt-bundle.sh
.PHONY: bundle-build
bundle-build: ## Build the bundle image.

README.md

@@ -1,4 +1,4 @@
[![Build Status][ci-img]][ci] [![Go Report Card][goreport-img]][goreport] [![Code Coverage][cov-img]][cov] [![GoDoc][godoc-img]][godoc] [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/jaegertracing/jaeger-operator/badge)](https://securityscorecards.dev/viewer/?uri=github.com/jaegertracing/jaeger-operator)
[![Build Status][ci-img]][ci] [![Go Report Card][goreport-img]][goreport] [![Code Coverage][cov-img]][cov] [![GoDoc][godoc-img]][godoc]
# Jaeger Operator for Kubernetes
@@ -67,143 +67,11 @@ The jaeger Operator *might* work on other untested versions of Strimzi Operator,
Sometimes it is preferable to generate plain manifest files instead of running an operator in a cluster. `jaeger-operator generate` generates Kubernetes manifests from a given CR. In this example we apply the manifest generated by [examples/simplest.yaml](https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml) to the namespace `jaeger-test`:
```bash
curl https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml | docker run -i --rm jaegertracing/jaeger-operator:main generate | kubectl apply -n jaeger-test -f -
curl https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml | docker run -i --rm jaegertracing/jaeger-operator:master generate | kubectl apply -n jaeger-test -f -
```
It is recommended to deploy the operator instead of generating a static manifest.
## Jaeger V2 Operator
As Jaeger V2 has been released, it was decided that Jaeger V2 will be deployed on Kubernetes using the [OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-operator). This benefits both the users of Jaeger and OpenTelemetry. To use Jaeger V2 with the OpenTelemetry Operator, the steps are as follows:
* Install the cert-manager in the existing cluster with the command:
```bash
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.1/cert-manager.yaml
```
Please verify all the resources (e.g., Pods and Deployments) are in a ready state in the `cert-manager` namespace.
* Install the OpenTelemetry Operator by running:
```bash
kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
```
Please verify all the resources (e.g., Pods and Deployments) are in a ready state in the `opentelemetry-operator-system` namespace.
### Using Jaeger with in-memory storage
Once all the resources are ready, create a Jaeger instance as follows:
```yaml
kubectl apply -f - <<EOF
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
name: jaeger-inmemory-instance
spec:
image: jaegertracing/jaeger:latest
ports:
- name: jaeger
port: 16686
config:
service:
extensions: [jaeger_storage, jaeger_query]
pipelines:
traces:
receivers: [otlp]
exporters: [jaeger_storage_exporter]
extensions:
jaeger_query:
storage:
traces: memstore
jaeger_storage:
backends:
memstore:
memory:
max_traces: 100000
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
exporters:
jaeger_storage_exporter:
trace_storage: memstore
EOF
```
To use the Jaeger V2 UI with in-memory storage, expose the pod, deployment or service as follows:
```bash
kubectl port-forward deployment/jaeger-inmemory-instance-collector 8080:16686
```
Or
```bash
kubectl port-forward service/jaeger-inmemory-instance-collector 8080:16686
```
Once done, type `localhost:8080` in the browser to interact with the UI.
[Note] Development is ongoing in the OpenTelemetry Operator to let users interact directly with the UI.
### Using Jaeger with a database to store traces
To use Jaeger V2 with a supported database, the database deployments must be created first and be in a `ready` state [(ref)](https://www.jaegertracing.io/docs/2.0/storage/).
Create a Kubernetes Service that exposes the database pods, enabling communication between the database and the Jaeger pods.
The service can be created in two ways: [manually](https://kubernetes.io/docs/concepts/services-networking/service/), or imperatively as shown below.
```bash
kubectl expose pods <pod-name> --port=<port-number> --name=<name-of-the-service>
```
Or
```bash
kubectl expose deployment <deployment-name> --port=<port-number> --name=<name-of-the-service>
```
After the service is created, add the service name as an endpoint in the respective storage config as follows:
* [Cassandra DB](https://github.com/jaegertracing/jaeger/blob/main/cmd/jaeger/config-cassandra.yaml):
```yaml
jaeger_storage:
backends:
some_storage:
cassandra:
connection:
servers: [<name-of-the-service>]
```
* [ElasticSearch](https://github.com/jaegertracing/jaeger/blob/main/cmd/jaeger/config-elasticsearch.yaml):
```yaml
jaeger_storage:
backends:
some_storage:
elasticsearch:
servers: [<name-of-the-service>]
```
Use the modified config to create Jaeger instance with the help of OpenTelemetry Operator.
```yaml
kubectl apply -f - <<EOF
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
name: jaeger-storage-instance # name of your choice
spec:
image: jaegertracing/jaeger:latest
ports:
- name: jaeger
port: 16686
config:
# modified config
EOF
```
## Contributing and Developing
Please see [CONTRIBUTING.md](CONTRIBUTING.md).


@@ -16,16 +16,12 @@ Steps to release a new version of the Jaeger Operator:
1. Change the `versions.txt` so that it lists the target version of Jaeger (if required). **Don't touch the operator version**: it will be changed automatically in the next step.
2. Confirm that `MIN_KUBERNETES_VERSION` and `MIN_OPENSHIFT_VERSION` in the `Makefile` are still up-to-date, and update them if required.
2. Run `OPERATOR_VERSION=1.30.0 make prepare-release`, using the operator version that will be released.
3. Run the E2E tests in OpenShift as described in [the CONTRIBUTING.md](CONTRIBUTING.md#an-external-cluster-like-openshift) file. The tests will be executed automatically in Kubernetes by the GitHub Actions CI later.
4. Prepare a changelog since last release.
4. Update the release manager schedule.
5. Commit the changes and create a pull request:
```sh
@@ -59,14 +55,3 @@ After the PRs have been made it must be ensured that:
- Images listed in the ClusterServiceVersion (CSV) have a versions tag [#1682](https://github.com/jaegertracing/jaeger-operator/issues/1682)
- No `bundle` folder is included in the release
- No foreign CRs like prometheus are in the manifests
## Release managers
The operator should be released within a week after the [Jaeger release](https://github.com/jaegertracing/jaeger/blob/main/RELEASE.md#release-managers).
| Version | Release Manager |
|---------| -------------------------------------------------------- |
| 1.63.0 | [Benedikt Bongartz](https://github.com/frzifus) |
| 1.64.0 | [Pavol Loffay](https://github.com/pavolloffay) |
| 1.65.0 | [Israel Blancas](https://github.com/iblancasa) |
| 1.66.0 | [Ruben Vargas](https://github.com/rubenvp8510) |


@@ -5,7 +5,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestUnmarshalJSON(t *testing.T) {
@@ -29,7 +28,7 @@
t.Run(name, func(t *testing.T) {
ds := DeploymentStrategy("")
err := json.Unmarshal([]byte(tc.json), &ds)
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, tc.expected, ds)
})
}
@@ -49,7 +48,7 @@ func TestMarshalJSON(t *testing.T) {
for name, tc := range tcs {
t.Run(name, func(t *testing.T) {
data, err := json.Marshal(tc.strategy)
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, tc.expected, string(data))
})
}


@@ -4,7 +4,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFreeForm(t *testing.T) {
@@ -17,7 +16,7 @@
},
})
json, err := o.MarshalJSON()
require.NoError(t, err)
assert.NoError(t, err)
assert.NotNil(t, json)
assert.Equal(t, uiconfig, string(*o.json))
}
@@ -27,7 +26,7 @@ func TestFreeFormUnmarhalMarshal(t *testing.T) {
o := NewFreeForm(nil)
o.UnmarshalJSON([]byte(uiconfig))
json, err := o.MarshalJSON()
require.NoError(t, err)
assert.NoError(t, err)
assert.NotNil(t, json)
assert.Equal(t, uiconfig, string(*o.json))
}
@@ -67,9 +66,9 @@ func TestToMap(t *testing.T) {
f := NewFreeForm(test.m)
got, err := f.GetMap()
if test.err != "" {
require.EqualError(t, err, test.err)
assert.EqualError(t, err, test.err)
} else {
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, test.expected, got)
}
}


@@ -36,36 +36,36 @@ const (
// FlagAutoscalingVersionV2Beta2 represents the v2beta2 version of the Kubernetes Autoscaling API, no longer available as of 1.26
FlagAutoscalingVersionV2Beta2 = "autoscaling/v2beta2"
// FlagPlatform represents the flag to set the platform
FlagPlatform = "platform"
// FlagPlatformKubernetes represents the value for the 'platform' flag for Kubernetes
FlagPlatformKubernetes = "kubernetes"
// FlagPlatformOpenShift represents the value for the 'platform' flag for OpenShift
FlagPlatformOpenShift = "openshift"
// FlagPlatformAutoDetect represents the "auto-detect" value for the platform flag
FlagPlatformAutoDetect = "auto-detect"
// FlagESProvision represents the 'es-provision' flag
FlagESProvision = "es-provision"
// FlagProvisionElasticsearchAuto represents the 'auto' value for the 'es-provision' flag
FlagProvisionElasticsearchAuto = "auto"
// FlagProvisionElasticsearchYes represents the value 'yes' for the 'es-provision' flag
FlagProvisionElasticsearchYes = "yes"
// FlagProvisionElasticsearchNo represents the value 'no' for the 'es-provision' flag
FlagProvisionElasticsearchNo = "no"
// FlagProvisionKafkaAuto represents the 'auto' value for the 'kafka-provision' flag
FlagProvisionKafkaAuto = "auto"
// FlagKafkaProvision represents the 'kafka-provision' flag.
FlagKafkaProvision = "kafka-provision"
// FlagProvisionKafkaYes represents the value 'yes' for the 'kafka-provision' flag
FlagProvisionKafkaYes = "yes"
// FlagAuthDelegatorAvailability represents the 'auth-delegator-available' flag.
FlagAuthDelegatorAvailability = "auth-delegator-available"
// FlagOpenShiftOauthProxyImage represents the 'openshift-oauth-proxy-image' flag.
FlagOpenShiftOauthProxyImage = "openshift-oauth-proxy-image"
// FlagProvisionKafkaNo represents the value 'no' for the 'kafka-provision' flag
FlagProvisionKafkaNo = "no"
// IngressSecurityNone disables any form of security for ingress objects (default)
IngressSecurityNone IngressSecurityType = ""
// FlagDefaultIngressClass represents the default Ingress class from the cluster
FlagDefaultIngressClass = "default-ingressclass"
// IngressSecurityNoneExplicit used when the user specifically set it to 'none'
IngressSecurityNoneExplicit IngressSecurityType = "none"
@@ -292,10 +292,6 @@ type JaegerQuerySpec struct {
// +optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Strategy"
Strategy *appsv1.DeploymentStrategy `json:"strategy,omitempty"`
// +optional
// +nullable
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}
// JaegerUISpec defines the options to be used to configure the UI
@@ -374,10 +370,6 @@ type JaegerIngressOpenShiftSpec struct {
// SkipLogout tells the operator to not automatically add a "Log Out" menu option to the custom Jaeger configuration
// +optional
SkipLogout *bool `json:"skipLogout,omitempty"`
// Timeout defines client timeout from oauth-proxy to jaeger.
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
}
// JaegerAllInOneSpec defines the options to be used when deploying the query
@@ -408,9 +400,6 @@ type JaegerAllInOneSpec struct {
// +optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Strategy"
Strategy *appsv1.DeploymentStrategy `json:"strategy,omitempty"`
// +optional
PriorityClassName string `json:"priorityClassName,omitempty"`
}
// AutoScaleSpec defines the common elements used for create HPAs
@@ -466,16 +455,6 @@ type JaegerCollectorSpec struct {
// +optional
KafkaSecretName string `json:"kafkaSecretName"`
// +optional
// +nullable
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// +optional
Lifecycle *v1.Lifecycle `json:"lifecycle,omitempty"`
// +optional
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
}
// JaegerIngesterSpec defines the options to be used when deploying the ingester
@@ -506,10 +485,6 @@ type JaegerIngesterSpec struct {
// +optional
KafkaSecretName string `json:"kafkaSecretName"`
// +optional
// +nullable
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}
// JaegerAgentSpec defines the options to be used when deploying the agent
@@ -580,9 +555,6 @@ type JaegerStorageSpec struct {
type JaegerMetricsStorageSpec struct {
// +optional
Type JaegerStorageType `json:"type,omitempty"`
// +optional
ServerUrl string `json:"server-url,omitempty"`
}
// ElasticsearchSpec represents the ES configuration options that we pass down to the OpenShift Elasticsearch operator.


@@ -12,7 +12,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
const (
@@ -35,6 +35,8 @@ func (j *Jaeger) SetupWebhookWithManager(mgr ctrl.Manager) error {
//+kubebuilder:webhook:path=/mutate-jaegertracing-io-v1-jaeger,mutating=true,failurePolicy=fail,sideEffects=None,groups=jaegertracing.io,resources=jaegers,verbs=create;update,versions=v1,name=mjaeger.kb.io,admissionReviewVersions={v1}
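// Compile-time assertion that *Jaeger satisfies the webhook.Defaulter interface.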
var _ webhook.Defaulter = &Jaeger{}
func (j *Jaeger) objsWithOptions() []*Options {
return []*Options{
&j.Spec.AllInOne.Options, &j.Spec.Query.Options, &j.Spec.Collector.Options,
@@ -45,7 +47,6 @@ func (j *Jaeger) objsWithOptions() []*Options {
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (j *Jaeger) Default() {
jaegerlog.Info("default", "name", j.Name)
jaegerlog.Info("WARNING jaeger-agent is deprecated and will removed in v1.55.0. See https://github.com/jaegertracing/jaeger/issues/4739", "component", "agent")
if j.Spec.Storage.Elasticsearch.Name == "" {
j.Spec.Storage.Elasticsearch.Name = defaultElasticsearchName
@@ -82,14 +83,16 @@ func (j *Jaeger) Default() {
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-jaegertracing-io-v1-jaeger,mutating=false,failurePolicy=fail,sideEffects=None,groups=jaegertracing.io,resources=jaegers,verbs=create;update,versions=v1,name=vjaeger.kb.io,admissionReviewVersions={v1}
var _ webhook.Validator = &Jaeger{}
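// Note: newer controller-runtime releases (v0.15+) changed webhook.Validator so each
// Validate* method also returns admission.Warnings (a []string surfaced to the API client).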
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateCreate() (admission.Warnings, error) {
func (j *Jaeger) ValidateCreate() error {
jaegerlog.Info("validate create", "name", j.Name)
return j.ValidateUpdate(nil)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
func (j *Jaeger) ValidateUpdate(_ runtime.Object) error {
jaegerlog.Info("validate update", "name", j.Name)
if ShouldInjectOpenShiftElasticsearchConfiguration(j.Spec.Storage) && j.Spec.Storage.Elasticsearch.DoNotProvision {
@@ -100,24 +103,24 @@ func (j *Jaeger) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
Name: j.Spec.Storage.Elasticsearch.Name,
}, es)
if errors.IsNotFound(err) {
return nil, fmt.Errorf("elasticsearch instance not found: %w", err)
return fmt.Errorf("elasticsearch instance not found: %v", err)
}
}
for _, opt := range j.objsWithOptions() {
got := opt.DeepCopy().ToArgs()
if f := getAdditionalTLSFlags(got); f != nil {
return nil, fmt.Errorf("tls flags incomplete, got: %v", got)
return fmt.Errorf("tls flags incomplete, got: %v", got)
}
}
return nil, nil
return nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateDelete() (admission.Warnings, error) {
func (j *Jaeger) ValidateDelete() error {
jaegerlog.Info("validate delete", "name", j.Name)
return nil, nil
return nil
}
// OpenShiftElasticsearchNodeCount returns total node count of Elasticsearch nodes.


@@ -4,21 +4,15 @@ import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
var (
_ webhook.Defaulter = &Jaeger{}
_ webhook.Validator = &Jaeger{}
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
)
func TestDefault(t *testing.T) {
@@ -171,8 +165,8 @@
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
require.NoError(t, esv1.AddToScheme(scheme.Scheme))
require.NoError(t, AddToScheme(scheme.Scheme))
esv1.AddToScheme(scheme.Scheme)
AddToScheme(scheme.Scheme)
fakeCl := fake.NewClientBuilder().WithRuntimeObjects(test.objs...).Build()
cl = fakeCl
@@ -183,9 +177,7 @@
}
func TestValidateDelete(t *testing.T) {
warnings, err := new(Jaeger).ValidateDelete()
assert.Nil(t, warnings)
require.NoError(t, err)
assert.Nil(t, new(Jaeger).ValidateDelete())
}
func TestValidate(t *testing.T) {
@@ -278,19 +270,18 @@
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
require.NoError(t, esv1.AddToScheme(scheme.Scheme))
require.NoError(t, AddToScheme(scheme.Scheme))
esv1.AddToScheme(scheme.Scheme)
AddToScheme(scheme.Scheme)
fakeCl := fake.NewClientBuilder().WithRuntimeObjects(test.objsToCreate...).Build()
cl = fakeCl
warnings, err := test.current.ValidateCreate()
err := test.current.ValidateCreate()
if test.err != "" {
require.Error(t, err)
assert.NotNil(t, err)
assert.Equal(t, test.err, err.Error())
} else {
require.NoError(t, err)
assert.Nil(t, err)
}
assert.Nil(t, warnings)
})
}
}


@@ -18,7 +18,7 @@ func TestSimpleOption(t *testing.T) {
func TestNoOptions(t *testing.T) {
o := Options{}
assert.Empty(t, o.ToArgs())
assert.Len(t, o.ToArgs(), 0)
}
func TestNestedOption(t *testing.T) {
@@ -40,7 +40,7 @@ func TestMarshalling(t *testing.T) {
})
b, err := json.Marshal(o)
require.NoError(t, err)
assert.NoError(t, err)
s := string(b)
assert.Contains(t, s, `"es.password":"changeme"`)
assert.Contains(t, s, `"es.server-urls":"http://elasticsearch.default.svc:9200"`)
@@ -85,9 +85,9 @@ func TestUnmarshalToArgs(t *testing.T) {
opts := Options{}
err := opts.UnmarshalJSON([]byte(test.in))
if test.err != "" {
require.EqualError(t, err, test.err)
assert.EqualError(t, err, test.err)
} else {
require.NoError(t, err)
assert.NoError(t, err)
args := opts.ToArgs()
sort.SliceStable(args, func(i, j int) bool {
return args[i] < args[j]
@@ -129,7 +129,7 @@ func TestMarshallRaw(t *testing.T) {
o := NewOptions(nil)
o.json = &json
bytes, err := o.MarshalJSON()
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, bytes, json)
}
@@ -137,7 +137,7 @@ func TestMarshallEmpty(t *testing.T) {
o := NewOptions(nil)
json := []byte(`{}`)
bytes, err := o.MarshalJSON()
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, bytes, json)
}
@ -151,7 +151,7 @@ func TestUpdate(t *testing.T) {
o.Map()["key"] = "new"
// verify
assert.Equal(t, "new", o.opts["key"])
assert.Equal(t, o.opts["key"], "new")
}
func TestStringMap(t *testing.T) {
@ -170,7 +170,7 @@ func TestDeepCopy(t *testing.T) {
require.NoError(t, err)
copy := o1.opts.DeepCopy()
assert.Equal(t, &(o1.opts), copy)
assert.Equal(t, copy, &(o1.opts))
}
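
The Options hunks above mostly swap assertion helpers, but the behavior under test is easy to lose in diff form: a flat map of option keys rendered as --key=value CLI arguments, sorted before comparison. A toy sketch of that pattern with illustrative names (this is not the operator's actual Options implementation):

package main

import (
	"fmt"
	"sort"
)

// toArgs renders a flat option map as CLI arguments in deterministic order.
func toArgs(opts map[string]string) []string {
	args := make([]string, 0, len(opts))
	for k, v := range opts {
		args = append(args, fmt.Sprintf("--%s=%s", k, v))
	}
	sort.Strings(args) // map iteration order is random; sort as the tests do
	return args
}

func main() {
	fmt.Println(toArgs(map[string]string{
		"es.server-urls": "http://elasticsearch.default.svc:9200",
		"log-level":      "debug",
	}))
	// prints: [--es.server-urls=http://elasticsearch.default.svc:9200 --log-level=debug]
}
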
func TestRepetitiveArguments(t *testing.T) {

View File

@ -1,4 +1,5 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by controller-gen. DO NOT EDIT.
@ -7,7 +8,6 @@ package v1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@ -256,23 +256,6 @@ func (in *JaegerCollectorSpec) DeepCopyInto(out *JaegerCollectorSpec) {
*out = new(appsv1.DeploymentStrategy)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Lifecycle != nil {
in, out := &in.Lifecycle, &out.Lifecycle
*out = new(corev1.Lifecycle)
(*in).DeepCopyInto(*out)
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JaegerCollectorSpec.
@ -496,13 +479,6 @@ func (in *JaegerIngesterSpec) DeepCopyInto(out *JaegerIngesterSpec) {
*out = new(appsv1.DeploymentStrategy)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JaegerIngesterSpec.
@ -528,11 +504,6 @@ func (in *JaegerIngressOpenShiftSpec) DeepCopyInto(out *JaegerIngressOpenShiftSp
*out = new(bool)
**out = **in
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JaegerIngressOpenShiftSpec.
@ -673,13 +644,6 @@ func (in *JaegerQuerySpec) DeepCopyInto(out *JaegerQuerySpec) {
*out = new(appsv1.DeploymentStrategy)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JaegerQuerySpec.

View File

@ -11,9 +11,6 @@ LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.13.0+git
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
# OpenShift specific labels.
LABEL com.redhat.openshift.versions=v4.12
# Copy files to locations specified by labels.
COPY bundle/manifests /manifests/
COPY bundle/metadata /metadata/

View File

@ -18,16 +18,16 @@ metadata:
capabilities: Deep Insights
categories: Logging & Tracing
certified: "false"
containerImage: quay.io/jaegertracing/jaeger-operator:1.62.0
createdAt: "2025-01-22T20:40:19Z"
containerImage: quay.io/jaegertracing/jaeger-operator
createdAt: "2019-09-04T13:28:40+00:00"
description: Provides tracing, monitoring and troubleshooting for microservices-based
distributed systems
operators.openshift.io/infrastructure-features: '["disconnected"]'
operators.operatorframework.io/builder: operator-sdk-v1.32.0
operators.operatorframework.io/builder: operator-sdk-v1.23.0
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
repository: https://github.com/jaegertracing/jaeger-operator
support: Jaeger Community
name: jaeger-operator.v1.65.0
name: jaeger-operator.v1.42.0
namespace: placeholder
spec:
apiservicedefinitions: {}
@ -336,7 +336,6 @@ spec:
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
- ingresses
verbs:
- create
@ -370,12 +369,6 @@ spec:
- patch
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create
serviceAccountName: jaeger-operator
deployments:
- label:
@ -410,11 +403,7 @@ spec:
fieldPath: metadata.namespace
- name: OPERATOR_NAME
value: jaeger-operator
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
image: quay.io/jaegertracing/jaeger-operator:1.65.0
image: quay.io/jaegertracing/jaeger-operator:1.42.0
livenessProbe:
httpGet:
path: /healthz
@ -433,6 +422,9 @@ spec:
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
@ -447,7 +439,7 @@ spec:
- --upstream=http://127.0.0.1:8383/
- --logtostderr=true
- --v=0
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
@ -524,14 +516,13 @@ spec:
- email: jaeger-tracing@googlegroups.com
name: Jaeger Google Group
maturity: alpha
minKubeVersion: 1.19.0
provider:
name: CNCF
replaces: jaeger-operator.v1.62.0
replaces: jaeger-operator.v1.41.1
selector:
matchLabels:
name: jaeger-operator
version: 1.65.0
version: 1.42.0
webhookdefinitions:
- admissionReviewVersions:
- v1

File diff suppressed because it is too large

View File

@ -9,6 +9,3 @@ annotations:
operators.operatorframework.io.metrics.builder: operator-sdk-v1.13.0+git
operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
# OpenShift annotations
com.redhat.openshift.versions: v4.12

View File

@ -8,7 +8,7 @@ stages:
- entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: basic
test: basic-check-spec-test
@ -18,7 +18,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-bundle-validation-test
@ -28,7 +28,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-crds-have-validation-test
@ -38,7 +38,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-crds-have-resources-test
@ -48,7 +48,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-spec-descriptors-test
@ -58,7 +58,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-status-descriptors-test

File diff suppressed because it is too large

View File

@ -9,7 +9,7 @@ spec:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8383/"

View File

@ -5,4 +5,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/jaegertracing/jaeger-operator
newTag: 1.65.0
newTag: 1.42.0

View File

@ -37,6 +37,9 @@ spec:
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
@ -51,33 +54,5 @@ spec:
fieldPath: metadata.namespace
- name: OPERATOR_NAME
value: "jaeger-operator"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
serviceAccountName: jaeger-operator
terminationGracePeriodSeconds: 10

View File

@ -15,8 +15,8 @@ metadata:
capabilities: Deep Insights
categories: Logging & Tracing
certified: "false"
containerImage: quay.io/jaegertracing/jaeger-operator:1.62.0
createdAt: "2023-05-16T04:47:12Z"
containerImage: quay.io/jaegertracing/jaeger-operator
createdAt: "2019-09-04T13:28:40+00:00"
description: Provides tracing, monitoring and troubleshooting for microservices-based
distributed systems
operators.openshift.io/infrastructure-features: '["disconnected"]'
@ -122,10 +122,9 @@ spec:
- email: jaeger-tracing@googlegroups.com
name: Jaeger Google Group
maturity: alpha
minKubeVersion: 1.19.0
provider:
name: CNCF
replaces: jaeger-operator.v1.62.0
replaces: jaeger-operator.v1.41.1
selector:
matchLabels:
name: jaeger-operator

View File

@ -2,6 +2,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
@ -221,7 +222,6 @@ rules:
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
- ingresses
verbs:
- create
@ -255,9 +255,3 @@ rules:
- patch
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create

View File

@ -2,6 +2,7 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
@ -48,6 +49,7 @@ webhooks:
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:

View File

@ -25,13 +25,15 @@ import (
"github.com/jaegertracing/jaeger-operator/pkg/tracing"
)
var _ webhook.AdmissionHandler = (*deploymentInterceptor)(nil)
var (
_ admission.DecoderInjector = (*deploymentInterceptor)(nil)
_ webhook.AdmissionHandler = (*deploymentInterceptor)(nil)
)
// NewDeploymentInterceptorWebhook creates a new deployment mutating webhook to be registered
func NewDeploymentInterceptorWebhook(c client.Client, decoder *admission.Decoder) webhook.AdmissionHandler {
func NewDeploymentInterceptorWebhook(c client.Client) webhook.AdmissionHandler {
return &deploymentInterceptor{
client: c,
decoder: decoder,
}
}
@ -136,9 +138,7 @@ func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Reques
span.AddEvent(msg)
}
envConfigMaps := corev1.ConfigMapList{}
d.client.List(ctx, &envConfigMaps, client.InNamespace(dep.Namespace))
dep = inject.Sidecar(jaeger, dep, inject.WithEnvFromConfigMaps(inject.GetConfigMapsMatchedEnvFromInDeployment(*dep, envConfigMaps.Items)))
dep = inject.Sidecar(jaeger, dep)
marshaledDeploy, err := json.Marshal(dep)
if err != nil {
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))

View File

@ -24,7 +24,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
"github.com/jaegertracing/jaeger-operator/pkg/inject"
)
@ -94,7 +93,8 @@ func TestReconcileConfigMaps(t *testing.T) {
errors: tC.errors,
}
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
// test
err := reconcileConfigMaps(context.Background(), cl, jaeger, &dep)
@ -244,7 +244,7 @@ func TestReconcilieDeployment(t *testing.T) {
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "is jaeger deployment, we do not touch it",
Reason: "is jaeger deployment, we do not touch it",
Code: 200,
},
},
@ -314,7 +314,7 @@ func TestReconcilieDeployment(t *testing.T) {
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "no suitable Jaeger instances found to inject a sidecar",
Reason: "no suitable Jaeger instances found to inject a sidecar",
Code: 200,
},
},
@ -369,7 +369,7 @@ func TestReconcilieDeployment(t *testing.T) {
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "not watching in namespace, we do not touch the deployment",
Reason: "not watching in namespace, we do not touch the deployment",
Code: 200,
},
},
@ -398,8 +398,7 @@ func TestReconcilieDeployment(t *testing.T) {
errors: tc.errors,
}
decoder := admission.NewDecoder(scheme.Scheme)
r := NewDeploymentInterceptorWebhook(cl, decoder)
r := NewDeploymentInterceptorWebhook(cl)
req := admission.Request{}
if !tc.emptyRequest {
@ -420,6 +419,9 @@ func TestReconcilieDeployment(t *testing.T) {
}
}
decoder, err := admission.NewDecoder(s)
require.NoError(t, err)
admission.InjectDecoderInto(decoder, r)
resp := r.Handle(context.Background(), req)
assert.Len(t, resp.Patches, len(tc.resp.Patches))
@ -431,6 +433,8 @@ func TestReconcilieDeployment(t *testing.T) {
})
assert.Equal(t, tc.resp, resp)
require.NoError(t, err)
})
}
}
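
The two hunks above follow the removal of admission.DecoderInjector from controller-runtime: the decoder is now built once with admission.NewDecoder (which no longer returns an error) and handed to the handler's constructor instead of being injected afterwards. A self-contained sketch of that wiring, with illustrative names and assuming the controller-runtime version pinned by this branch:

package main

import (
	"context"
	"net/http"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// interceptor receives its decoder at construction time.
type interceptor struct {
	client  client.Client
	decoder *admission.Decoder
}

func newInterceptor(c client.Client, d *admission.Decoder) *interceptor {
	return &interceptor{client: c, decoder: d}
}

// Handle decodes the incoming deployment and admits it unchanged.
func (i *interceptor) Handle(_ context.Context, req admission.Request) admission.Response {
	dep := &appsv1.Deployment{}
	if err := i.decoder.Decode(req, dep); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	return admission.Allowed("nothing to change")
}

func main() {
	_ = newInterceptor(nil, admission.NewDecoder(scheme.Scheme))
}
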

View File

@ -1,16 +1,13 @@
package appsv1_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/controllers/appsv1"
)
@ -30,26 +27,5 @@ func TestNamespaceControllerRegisterWithManager(t *testing.T) {
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
}
func TestNewNamespaceInstance(t *testing.T) {
// prepare
nsn := types.NamespacedName{Name: "my-instance", Namespace: "default"}
reconciler := appsv1.NewNamespaceReconciler(
k8sClient,
k8sClient,
testScheme,
)
instance := v1.NewJaeger(nsn)
err := k8sClient.Create(context.Background(), instance)
require.NoError(t, err)
req := k8sreconcile.Request{
NamespacedName: nsn,
}
_, err = reconciler.Reconcile(context.Background(), req)
require.NoError(t, err)
assert.NoError(t, err)
}

View File

@ -1,77 +0,0 @@
package elasticsearch_test
import (
"context"
"testing"
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/manager"
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/controllers/elasticsearch"
)
func TestElasticSearchSetupWithManager(t *testing.T) {
t.Skip("this test requires a real cluster, otherwise the GetConfigOrDie will die")
// prepare
mgr, err := manager.New(k8sconfig.GetConfigOrDie(), manager.Options{})
require.NoError(t, err)
reconciler := elasticsearch.NewReconciler(
k8sClient,
k8sClient,
)
// test
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
}
func TestNewElasticSearchInstance(t *testing.T) {
// prepare
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ns",
},
}
es := &esv1.Elasticsearch{
ObjectMeta: metav1.ObjectMeta{
Name: "test-es",
Namespace: "test-ns",
},
}
jaeger := v1.NewJaeger(types.NamespacedName{
Name: "test-jaeger",
Namespace: "test-jaeger",
})
esv1.AddToScheme(testScheme)
v1.AddToScheme(testScheme)
client := fake.NewClientBuilder().WithRuntimeObjects(ns, es, jaeger).Build()
reconciler := elasticsearch.NewReconciler(
client,
client,
)
req := k8sreconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-es",
Namespace: "test-ns",
},
}
_, err := reconciler.Reconcile(context.Background(), req)
require.NoError(t, err)
}

View File

@ -1,57 +0,0 @@
package elasticsearch_test
import (
"fmt"
"os"
"path/filepath"
"testing"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
// +kubebuilder:scaffold:imports
)
var (
k8sClient client.Client
testEnv *envtest.Environment
testScheme *runtime.Scheme = scheme.Scheme
)
func TestMain(m *testing.M) {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
}
cfg, err := testEnv.Start()
if err != nil {
fmt.Printf("failed to start testEnv: %v", err)
os.Exit(1)
}
if err := v1.AddToScheme(scheme.Scheme); err != nil {
fmt.Printf("failed to register scheme: %v", err)
os.Exit(1)
}
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme})
if err != nil {
fmt.Printf("failed to setup a Kubernetes client: %v", err)
os.Exit(1)
}
code := m.Run()
err = testEnv.Stop()
if err != nil {
fmt.Printf("failed to stop testEnv: %v", err)
os.Exit(1)
}
os.Exit(code)
}

View File

@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/operator-framework/operator-lib/handler"
@ -48,9 +49,8 @@ func NewReconciler(client client.Client, clientReader client.Reader, scheme *run
// +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses;ingressclasses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes/custom-host,verbs=create
// +kubebuilder:rbac:groups=console.openshift.io,resources=consolelinks,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=jobs;cronjobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete
@ -69,10 +69,7 @@ func (r *JaegerReconciler) Reconcile(ctx context.Context, request ctrl.Request)
func (r *JaegerReconciler) SetupWithManager(mgr ctrl.Manager) error {
err := ctrl.NewControllerManagedBy(mgr).
For(&v1.Jaeger{}).
Watches(
&v1.Jaeger{},
&handler.InstrumentedEnqueueRequestForObject{},
).
Watches(&source.Kind{Type: &v1.Jaeger{}}, &handler.InstrumentedEnqueueRequestForObject{}).
Complete(r)
return err
}

View File

@ -4,6 +4,7 @@ import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
@ -51,5 +52,5 @@ func TestRegisterWithManager(t *testing.T) {
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
assert.NoError(t, err)
}

File diff suppressed because it is too large

View File

@ -5,7 +5,7 @@ metadata:
spec:
strategy: allInOne
allInOne:
image: jaegertracing/all-in-one:1.65.0
image: jaegertracing/all-in-one:1.42.0
options:
log-level: debug
query:

View File

@ -1,17 +0,0 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: high-priority # priorityClassName here
value: 1000000
globalDefault: false
description: "This priority class should be used for XYZ service pods only."
---
apiVersion: jaegertracing.io/v1
kind: "Jaeger"
metadata:
name: "my-jaeger"
spec:
strategy: allInOne
allInOne:
image: jaegertracing/all-in-one:1.30.0
priorityClassName: high-priority # priorityClassName here

View File

@ -17,7 +17,7 @@ spec:
serviceAccountName: jaeger-operator
containers:
- name: jaeger-operator
image: jaegertracing/jaeger-operator:1.65.0
image: jaegertracing/jaeger-operator:1.42.0
ports:
- containerPort: 8383
name: http-metrics
@ -41,7 +41,7 @@ spec:
- name: OPERATOR_NAME
value: "jaeger-operator"
- name: jaeger-agent
image: jaegertracing/jaeger-agent:1.62.0
image: jaegertracing/jaeger-agent:1.42.0
env:
- name: POD_NAMESPACE
valueFrom:

View File

@ -23,7 +23,7 @@ spec:
- containerPort: 8080
protocol: TCP
- name: jaeger-agent
image: jaegertracing/jaeger-agent:1.62.0
image: jaegertracing/jaeger-agent:1.42.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 5775

View File

@ -20,7 +20,7 @@ spec:
spec:
containers:
- name: tracegen
image: jaegertracing/jaeger-tracegen:1.65.0
image: jaegertracing/jaeger-tracegen:1.42.0
args:
- -duration=30m
- -workers=10

go.mod
View File

@ -1,116 +1,114 @@
module github.com/jaegertracing/jaeger-operator
go 1.22.0
go 1.18
require (
github.com/Masterminds/semver v1.5.0
github.com/go-logr/logr v1.4.2
github.com/google/gnostic-models v0.6.9
github.com/google/go-cmp v0.7.0
github.com/go-logr/logr v1.2.3
github.com/google/gnostic v0.6.9
github.com/google/go-cmp v0.5.9
github.com/mitchellh/go-homedir v1.1.0
github.com/openshift/api v0.0.0-20231206170337-f356bd9e2ff6
github.com/openshift/elasticsearch-operator v0.0.0-20231013125000-a5c132efd4e0
github.com/openshift/library-go v0.0.0-20231130204458-653f82d961a1
github.com/openshift/api v0.0.0-20220124143425-d74727069f6f
github.com/openshift/elasticsearch-operator v0.0.0-20220708171007-a87102296ded
github.com/opentracing/opentracing-go v1.2.0
github.com/operator-framework/operator-lib v0.13.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
github.com/operator-framework/operator-lib v0.11.0
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.1
github.com/uber/jaeger-client-go v2.30.0+incompatible
go.opentelemetry.io/otel v1.35.0
go.opentelemetry.io/otel/exporters/jaeger v1.17.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0
go.opentelemetry.io/otel/exporters/prometheus v0.57.0
go.opentelemetry.io/otel/metric v1.35.0
go.opentelemetry.io/otel/sdk v1.35.0
go.opentelemetry.io/otel/sdk/metric v1.35.0
go.opentelemetry.io/otel/trace v1.35.0
go.uber.org/zap v1.27.0
gomodules.xyz/jsonpatch/v2 v2.4.0
google.golang.org/grpc v1.71.0
k8s.io/api v0.29.3
k8s.io/apimachinery v0.29.3
k8s.io/client-go v0.29.3
k8s.io/component-base v0.29.3
sigs.k8s.io/controller-runtime v0.17.3
go.opentelemetry.io/otel v1.12.0
go.opentelemetry.io/otel/exporters/jaeger v1.11.2
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.12.0
go.opentelemetry.io/otel/exporters/prometheus v0.34.0
go.opentelemetry.io/otel/metric v0.34.0
go.opentelemetry.io/otel/sdk v1.12.0
go.opentelemetry.io/otel/sdk/metric v0.34.0
go.opentelemetry.io/otel/trace v1.12.0
go.uber.org/zap v1.24.0
gomodules.xyz/jsonpatch/v2 v2.2.0
google.golang.org/grpc v1.52.3
k8s.io/api v0.25.4
k8s.io/apimachinery v0.26.1
k8s.io/client-go v0.25.4
k8s.io/component-base v0.25.4
sigs.k8s.io/controller-runtime v0.13.1
)
require (
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/codahale/hdrhistogram v0.0.0-00010101000000-000000000000 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.2 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.8.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.20.2 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
golang.org/x/net v0.35.0 // indirect
golang.org/x/oauth2 v0.26.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/term v0.29.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/protobuf v1.36.5 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
golang.org/x/net v0.4.0 // indirect
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.1.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.29.2 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20240221221325-2ac9dc51f3f1 // indirect
k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
k8s.io/apiextensions-apiserver v0.25.0 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace github.com/codahale/hdrhistogram => github.com/HdrHistogram/hdrhistogram-go v1.1.2

go.sum

File diff suppressed because it is too large

View File

@ -0,0 +1,66 @@
# GitHub action to run the E2E tests.
# For this purpose, it would be a better idea to use a reusable workflow. There
# is some documentation about how to use a local reusable workflow:
# https://github.blog/changelog/2022-01-25-github-actions-reusable-workflows-can-be-referenced-locally/
# But it seems it doesn't work properly:
# https://github.community/t/allow-reusable-workflows-to-be-located-at-arbitrary-locations-and-be-local/212745/7
# So, the CI uses a local GitHub action as a template to run all the tests.
name: Run E2E tests
description: "Run an E2E test suite"
inputs:
testsuite_name:
description: "Name of the test suite to run"
required: true
kube_version:
description: "Kubernetes version to use"
required: true
runs:
using: "composite"
steps:
- name: "Set up Go"
uses: actions/setup-go@v2.1.4
with:
go-version: 1.18
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
install: true
-
name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: e2e-${{ github.sha }}
restore-keys: |
e2e-
- name: "Install KIND"
run: ./hack/install/install-kind.sh
shell: bash
- name: "Install KUTTL"
run: ./hack/install/install-kuttl.sh
shell: bash
- name: "Install gomplate"
run: ./hack/install/install-gomplate.sh
shell: bash
- name: "Install dependencies"
run: make install-tools
shell: bash
- name: "Run E2E ${{ inputs.testsuite_name }} test suite on ${{ inputs.kube_version }}"
env:
VERBOSE: "true"
KUBE_VERSION: "${{ inputs.kube_version }}"
DOCKER_BUILD_OPTIONS: "--cache-from type=local,src=/tmp/.buildx-cache --cache-to type=local,dest=/tmp/.buildx-cache-new,mode=max --load"
run: make run-e2e-tests-${{ inputs.testsuite_name }}
shell: bash
-
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
shell: bash

View File

@ -9,4 +9,3 @@ export EXAMPLES_DIR=$ROOT_DIR/examples
export GOMPLATE=$ROOT_DIR/bin/gomplate
export YQ=$ROOT_DIR/bin/yq
export KUTTL=$ROOT_DIR/bin/kubectl-kuttl
export KUSTOMIZE=$ROOT_DIR/bin/kustomize

View File

@ -1,11 +0,0 @@
#!/bin/bash
# Since operator-sdk 1.26.0, `make bundle` changes the `createdAt` field of the bundle
# even if it is patched:
# https://github.com/operator-framework/operator-sdk/pull/6136
# This script checks whether `createdAt` is the only changed field. If so, the change
# is discarded by checking out the bundle again; otherwise, nothing is done.
# https://github.com/operator-framework/operator-sdk/issues/6285#issuecomment-1415350333
git diff --quiet -I'^ createdAt: ' bundle
if ((! $?)) ; then
git checkout bundle
fi

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="0.14.0"
VERSION="0.9.2"
echo "Installing controller-gen"

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="1.55.2"
VERSION="1.50.1"
echo "Installing golangci-lint"

View File

@ -1,11 +1,7 @@
#!/bin/bash
echo "Installing kind"
VERSION="0.17.0"
VERSION="0.20.0"
# Kubernetes 1.19 and 1.20 are only supported by Kind up to version 0.17.0
if [ "$KUBE_VERSION" == "1.19" ] || [ "$KUBE_VERSION" == "1.20" ]; then
VERSION="0.17.0"
fi
echo "Installing kind"
current_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source $current_dir/install-utils.sh

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="3.10.0"
VERSION="3.6.0"
echo "Installing kubebuilder"

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="4.5.7"
VERSION="4.2.0"
echo "Installing kustomize"

View File

@ -58,7 +58,7 @@ set -e
# The output XML needs some work because it adds "artifacts" as a test case.
# Also, the suites doesn't have a name so, we need to add one.
go install github.com/RH-QE-Distributed-Tracing/junitcli/cmd/junitcli@v1.0.6
go install github.com/RH-QE-Distributed-Tracing/junitcli/cmd/junitcli@v1.0.4
junitcli --suite-name $test_suite_name --report --output $reports_dir/$test_suite_name.xml ./artifacts/kuttl-test.xml
if [ "$KIND_KEEP_CLUSTER" != true ] && [ "$use_kind_cluster" == true ]; then

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.21.14@sha256:8a4e9bb3f415d2bb81629ce33ef9c76ba514c14d707f9797a01e3216376ba093
image: kindest/node:v1.21.14@sha256:9d9eb5fb26b4fbc0c6d95fa8c790414f9750dd583f5d7cee45d92e8c26670aa1
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.22.17@sha256:f5b2e5698c6c9d6d0adc419c0deae21a425c07d81bbf3b6a6834042f25d4fba2
image: kindest/node:v1.22.15@sha256:7d9708c4b0873f0fe2e171e2b1b7f45ae89482617778c1c875f1053d4cef2e41
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.23.17@sha256:59c989ff8a517a93127d4a536e7014d28e235fb3529d9fba91b3951d461edfdb
image: kindest/node:v1.23.13@sha256:ef453bb7c79f0e3caba88d2067d4196f427794086a7d0df8df4f019d5e336b61
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab
image: kindest/node:v1.24.7@sha256:577c630ce8e509131eab1aea12c022190978dd2f745aac5eb1fe65c0807eb315
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8
image: kindest/node:v1.25.3@sha256:f52781bc0d7a19fb6c405c2af83abfeb311f130707a0e219175677e366cc45d1
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
image: kindest/node:v1.26.0@sha256:691e24bd2417609db7e589e1a479b902d2e209892a10ce375fab60a8407c7352
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -1,18 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP

View File

@ -1,18 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP

View File

@ -1,18 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.30.2@sha256:ecfe5841b9bee4fe9690f49c118c33629fa345e3350a0c67a5a34482a99d6bba
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP

View File

@ -1,158 +0,0 @@
package autoclean
import (
"context"
"strings"
"time"
"github.com/spf13/viper"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/discovery"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/inject"
)
type Background struct {
cl client.Client
clReader client.Reader
dcl discovery.DiscoveryInterface
ticker *time.Ticker
}
// New creates a new auto-clean runner
func New(mgr manager.Manager) (*Background, error) {
dcl, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return nil, err
}
return WithClients(mgr.GetClient(), dcl, mgr.GetAPIReader()), nil
}
// WithClients builds a new Background with the provided clients
func WithClients(cl client.Client, dcl discovery.DiscoveryInterface, clr client.Reader) *Background {
return &Background{
cl: cl,
dcl: dcl,
clReader: clr,
}
}
// Start initializes the auto-clean process that runs in the background
func (b *Background) Start() {
b.ticker = time.NewTicker(5 * time.Second)
b.autoClean()
go func() {
for {
<-b.ticker.C
b.autoClean()
}
}()
}
// Stop causes the background process to stop the auto-clean capabilities
func (b *Background) Stop() {
b.ticker.Stop()
}
func (b *Background) autoClean() {
ctx := context.Background()
b.cleanDeployments(ctx)
}
func (b *Background) cleanDeployments(ctx context.Context) {
log.Log.V(-1).Info("cleaning orphaned deployments.")
instancesMap := make(map[string]*v1.Jaeger)
deployments := &appsv1.DeploymentList{}
deployOpts := []client.ListOption{
matchingLabelKeys(map[string]string{inject.Label: ""}),
}
// if we are not watching all namespaces, we have to get items from each namespace being watched
if namespaces := viper.GetString(v1.ConfigWatchNamespace); namespaces != v1.WatchAllNamespaces {
for _, ns := range strings.Split(namespaces, ",") {
nsDeps := &appsv1.DeploymentList{}
if err := b.clReader.List(ctx, nsDeps, append(deployOpts, client.InNamespace(ns))...); err != nil {
log.Log.Error(
err,
"error getting a list of deployments to analyze in namespace",
"namespace", ns,
)
}
deployments.Items = append(deployments.Items, nsDeps.Items...)
instances := &v1.JaegerList{}
if err := b.clReader.List(ctx, instances, client.InNamespace(ns)); err != nil {
log.Log.Error(
err,
"error getting a list of existing jaeger instances in namespace",
"namespace", ns,
)
}
for i := range instances.Items {
instancesMap[instances.Items[i].Name] = &instances.Items[i]
}
}
} else {
if err := b.clReader.List(ctx, deployments, deployOpts...); err != nil {
log.Log.Error(
err,
"error getting a list of deployments to analyze",
)
}
instances := &v1.JaegerList{}
if err := b.clReader.List(ctx, instances); err != nil {
log.Log.Error(
err,
"error getting a list of existing jaeger instances",
)
}
for i := range instances.Items {
instancesMap[instances.Items[i].Name] = &instances.Items[i]
}
}
// check deployments to see which one needs to be cleaned.
for i := range deployments.Items {
dep := deployments.Items[i]
if instanceName, ok := dep.Labels[inject.Label]; ok {
_, instanceExists := instancesMap[instanceName]
if !instanceExists { // Jaeger instance does not exist anymore, we need to clean this up.
inject.CleanSidecar(instanceName, &dep)
if err := b.cl.Update(ctx, &dep); err != nil {
log.Log.Error(
err,
"error cleaning orphaned deployment",
"deploymentName", dep.Name,
"deploymentNamespace", dep.Namespace,
)
}
}
}
}
}
type matchingLabelKeys map[string]string
func (m matchingLabelKeys) ApplyToList(opts *client.ListOptions) {
sel := labels.NewSelector()
for k := range map[string]string(m) {
req, err := labels.NewRequirement(k, selection.Exists, []string{})
if err != nil {
log.Log.Error(err, "failed to build label selector")
return
}
sel = sel.Add(*req) // Add returns a new selector; reassignment is required
}
opts.LabelSelector = sel
}
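
One detail worth calling out in matchingLabelKeys: labels.Selector.Add returns a new selector rather than mutating the receiver, hence the reassignment above. A standalone sketch of the Exists-requirement pattern the helper relies on (the label key shown is illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	sel := labels.NewSelector()
	// match any object that carries the key, regardless of its value
	req, err := labels.NewRequirement("sidecar.jaegertracing.io/injected", selection.Exists, nil)
	if err != nil {
		panic(err)
	}
	sel = sel.Add(*req) // Add returns a new selector; reassign or the requirement is lost
	fmt.Println(sel.String())
}
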

View File

@ -1,168 +0,0 @@
package autoclean
import (
"context"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/inject"
)
func TestCleanDeployments(t *testing.T) {
for _, tt := range []struct {
cap string // caption for the test
watchNamespace string // the value for WATCH_NAMESPACE
jaegerNamespace string // in which namespace the jaeger exists, empty for non existing
deleted bool // whether the sidecar should have been deleted
}{
{
cap: "existing-same-namespace",
watchNamespace: "observability",
jaegerNamespace: "observability",
deleted: false,
},
{
cap: "not-existing-same-namespace",
watchNamespace: "observability",
jaegerNamespace: "",
deleted: true,
},
{
cap: "existing-watched-namespace",
watchNamespace: "observability,other-observability",
jaegerNamespace: "other-observability",
deleted: false,
},
{
cap: "existing-non-watched-namespace",
watchNamespace: "observability",
jaegerNamespace: "other-observability",
deleted: true,
},
{
cap: "existing-watching-all-namespaces",
watchNamespace: v1.WatchAllNamespaces,
jaegerNamespace: "other-observability",
deleted: false,
},
} {
t.Run(tt.cap, func(t *testing.T) {
// prepare the test data
viper.Set(v1.ConfigWatchNamespace, tt.watchNamespace)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{
Name: "my-instance",
Namespace: "observability", // at first, it exists in the same namespace as the deployment
})
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "mydep",
Namespace: "observability",
Annotations: map[string]string{inject.Annotation: jaeger.Name},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "C1",
Image: "image1",
},
},
},
},
},
}
dep = inject.Sidecar(jaeger, dep)
// sanity check
require.Len(t, dep.Spec.Template.Spec.Containers, 2)
// prepare the list of existing objects
objs := []runtime.Object{dep}
if len(tt.jaegerNamespace) > 0 {
jaeger.Namespace = tt.jaegerNamespace // now, it exists only in this namespace
objs = append(objs, jaeger)
}
// prepare the client
s := scheme.Scheme
s.AddKnownTypes(v1.GroupVersion, &v1.Jaeger{})
s.AddKnownTypes(v1.GroupVersion, &v1.JaegerList{})
cl := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
b := WithClients(cl, &fakeDiscoveryClient{}, cl)
// test
b.cleanDeployments(context.Background())
// verify
persisted := &appsv1.Deployment{}
err := cl.Get(context.Background(), types.NamespacedName{
Namespace: dep.Namespace,
Name: dep.Name,
}, persisted)
require.NoError(t, err)
// should the sidecar have been deleted?
if tt.deleted {
assert.Len(t, persisted.Spec.Template.Spec.Containers, 1)
assert.NotContains(t, persisted.Labels, inject.Label)
} else {
assert.Len(t, persisted.Spec.Template.Spec.Containers, 2)
assert.Contains(t, persisted.Labels, inject.Label)
}
})
}
}
type fakeDiscoveryClient struct {
discovery.DiscoveryInterface
ServerGroupsFunc func() (apiGroupList *metav1.APIGroupList, err error)
ServerResourcesForGroupVersionFunc func(groupVersion string) (resources *metav1.APIResourceList, err error)
}
func (d *fakeDiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) {
if d.ServerGroupsFunc == nil {
return &metav1.APIGroupList{}, nil
}
return d.ServerGroupsFunc()
}
func (d *fakeDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) {
if d.ServerGroupsFunc == nil {
return &metav1.APIResourceList{}, nil
}
return d.ServerResourcesForGroupVersionFunc(groupVersion)
}
func (d *fakeDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) {
return []*metav1.APIResourceList{}, nil
}
func (d *fakeDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
return []*metav1.APIResourceList{}, nil
}
func (d *fakeDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
return []*metav1.APIResourceList{}, nil
}
func (d *fakeDiscoveryClient) ServerVersion() (*version.Info, error) {
return &version.Info{}, nil
}

View File

@ -7,21 +7,19 @@ import (
"sync"
"time"
osimagev1 "github.com/openshift/api/image/v1"
imagereference "github.com/openshift/library-go/pkg/image/reference"
"github.com/spf13/viper"
"go.opentelemetry.io/otel"
appsv1 "k8s.io/api/apps/v1"
authenticationapi "k8s.io/api/authentication/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/discovery"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/tracing"
"github.com/jaegertracing/jaeger-operator/pkg/inject"
)
var listenedGroupsMap = map[string]bool{"logging.openshift.io": true, "kafka.strimzi.io": true, "route.openshift.io": true}
@ -97,23 +95,20 @@ func (b *Background) autoDetectCapabilities() {
b.firstRun.Do(func() {
// the platform won't change during the execution of the operator, need to run it only once
b.detectPlatform(ctx, apiList)
// the version of the APIs provided by the platform will not change
b.detectCronjobsVersion(ctx)
b.detectAutoscalingVersion(ctx)
b.detectDefaultIngressClass(ctx)
})
b.detectOAuthProxyImageStream(ctx)
b.detectElasticsearch(ctx, apiList)
b.detectKafka(ctx, apiList)
b.detectCronjobsVersion(ctx)
b.detectAutoscalingVersion(ctx)
}
b.detectClusterRoles(ctx)
b.cleanDeployments(ctx)
}
func (b *Background) detectCronjobsVersion(ctx context.Context) {
apiGroupVersions := []string{v1.FlagCronJobsVersionBatchV1, v1.FlagCronJobsVersionBatchV1Beta1}
detectedVersion := ""
for _, apiGroupVersion := range apiGroupVersions {
groupAPIList, err := b.dcl.ServerResourcesForGroupVersion(apiGroupVersion)
if err != nil {
@ -124,26 +119,20 @@ func (b *Background) detectCronjobsVersion(ctx context.Context) {
}
for _, api := range groupAPIList.APIResources {
if api.Name == "cronjobs" {
detectedVersion = apiGroupVersion
break
viper.Set(v1.FlagCronJobsVersion, apiGroupVersion)
log.Log.V(-1).Info(fmt.Sprintf("found the cronjobs api in %s", apiGroupVersion))
return
}
}
}
if detectedVersion == "" {
log.Log.V(2).Info(
fmt.Sprintf("did not find the cronjobs api in %s", strings.Join(apiGroupVersions, " or ")),
)
} else {
viper.Set(v1.FlagCronJobsVersion, detectedVersion)
log.Log.V(-1).Info(fmt.Sprintf("found the cronjobs api in %s", detectedVersion))
}
}
func (b *Background) detectAutoscalingVersion(ctx context.Context) {
apiGroupVersions := []string{v1.FlagAutoscalingVersionV2, v1.FlagAutoscalingVersionV2Beta2}
detectedVersion := ""
for _, apiGroupVersion := range apiGroupVersions {
groupAPIList, err := b.dcl.ServerResourcesForGroupVersion(apiGroupVersion)
if err != nil {
@ -154,23 +143,16 @@ func (b *Background) detectAutoscalingVersion(ctx context.Context) {
}
for _, api := range groupAPIList.APIResources {
if api.Name == "horizontalpodautoscalers" {
detectedVersion = apiGroupVersion
break
viper.Set(v1.FlagAutoscalingVersion, apiGroupVersion)
log.Log.V(-1).Info(fmt.Sprintf("found the horizontalpodautoscalers api in %s", apiGroupVersion))
return
}
}
if detectedVersion != "" {
break
}
}
if detectedVersion == "" {
log.Log.V(2).Info(
fmt.Sprintf("did not find the autoscaling api in %s", strings.Join(apiGroupVersions, " or ")),
)
} else {
viper.Set(v1.FlagAutoscalingVersion, detectedVersion)
log.Log.V(-1).Info(fmt.Sprintf("found the horizontalpodautoscalers api in %s", detectedVersion))
}
}
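
Both detect functions above share one probe pattern: ask the discovery client for a group/version and scan the returned resource names. A compact standalone sketch of that pattern, assuming a cluster reachable through the default kubeconfig:

package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

// hasResource reports whether the cluster serves the named resource
// under the given group/version.
func hasResource(dcl discovery.DiscoveryInterface, groupVersion, resource string) bool {
	list, err := dcl.ServerResourcesForGroupVersion(groupVersion)
	if err != nil {
		return false // the group/version is not served
	}
	for _, api := range list.APIResources {
		if api.Name == resource {
			return true
		}
	}
	return false
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dcl, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("batch/v1 cronjobs:", hasResource(dcl, "batch/v1", "cronjobs"))
}
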
// AvailableAPIs returns the list of available CRDs from the cluster.
@ -188,212 +170,92 @@ func AvailableAPIs(discovery discovery.DiscoveryInterface, groups map[string]boo
if err == nil {
apiLists = append(apiLists, groupAPIList)
} else {
errors = fmt.Errorf("%w; Error getting resources for server group %s: %w", errors, sg.Name, err)
errors = fmt.Errorf("%v; Error getting resources for server group %s: %v", errors, sg.Name, err)
}
}
}
return apiLists, errors
}
func (b *Background) detectDefaultIngressClass(ctx context.Context) {
if OperatorConfiguration.GetPlatform() == OpenShiftPlatform {
return
}
ingressClasses := networkingv1.IngressClassList{}
err := b.cl.List(ctx, &ingressClasses)
if err != nil {
log.Log.Info("It was not possible to get any IngressClasses from the Kubernetes cluster")
}
oldValue := viper.GetString(v1.FlagDefaultIngressClass)
for _, ingressClass := range ingressClasses.Items {
val, ok := ingressClass.Annotations["ingressclass.kubernetes.io/is-default-class"]
if ok {
if val == "true" {
if oldValue != ingressClass.Name {
log.Log.Info("New default IngressClass value found", "old", oldValue, "new", ingressClass.Name)
}
viper.Set(v1.FlagDefaultIngressClass, ingressClass.Name)
return
}
}
}
}
func (b *Background) detectPlatform(ctx context.Context, apiList []*metav1.APIResourceList) {
// detect the platform, we run this only once, as the platform can't change between runs ;)
platform := OperatorConfiguration.GetPlatform()
detectedPlatform := ""
if !OperatorConfiguration.IsPlatformAutodetectionEnabled() {
log.Log.V(-1).Info(
"The 'platform' option is explicitly set",
"platform", platform,
)
return
}
if strings.EqualFold(viper.GetString("platform"), v1.FlagPlatformAutoDetect) {
log.Log.V(-1).Info("Attempting to auto-detect the platform")
if isOpenShift(apiList) {
detectedPlatform = OpenShiftPlatform.String()
viper.Set("platform", v1.FlagPlatformOpenShift)
} else {
detectedPlatform = KubernetesPlatform.String()
viper.Set("platform", v1.FlagPlatformKubernetes)
}
OperatorConfiguration.SetPlatform(detectedPlatform)
log.Log.Info(
"Auto-detected the platform",
"platform", detectedPlatform,
"platform", viper.GetString("platform"),
)
}
func (b *Background) detectOAuthProxyImageStream(ctx context.Context) {
tracer := otel.GetTracerProvider().Tracer(v1.BootstrapTracer)
ctx, span := tracer.Start(ctx, "detectOAuthProxyImageStream")
defer span.End()
if OperatorConfiguration.GetPlatform() != OpenShiftPlatform {
} else {
log.Log.V(-1).Info(
"Not running on OpenShift, so won't configure OAuthProxy imagestream.",
"The 'platform' option is explicitly set",
"platform", viper.GetString("platform"),
)
return
}
imageStreamNamespace := viper.GetString("openshift-oauth-proxy-imagestream-ns")
imageStreamName := viper.GetString("openshift-oauth-proxy-imagestream-name")
if imageStreamNamespace == "" || imageStreamName == "" {
log.Log.Info(
"OAuthProxy ImageStream namespace and/or name not defined",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
// if the image is already digest-based, there is no need to get the reference from an ImageStream
currImage := OperatorConfiguration.GetOautProxyImage()
currImageReference, err := imagereference.Parse(currImage)
if err == nil {
if currImageReference.ID != "" {
log.Log.V(6).Info(
"OAuthProxy Image already digest-based",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
}
imageStream := &osimagev1.ImageStream{}
namespacedName := types.NamespacedName{
Name: imageStreamName,
Namespace: imageStreamNamespace,
}
if err = b.cl.Get(ctx, namespacedName, imageStream); err != nil {
log.Log.Error(
err,
"Failed to obtain OAuthProxy ImageStream",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
tracing.HandleError(err, span)
return
}
if len(imageStream.Status.Tags) == 0 {
log.Log.V(6).Info(
"OAuthProxy ImageStream has no tags",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
if len(imageStream.Status.Tags[0].Items) == 0 {
log.Log.V(6).Info(
"OAuthProxy ImageStream tag has no items",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
if len(imageStream.Status.Tags[0].Items[0].DockerImageReference) == 0 {
log.Log.V(5).Info(
"OAuthProxy ImageStream tag has no DockerImageReference",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
image := imageStream.Status.Tags[0].Items[0].DockerImageReference
OperatorConfiguration.SetOautProxyImage(image)
log.Log.Info(
"Updated OAuth Proxy image flag",
"image", image,
)
}
func (b *Background) detectElasticsearch(ctx context.Context, apiList []*metav1.APIResourceList) {
// detect whether the Elasticsearch operator is available
currentESProvision := OperatorConfiguration.GetESPIntegration()
if !b.retryDetectEs {
if b.retryDetectEs {
log.Log.V(-1).Info(
"ES Operator integration explicitly set",
v1.FlagESProvision, currentESProvision.String(),
"Determining whether we should enable the Elasticsearch Operator integration",
)
}
log.Log.V(-1).Info("Determining whether we should enable the Elasticsearch Operator integration")
esProvision := ESOperatorIntegrationNo
previous := viper.GetString("es-provision")
if IsElasticsearchOperatorAvailable(apiList) {
esProvision = ESOperatorIntegrationYes
viper.Set("es-provision", v1.FlagProvisionElasticsearchYes)
} else {
viper.Set("es-provision", v1.FlagProvisionElasticsearchNo)
}
if currentESProvision != esProvision {
if previous != viper.GetString("es-provision") {
log.Log.Info(
"Automatically adjusted the integration with the ES Operator",
v1.FlagESProvision, esProvision.String(),
"Automatically adjusted the 'es-provision' flag",
"es-provision", viper.GetString("es-provision"),
)
}
} else {
log.Log.V(-1).Info(
"The 'es-provision' option is explicitly set",
"es-provision", viper.GetString("es-provision"),
)
OperatorConfiguration.SetESIngration(esProvision)
}
}
// detectKafka checks whether the Kafka Operator is available
func (b *Background) detectKafka(_ context.Context, apiList []*metav1.APIResourceList) {
currentKafkaProvision := OperatorConfiguration.GetKafkaIntegration()
if !b.retryDetectKafka {
log.Log.V(-1).Info(
"The 'kafka-provision' option is explicitly set",
"kafka-provision", currentKafkaProvision.String(),
)
return
}
// viper has a "IsSet" method that we could use, except that it returns "true" even
// when nothing is set but it finds a 'Default' value...
if b.retryDetectKafka {
log.Log.V(-1).Info("Determining whether we should enable the Kafka Operator integration")
kafkaProvision := KafkaOperatorIntegrationNo
previous := viper.GetString("kafka-provision")
if isKafkaOperatorAvailable(apiList) {
kafkaProvision = KafkaOperatorIntegrationYes
viper.Set("kafka-provision", v1.FlagProvisionKafkaYes)
} else {
viper.Set("kafka-provision", v1.FlagProvisionKafkaNo)
}
if currentKafkaProvision != kafkaProvision {
if previous != viper.GetString("kafka-provision") {
log.Log.Info(
"Automatically adjusted the 'kafka-provision' flag",
"kafka-provision", kafkaProvision.String(),
"kafka-provision", viper.GetString("kafka-provision"),
)
}
} else {
log.Log.V(-1).Info(
"The 'kafka-provision' option is explicitly set",
"kafka-provision", viper.GetString("kafka-provision"),
)
OperatorConfiguration.SetKafkaIntegration(kafkaProvision)
}
}
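The IsSet caveat in the comment above can be reproduced in isolation; the exact behavior has varied across viper versions, which is why this code tracks an explicit retry flag instead of relying on IsSet.

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.SetDefault("kafka-provision", "auto")
	// Depending on the viper version, IsSet may report true here even though
	// nothing was explicitly set and only the default exists.
	fmt.Println(viper.IsSet("kafka-provision"))
	viper.Set("kafka-provision", "yes")
	fmt.Println(viper.GetString("kafka-provision")) // "yes"
}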
func (b *Background) detectClusterRoles(ctx context.Context) {
if OperatorConfiguration.GetPlatform() != OpenShiftPlatform {
if viper.GetString("platform") != v1.FlagPlatformOpenShift {
return
}
tr := &authenticationapi.TokenReview{
@ -402,40 +264,100 @@ func (b *Background) detectClusterRoles(ctx context.Context) {
Token: "TEST",
},
}
currentAuthDelegator := OperatorConfiguration.GetAuthDelegator()
var newAuthDelegator AuthDelegatorAvailability
if err := b.cl.Create(ctx, tr); err != nil {
if !OperatorConfiguration.IsAuthDelegatorSet() || OperatorConfiguration.IsAuthDelegatorAvailable() {
if !viper.IsSet("auth-delegator-available") || (viper.IsSet("auth-delegator-available") && viper.GetBool("auth-delegator-available")) {
// we log this on the first run, or when the previous value was true
log.Log.Info(
"The service account running this operator does not have the role 'system:auth-delegator', consider granting it for additional capabilities",
)
}
newAuthDelegator = AuthDelegatorAvailabilityNo
viper.Set("auth-delegator-available", false)
} else {
// this isn't technically correct: we only verified that we can create TokenReviews (which is what the OAuth Proxy does),
// but we might hold *another* cluster role that grants this access while still lacking
// "system:auth-delegator". That's an edge case and more complicated to check, so we keep it simple for now
// and deal with it if it ever manifests in the real world
if !OperatorConfiguration.IsAuthDelegatorSet() || (OperatorConfiguration.IsAuthDelegatorSet() && !OperatorConfiguration.IsAuthDelegatorAvailable()) {
if !viper.IsSet("auth-delegator-available") || (viper.IsSet("auth-delegator-available") && !viper.GetBool("auth-delegator-available")) {
// we log this on the first run, or when the previous value was 'false'
log.Log.Info(
"The service account running this operator has the role 'system:auth-delegator', enabling OAuth Proxy's 'delegate-urls' option",
)
}
newAuthDelegator = AuthDelegatorAvailabilityYes
viper.Set("auth-delegator-available", true)
}
}
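The probe above works by attempting to create a TokenReview and treating failure as a missing role. A hedged alternative, sketched below and not what this code does, would ask the API server directly via a SelfSubjectAccessReview, which checks the same capability without side effects.

package example

import (
	"context"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// canCreateTokenReviews reports whether the current identity may create
// TokenReviews, the permission granted by 'system:auth-delegator'.
func canCreateTokenReviews(ctx context.Context, cs kubernetes.Interface) (bool, error) {
	ssar := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Group:    "authentication.k8s.io",
				Resource: "tokenreviews",
				Verb:     "create",
			},
		},
	}
	res, err := cs.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, ssar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return res.Status.Allowed, nil
}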
func (b *Background) cleanDeployments(ctx context.Context) {
log.Log.V(-1).Info("detecting orphaned deployments.")
instancesMap := make(map[string]*v1.Jaeger)
deployments := &appsv1.DeploymentList{}
deployOpts := []client.ListOption{
matchingLabelKeys(map[string]string{inject.Label: ""}),
}
if currentAuthDelegator != newAuthDelegator || !OperatorConfiguration.IsAuthDelegatorSet() {
OperatorConfiguration.SetAuthDelegatorAvailability(newAuthDelegator)
// if we are not watching all namespaces, we have to get items from each namespace being watched
if namespaces := viper.GetString(v1.ConfigWatchNamespace); namespaces != v1.WatchAllNamespaces {
for _, ns := range strings.Split(namespaces, ",") {
nsDeps := &appsv1.DeploymentList{}
if err := b.clReader.List(ctx, nsDeps, append(deployOpts, client.InNamespace(ns))...); err != nil {
log.Log.Error(
err,
"error getting a list of deployments to analyze in namespace",
"namespace", ns,
)
}
deployments.Items = append(deployments.Items, nsDeps.Items...)
instances := &v1.JaegerList{}
if err := b.clReader.List(ctx, instances, client.InNamespace(ns)); err != nil {
log.Log.Error(
err,
"error getting a list of existing jaeger instances in namespace",
"namespace", ns,
)
}
for i := range instances.Items {
instancesMap[instances.Items[i].Name] = &instances.Items[i]
}
}
} else {
if err := b.clReader.List(ctx, deployments, deployOpts...); err != nil {
log.Log.Error(
err,
"error getting a list of deployments to analyze",
)
}
if err := b.cl.Delete(ctx, tr); err != nil {
// Remove the test TokenReview.
// If it could not be created due to missing permissions, we're ok.
// If it was created (or left over from a previous run), we remove it so the next iteration doesn't fail.
log.Log.V(2).Info("The jaeger-operator-TEST TokenReview could not be removed", "error", err)
instances := &v1.JaegerList{}
if err := b.clReader.List(ctx, instances); err != nil {
log.Log.Error(
err,
"error getting a list of existing jaeger instances",
)
}
for i := range instances.Items {
instancesMap[instances.Items[i].Name] = &instances.Items[i]
}
}
// check deployments to see which one needs to be cleaned.
for i := range deployments.Items {
dep := deployments.Items[i]
if instanceName, ok := dep.Labels[inject.Label]; ok {
_, instanceExists := instancesMap[instanceName]
if !instanceExists { // the Jaeger instance no longer exists; clean up its sidecar.
inject.CleanSidecar(instanceName, &dep)
if err := b.cl.Update(ctx, &dep); err != nil {
log.Log.Error(
err,
"error cleaning orphaned deployment",
"deploymentName", dep.Name,
"deploymentNamespace", dep.Namespace,
)
}
}
}
}
}
@ -470,3 +392,18 @@ func isKafkaOperatorAvailable(apiList []*metav1.APIResourceList) bool {
}
return false
}
type matchingLabelKeys map[string]string
func (m matchingLabelKeys) ApplyToList(opts *client.ListOptions) {
sel := labels.NewSelector()
for k := range map[string]string(m) {
req, err := labels.NewRequirement(k, selection.Exists, []string{})
if err != nil {
log.Log.Error(err, "failed to build label selector")
return
}
sel = sel.Add(*req) // Add returns a new selector; the result must be kept
}
opts.LabelSelector = sel
}
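Usage sketch for the custom ListOption above, assuming the surrounding package's imports and a controller-runtime client: it lists only Deployments carrying the sidecar label key, whatever its value, i.e. an "Exists" requirement on the key held in inject.Label.

// listInjectedDeployments is an illustrative helper, not part of this diff.
func listInjectedDeployments(ctx context.Context, cl client.Client) (*appsv1.DeploymentList, error) {
	deps := &appsv1.DeploymentList{}
	// The map value is ignored; only the key participates in the selector.
	err := cl.List(ctx, deps, matchingLabelKeys(map[string]string{inject.Label: ""}))
	return deps, err
}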

View File

@ -6,27 +6,33 @@ import (
"testing"
"time"
openapi_v2 "github.com/google/gnostic-models/openapiv2"
openapi_v2 "github.com/google/gnostic/openapiv2"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
networkingv1 "k8s.io/api/networking/v1"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
fakeRest "k8s.io/client-go/rest/fake"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/inject"
)
func TestStart(t *testing.T) {
OperatorConfiguration.SetPlatform(OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
// sanity check
assert.False(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.False(t, viper.IsSet("auth-delegator-available"))
// prepare
dcl := &fakeDiscoveryClient{}
@ -36,7 +42,7 @@ func TestStart(t *testing.T) {
done := make(chan bool)
go func() {
for {
if OperatorConfiguration.IsAuthDelegatorSet() {
if viper.IsSet("auth-delegator-available") {
break
}
// it would typically take less than 10ms to get the first result already, so, it should wait only once
@ -51,14 +57,14 @@ func TestStart(t *testing.T) {
// verify
select {
case <-done:
assert.True(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.True(t, viper.GetBool("auth-delegator-available"))
case <-time.After(1 * time.Second):
assert.Fail(t, "timed out waiting for the start process to detect the capabilities")
}
}
func TestStartContinuesInBackground(t *testing.T) {
OperatorConfiguration.SetPlatform(OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
// prepare
@ -72,7 +78,7 @@ func TestStartContinuesInBackground(t *testing.T) {
done := make(chan bool)
go func() {
for {
if OperatorConfiguration.IsAuthDelegatorSet() {
if viper.IsSet("auth-delegator-available") {
break
}
// it would typically take less than 10ms to get the first result already, so, it should wait only once
@ -85,7 +91,7 @@ func TestStartContinuesInBackground(t *testing.T) {
select {
case <-done:
assert.False(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.False(t, viper.GetBool("auth-delegator-available"))
case <-time.After(1 * time.Second):
assert.Fail(t, "timed out waiting for the start process to detect the capabilities")
}
@ -95,7 +101,7 @@ func TestStartContinuesInBackground(t *testing.T) {
go func() {
for {
if OperatorConfiguration.IsAuthDelegatorAvailable() {
if viper.GetBool("auth-delegator-available") {
break
}
time.Sleep(500 * time.Millisecond)
@ -106,7 +112,7 @@ func TestStartContinuesInBackground(t *testing.T) {
// verify
select {
case <-done:
assert.True(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.True(t, viper.GetBool("auth-delegator-available"))
case <-time.After(6 * time.Second): // this one might take up to 5 seconds to run again + processing time
assert.Fail(t, "timed out waiting for the start process to detect the new capabilities")
}
@ -198,7 +204,7 @@ func TestAutoDetectOpenShift(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.Equal(t, OpenShiftPlatform, OperatorConfiguration.GetPlatform())
assert.Equal(t, v1.FlagPlatformOpenShift, viper.GetString("platform"))
// set the error
dcl.ServerResourcesForGroupVersionFunc = func(_ string) (apiGroupList *metav1.APIResourceList, err error) {
@ -209,7 +215,7 @@ func TestAutoDetectOpenShift(t *testing.T) {
b.autoDetectCapabilities()
// verify again
assert.Equal(t, OpenShiftPlatform, OperatorConfiguration.GetPlatform())
assert.Equal(t, v1.FlagPlatformOpenShift, viper.GetString("platform"))
}
func TestAutoDetectKubernetes(t *testing.T) {
@ -225,12 +231,12 @@ func TestAutoDetectKubernetes(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.Equal(t, KubernetesPlatform, OperatorConfiguration.GetPlatform())
assert.Equal(t, v1.FlagPlatformKubernetes, viper.GetString("platform"))
}
func TestExplicitPlatform(t *testing.T) {
// prepare
OperatorConfiguration.SetPlatform(OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
@ -241,7 +247,7 @@ func TestExplicitPlatform(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.Equal(t, OpenShiftPlatform, OperatorConfiguration.GetPlatform())
assert.Equal(t, v1.FlagPlatformOpenShift, viper.GetString("platform"))
}
func TestAutoDetectEsProvisionNoEsOperator(t *testing.T) {
@ -257,7 +263,7 @@ func TestAutoDetectEsProvisionNoEsOperator(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.False(t, OperatorConfiguration.IsESOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionElasticsearchNo, viper.GetString("es-provision"))
}
func TestAutoDetectEsProvisionWithEsOperator(t *testing.T) {
@ -287,7 +293,7 @@ func TestAutoDetectEsProvisionWithEsOperator(t *testing.T) {
}, nil
}
b.autoDetectCapabilities()
assert.True(t, OperatorConfiguration.IsESOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionElasticsearchYes, viper.GetString("es-provision"))
})
t.Run("no kind Elasticsearch", func(t *testing.T) {
@ -302,7 +308,7 @@ func TestAutoDetectEsProvisionWithEsOperator(t *testing.T) {
}, nil
}
b.autoDetectCapabilities()
assert.False(t, OperatorConfiguration.IsESOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionElasticsearchNo, viper.GetString("es-provision"))
})
}
@ -319,7 +325,7 @@ func TestAutoDetectKafkaProvisionNoKafkaOperator(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.False(t, OperatorConfiguration.IsKafkaOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionKafkaNo, viper.GetString("kafka-provision"))
}
func TestAutoDetectKafkaProvisionWithKafkaOperator(t *testing.T) {
@ -345,12 +351,12 @@ func TestAutoDetectKafkaProvisionWithKafkaOperator(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.True(t, OperatorConfiguration.IsKafkaOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionKafkaYes, viper.GetString("kafka-provision"))
}
func TestAutoDetectKafkaExplicitYes(t *testing.T) {
// prepare
OperatorConfiguration.SetKafkaIntegration(KafkaOperatorIntegrationYes)
viper.Set("kafka-provision", v1.FlagProvisionKafkaYes)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
@ -361,12 +367,12 @@ func TestAutoDetectKafkaExplicitYes(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.True(t, OperatorConfiguration.IsKafkaOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionKafkaYes, viper.GetString("kafka-provision"))
}
func TestAutoDetectKafkaExplicitNo(t *testing.T) {
// prepare
OperatorConfiguration.SetKafkaIntegration(KafkaOperatorIntegrationNo)
viper.Set("kafka-provision", v1.FlagProvisionKafkaNo)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
@ -377,7 +383,7 @@ func TestAutoDetectKafkaExplicitNo(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.False(t, OperatorConfiguration.IsKafkaOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionKafkaNo, viper.GetString("kafka-provision"))
}
func TestAutoDetectKafkaDefaultNoOperator(t *testing.T) {
@ -393,7 +399,7 @@ func TestAutoDetectKafkaDefaultNoOperator(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.False(t, OperatorConfiguration.IsKafkaOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionKafkaNo, viper.GetString("kafka-provision"))
}
func TestAutoDetectKafkaDefaultWithOperator(t *testing.T) {
@ -418,7 +424,7 @@ func TestAutoDetectKafkaDefaultWithOperator(t *testing.T) {
b.autoDetectCapabilities()
// verify
assert.True(t, OperatorConfiguration.IsKafkaOperatorIntegrationEnabled())
assert.Equal(t, v1.FlagProvisionKafkaYes, viper.GetString("kafka-provision"))
}
func TestAutoDetectCronJobsVersion(t *testing.T) {
@ -447,6 +453,7 @@ func TestAutoDetectCronJobsVersion(t *testing.T) {
// verify
assert.Equal(t, apiGroup, viper.GetString(v1.FlagCronJobsVersion))
fmt.Printf("Test finished on [%s]\n", apiGroup)
}
}
@ -476,6 +483,7 @@ func TestAutoDetectAutoscalingVersion(t *testing.T) {
// verify
assert.Equal(t, apiGroup, viper.GetString(v1.FlagAutoscalingVersion))
fmt.Printf("Test finished on [%s]\n", apiGroup)
}
// Check what happens when there ServerResourcesForGroupVersion returns error
@ -495,51 +503,11 @@ func TestAutoDetectAutoscalingVersion(t *testing.T) {
// test
b.autoDetectCapabilities()
// Test the newer version is selected
dcl = &fakeDiscoveryClient{}
cl = fake.NewFakeClient() // nolint:staticcheck
b = WithClients(cl, dcl, cl)
dcl.ServerGroupsFunc = func() (apiGroupList *metav1.APIGroupList, err error) {
return &metav1.APIGroupList{
Groups: []metav1.APIGroup{
{
Name: v1.FlagAutoscalingVersionV2,
Versions: []metav1.GroupVersionForDiscovery{
{Version: v1.FlagAutoscalingVersionV2},
},
},
{
Name: v1.FlagAutoscalingVersionV2Beta2,
Versions: []metav1.GroupVersionForDiscovery{
{Version: v1.FlagAutoscalingVersionV2Beta2},
},
},
},
}, nil
}
dcl.ServerResourcesForGroupVersionFunc = func(requestedApiVersion string) (apiGroupList *metav1.APIResourceList, err error) {
if requestedApiVersion == v1.FlagAutoscalingVersionV2 {
apiResourceList := &metav1.APIResourceList{GroupVersion: v1.FlagAutoscalingVersionV2, APIResources: []metav1.APIResource{{Name: "horizontalpodautoscalers"}}}
return apiResourceList, nil
} else if requestedApiVersion == v1.FlagAutoscalingVersionV2Beta2 {
apiResourceList := &metav1.APIResourceList{GroupVersion: v1.FlagAutoscalingVersionV2Beta2, APIResources: []metav1.APIResource{{Name: "horizontalpodautoscalers"}}}
return apiResourceList, nil
}
return &metav1.APIResourceList{}, nil
}
// test
b.autoDetectCapabilities()
// verify
assert.Equal(t, v1.FlagAutoscalingVersionV2, viper.GetString(v1.FlagAutoscalingVersion))
}
func TestSkipAuthDelegatorNonOpenShift(t *testing.T) {
// prepare
OperatorConfiguration.SetPlatform(KubernetesPlatform)
viper.Set("platform", v1.FlagPlatformKubernetes)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
@ -550,12 +518,12 @@ func TestSkipAuthDelegatorNonOpenShift(t *testing.T) {
b.detectClusterRoles(context.Background())
// verify
assert.False(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.False(t, viper.IsSet("auth-delegator-available"))
}
func TestNoAuthDelegatorAvailable(t *testing.T) {
// prepare
OperatorConfiguration.SetPlatform(OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
@ -569,12 +537,12 @@ func TestNoAuthDelegatorAvailable(t *testing.T) {
b.detectClusterRoles(context.Background())
// verify
assert.False(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.False(t, viper.GetBool("auth-delegator-available"))
}
func TestAuthDelegatorBecomesAvailable(t *testing.T) {
// prepare
OperatorConfiguration.SetPlatform(OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
@ -586,16 +554,16 @@ func TestAuthDelegatorBecomesAvailable(t *testing.T) {
// test
b.detectClusterRoles(context.Background())
assert.False(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.False(t, viper.GetBool("auth-delegator-available"))
cl.CreateFunc = cl.Client.Create
b.detectClusterRoles(context.Background())
assert.True(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.True(t, viper.GetBool("auth-delegator-available"))
}
func TestAuthDelegatorBecomesUnavailable(t *testing.T) {
// prepare
OperatorConfiguration.SetPlatform(OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
@ -604,113 +572,139 @@ func TestAuthDelegatorBecomesUnavailable(t *testing.T) {
// test
b.detectClusterRoles(context.Background())
assert.True(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.True(t, viper.GetBool("auth-delegator-available"))
cl.CreateFunc = func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
return fmt.Errorf("faked error")
}
b.detectClusterRoles(context.Background())
assert.False(t, OperatorConfiguration.IsAuthDelegatorAvailable())
assert.False(t, viper.GetBool("auth-delegator-available"))
}
func TestSkipDefaultIngressClassOpenShift(t *testing.T) {
// prepare
OperatorConfiguration.SetPlatform(OpenShiftPlatform)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
cl := customFakeClient()
b := WithClients(cl, dcl, cl)
// test
b.detectDefaultIngressClass(context.Background())
// verify
assert.Equal(t, "", viper.GetString(v1.FlagDefaultIngressClass))
}
func TestDetectDefaultIngressClass(t *testing.T) {
// prepare
OperatorConfiguration.SetPlatform(KubernetesPlatform)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
cl := customFakeClient()
cl.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
if listPointer, ok := list.(*networkingv1.IngressClassList); ok {
listPointer.Items = []networkingv1.IngressClass{
func TestCleanDeployments(t *testing.T) {
for _, tt := range []struct {
cap string // caption for the test
watchNamespace string // the value for WATCH_NAMESPACE
jaegerNamespace string // in which namespace the jaeger exists, empty for non existing
deleted bool // whether the sidecar should have been deleted
}{
{
ObjectMeta: metav1.ObjectMeta{
Name: "nginx",
Annotations: map[string]string{
"ingressclass.kubernetes.io/is-default-class": "true",
cap: "existing-same-namespace",
watchNamespace: "observability",
jaegerNamespace: "observability",
deleted: false,
},
{
cap: "not-existing-same-namespace",
watchNamespace: "observability",
jaegerNamespace: "",
deleted: true,
},
{
cap: "existing-watched-namespace",
watchNamespace: "observability,other-observability",
jaegerNamespace: "other-observability",
deleted: false,
},
}
}
return nil
}
b := WithClients(cl, dcl, cl)
// test
b.detectDefaultIngressClass(context.Background())
// verify
assert.Equal(t, "nginx", viper.GetString(v1.FlagDefaultIngressClass))
}
func TestDetectNoDefaultIngressClass(t *testing.T) {
// prepare
OperatorConfiguration.SetPlatform(KubernetesPlatform)
{
cap: "existing-non-watched-namespace",
watchNamespace: "observability",
jaegerNamespace: "other-observability",
deleted: true,
},
{
cap: "existing-watching-all-namespaces",
watchNamespace: v1.WatchAllNamespaces,
jaegerNamespace: "other-observability",
deleted: false,
},
} {
t.Run(tt.cap, func(t *testing.T) {
// prepare the test data
viper.Set(v1.ConfigWatchNamespace, tt.watchNamespace)
defer viper.Reset()
dcl := &fakeDiscoveryClient{}
cl := customFakeClient()
jaeger := v1.NewJaeger(types.NamespacedName{
Name: "my-instance",
Namespace: "observability", // at first, it exists in the same namespace as the deployment
})
cl.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
if listPointer, ok := list.(*networkingv1.IngressClassList); ok {
listPointer.Items = []networkingv1.IngressClass{
{
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "nginx",
Name: "mydep",
Namespace: "observability",
Annotations: map[string]string{inject.Annotation: jaeger.Name},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "C1",
Image: "image1",
},
},
},
},
},
}
dep = inject.Sidecar(jaeger, dep)
// sanity check
require.Equal(t, 2, len(dep.Spec.Template.Spec.Containers))
// prepare the list of existing objects
objs := []runtime.Object{dep}
if len(tt.jaegerNamespace) > 0 {
jaeger.Namespace = tt.jaegerNamespace // now, it exists only in this namespace
objs = append(objs, jaeger)
}
return nil
}
b := WithClients(cl, dcl, cl)
// prepare the client
s := scheme.Scheme
s.AddKnownTypes(v1.GroupVersion, &v1.Jaeger{})
s.AddKnownTypes(v1.GroupVersion, &v1.JaegerList{})
cl := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
b := WithClients(cl, &fakeDiscoveryClient{}, cl)
// test
b.detectDefaultIngressClass(context.Background())
b.cleanDeployments(context.Background())
// verify
assert.Equal(t, "", viper.GetString(v1.FlagDefaultIngressClass))
persisted := &appsv1.Deployment{}
err := cl.Get(context.Background(), types.NamespacedName{
Namespace: dep.Namespace,
Name: dep.Name,
}, persisted)
require.NoError(t, err)
// should the sidecar have been deleted?
if tt.deleted {
assert.Equal(t, 1, len(persisted.Spec.Template.Spec.Containers))
assert.NotContains(t, persisted.Labels, inject.Label)
} else {
assert.Equal(t, 2, len(persisted.Spec.Template.Spec.Containers))
assert.Contains(t, persisted.Labels, inject.Label)
}
})
}
}
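// fakeClient embeds a real controller-runtime fake client and lets individual
// tests override Create/List per call, a common Go test-double pattern: the
// embedded methods stay available while the hooks intercept the calls under test.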
type fakeClient struct {
client.Client
CreateFunc func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error
ListFunc func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error
}
func customFakeClient() *fakeClient {
c := fake.NewClientBuilder().Build()
return &fakeClient{Client: c, CreateFunc: c.Create, ListFunc: c.List}
return &fakeClient{Client: c, CreateFunc: c.Create}
}
func (f *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
return f.CreateFunc(ctx, obj)
}
func (f *fakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
return f.ListFunc(ctx, list)
}
type fakeDiscoveryClient struct {
discovery.DiscoveryInterface
ServerGroupsFunc func() (apiGroupList *metav1.APIGroupList, err error)

View File

@ -1,237 +0,0 @@
package autodetect
import (
"strings"
"sync"
"github.com/spf13/viper"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
)
// Platform holds the auto-detected running platform.
type Platform int
const (
// KubernetesPlatform represents the cluster is Kubernetes.
KubernetesPlatform Platform = iota
// OpenShiftPlatform represents the cluster is OpenShift.
OpenShiftPlatform
)
func (p Platform) String() string {
return [...]string{"Kubernetes", "OpenShift"}[p]
}
// ESOperatorIntegration holds whether the ES Operator integration is enabled.
type ESOperatorIntegration int
const (
// ESOperatorIntegrationYes represents the ES Operator integration is enabled.
ESOperatorIntegrationYes ESOperatorIntegration = iota
// ESOperatorIntegrationNo represents the ES Operator integration is disabled.
ESOperatorIntegrationNo
)
func (p ESOperatorIntegration) String() string {
return [...]string{"Yes", "No"}[p]
}
// KafkaOperatorIntegration holds whether the Kafka Operator integration is enabled.
type KafkaOperatorIntegration int
const (
// KafkaOperatorIntegrationYes represents the Kafka Operator integration is enabled.
KafkaOperatorIntegrationYes KafkaOperatorIntegration = iota
// KafkaOperatorIntegrationNo represents the Kafka Operator integration is disabled.
KafkaOperatorIntegrationNo
)
func (p KafkaOperatorIntegration) String() string {
return [...]string{"Yes", "No"}[p]
}
// AuthDelegatorAvailability holds whether the AuthDelegator is available.
type AuthDelegatorAvailability int
const (
// AuthDelegatorAvailabilityYes represents the AuthDelegator is available.
AuthDelegatorAvailabilityYes AuthDelegatorAvailability = iota
// AuthDelegatorAvailabilityNo represents the AuthDelegator is not available.
AuthDelegatorAvailabilityNo
// AuthDelegatorAvailabilityUnknown represents the AuthDelegator availability is not known.
AuthDelegatorAvailabilityUnknown
)
func (p AuthDelegatorAvailability) String() string {
return [...]string{"Yes", "No", "Unknown"}[p]
}
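// OperatorConfiguration is the process-wide, mutex-guarded view of the
// operator's auto-detected state, persisted in viper under the v1.Flag* keys.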
var OperatorConfiguration operatorConfigurationWrapper
type operatorConfigurationWrapper struct {
mu sync.RWMutex
}
func (c *operatorConfigurationWrapper) SetPlatform(p interface{}) {
var platform string
switch v := p.(type) {
case string:
platform = v
case Platform:
platform = v.String()
default:
platform = KubernetesPlatform.String()
}
c.mu.Lock()
viper.Set(v1.FlagPlatform, platform)
c.mu.Unlock()
}
func (c *operatorConfigurationWrapper) GetPlatform() Platform {
c.mu.RLock()
p := viper.GetString(v1.FlagPlatform)
c.mu.RUnlock()
if strings.ToLower(p) == "openshift" {
return OpenShiftPlatform
}
return KubernetesPlatform
}
func (c *operatorConfigurationWrapper) IsPlatformAutodetectionEnabled() bool {
c.mu.RLock()
p := viper.GetString(v1.FlagPlatform)
c.mu.RUnlock()
return strings.EqualFold(p, v1.FlagPlatformAutoDetect)
}
func (c *operatorConfigurationWrapper) SetESIngration(e interface{}) {
var integration string
switch v := e.(type) {
case string:
integration = v
case ESOperatorIntegration:
integration = v.String()
default:
integration = ESOperatorIntegrationNo.String()
}
c.mu.Lock()
viper.Set(v1.FlagESProvision, integration)
c.mu.Unlock()
}
func (c *operatorConfigurationWrapper) GetESPIntegration() ESOperatorIntegration {
c.mu.RLock()
e := viper.GetString(v1.FlagESProvision)
c.mu.RUnlock()
if strings.ToLower(e) == "yes" {
return ESOperatorIntegrationYes
}
return ESOperatorIntegrationNo
}
// IsESOperatorIntegrationEnabled returns true if the integration with the
// Elasticsearch OpenShift Operator is enabled
func (c *operatorConfigurationWrapper) IsESOperatorIntegrationEnabled() bool {
return c.GetESPIntegration() == ESOperatorIntegrationYes
}
func (c *operatorConfigurationWrapper) SetKafkaIntegration(e interface{}) {
var integration string
switch v := e.(type) {
case string:
integration = v
case KafkaOperatorIntegration:
integration = v.String()
default:
integration = KafkaOperatorIntegrationNo.String()
}
c.mu.Lock()
viper.Set(v1.FlagKafkaProvision, integration)
c.mu.Unlock()
}
func (c *operatorConfigurationWrapper) GetKafkaIntegration() KafkaOperatorIntegration {
c.mu.RLock()
e := viper.GetString(v1.FlagKafkaProvision)
c.mu.RUnlock()
if strings.ToLower(e) == "yes" {
return KafkaOperatorIntegrationYes
}
return KafkaOperatorIntegrationNo
}
// IsKafkaOperatorIntegrationEnabled returns true if the integration with the
// Kafka Operator is enabled
func (c *operatorConfigurationWrapper) IsKafkaOperatorIntegrationEnabled() bool {
return c.GetKafkaIntegration() == KafkaOperatorIntegrationYes
}
func (c *operatorConfigurationWrapper) SetAuthDelegatorAvailability(e interface{}) {
var availability string
switch v := e.(type) {
case string:
availability = v
case AuthDelegatorAvailability:
availability = v.String()
default:
availability = AuthDelegatorAvailabilityUnknown.String()
}
c.mu.Lock()
viper.Set(v1.FlagAuthDelegatorAvailability, availability)
c.mu.Unlock()
}
func (c *operatorConfigurationWrapper) GetAuthDelegator() AuthDelegatorAvailability {
c.mu.RLock()
e := viper.GetString(v1.FlagAuthDelegatorAvailability)
c.mu.RUnlock()
var available AuthDelegatorAvailability
switch strings.ToLower(e) {
case "yes":
available = AuthDelegatorAvailabilityYes
case "no":
available = AuthDelegatorAvailabilityNo
default:
available = AuthDelegatorAvailabilityUnknown
}
return available
}
// IsAuthDelegatorAvailable returns true if the AuthDelegator is available
func (c *operatorConfigurationWrapper) IsAuthDelegatorAvailable() bool {
return c.GetAuthDelegator() == AuthDelegatorAvailabilityYes
}
// IsAuthDelegatorSet returns true if the AuthDelegator availability is known
func (c *operatorConfigurationWrapper) IsAuthDelegatorSet() bool {
return c.GetAuthDelegator() != AuthDelegatorAvailabilityUnknown
}
func (c *operatorConfigurationWrapper) SetOautProxyImage(image string) {
c.mu.Lock()
viper.Set(v1.FlagOpenShiftOauthProxyImage, image)
c.mu.Unlock()
}
func (c *operatorConfigurationWrapper) GetOautProxyImage() string {
c.mu.RLock()
image := viper.GetString(v1.FlagOpenShiftOauthProxyImage)
c.mu.RUnlock()
return image
}
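A short usage sketch of the wrapper at a hypothetical call site: readers go through the typed getters rather than touching viper keys or lock handling directly.

package example

import "github.com/jaegertracing/jaeger-operator/pkg/autodetect"

// delegateURLsEnabled is an illustrative helper, not part of this diff.
func delegateURLsEnabled() bool {
	return autodetect.OperatorConfiguration.GetPlatform() == autodetect.OpenShiftPlatform &&
		autodetect.OperatorConfiguration.IsAuthDelegatorAvailable()
}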

View File

@ -3,19 +3,19 @@ package clusterrolebinding
import (
"fmt"
"github.com/spf13/viper"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/account"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
"github.com/jaegertracing/jaeger-operator/pkg/util"
)
// Get returns all the service accounts to be created for this Jaeger instance
func Get(jaeger *v1.Jaeger) []rbac.ClusterRoleBinding {
if jaeger.Spec.Ingress.Security == v1.IngressSecurityOAuthProxy && len(jaeger.Spec.Ingress.Openshift.DelegateUrls) > 0 {
if autodetect.OperatorConfiguration.IsAuthDelegatorAvailable() {
if viper.GetBool("auth-delegator-available") {
return []rbac.ClusterRoleBinding{oauthProxyAuthDelegator(jaeger)}
}

View File

@ -9,7 +9,6 @@ import (
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/account"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
)
func TestGetClusterRoleBinding(t *testing.T) {
@ -17,7 +16,7 @@ func TestGetClusterRoleBinding(t *testing.T) {
name := "TestGetClusterRoleBinding"
trueVar := true
autodetect.OperatorConfiguration.SetAuthDelegatorAvailability(autodetect.AuthDelegatorAvailabilityYes)
viper.Set("auth-delegator-available", true)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{Name: name})
@ -36,7 +35,7 @@ func TestGetClusterRoleBinding(t *testing.T) {
assert.Len(t, crbs[0].Subjects, 1)
assert.Equal(t, account.OAuthProxyAccountNameFor(jaeger), crbs[0].Subjects[0].Name)
assert.Equal(t, "ServiceAccount", crbs[0].Subjects[0].Kind)
assert.Empty(t, crbs[0].Subjects[0].Namespace) // cluster roles aren't namespaced
assert.Len(t, crbs[0].Subjects[0].Namespace, 0) // cluster roles aren't namespaced
}
func TestIngressDisabled(t *testing.T) {
@ -53,7 +52,7 @@ func TestIngressDisabled(t *testing.T) {
crbs := Get(jaeger)
// verify
assert.Empty(t, crbs)
assert.Len(t, crbs, 0)
}
func TestNotOAuthProxy(t *testing.T) {
@ -70,7 +69,7 @@ func TestNotOAuthProxy(t *testing.T) {
crbs := Get(jaeger)
// verify
assert.Empty(t, crbs)
assert.Len(t, crbs, 0)
}
func TestAuthDelegatorNotAvailable(t *testing.T) {
@ -78,7 +77,7 @@ func TestAuthDelegatorNotAvailable(t *testing.T) {
name := "TestAuthDelegatorNotAvailable"
trueVar := true
autodetect.OperatorConfiguration.SetAuthDelegatorAvailability(autodetect.AuthDelegatorAvailabilityNo)
viper.Set("auth-delegator-available", false)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{Name: name})
@ -90,5 +89,5 @@ func TestAuthDelegatorNotAvailable(t *testing.T) {
crbs := Get(jaeger)
// verify
assert.Empty(t, crbs)
assert.Len(t, crbs, 0)
}

View File

@ -2,7 +2,6 @@ package generate
import (
"context"
"errors"
"fmt"
"io"
"os"
@ -56,7 +55,7 @@ func createSpecFromYAML(filename string) (*v1.Jaeger, error) {
var spec v1.Jaeger
decoder := yaml.NewYAMLOrJSONDecoder(f, 8192)
if err := decoder.Decode(&spec); err != nil && !errors.Is(err, io.EOF) {
if err := decoder.Decode(&spec); err != nil && err != io.EOF {
return nil, err
}

View File

@ -19,12 +19,11 @@ import (
"go.uber.org/zap/zapcore"
corev1 "k8s.io/api/core/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/log"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
// import OIDC cluster authentication plugin, e.g. for IBM Cloud
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
@ -42,7 +41,6 @@ import (
appsv1controllers "github.com/jaegertracing/jaeger-operator/controllers/appsv1"
esv1controllers "github.com/jaegertracing/jaeger-operator/controllers/elasticsearch"
jaegertracingcontrollers "github.com/jaegertracing/jaeger-operator/controllers/jaegertracing"
"github.com/jaegertracing/jaeger-operator/pkg/autoclean"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
kafkav1beta2 "github.com/jaegertracing/jaeger-operator/pkg/kafka/v1beta2"
opmetrics "github.com/jaegertracing/jaeger-operator/pkg/metrics"
@ -105,7 +103,7 @@ func bootstrap(ctx context.Context) manager.Manager {
log.Log.V(6).Info("%s", err)
}
span.SetAttributes(otelattribute.String("Platform", autodetect.OperatorConfiguration.GetPlatform().String()))
span.SetAttributes(otelattribute.String("Platform", viper.GetString("platform")))
watchNamespace, found := os.LookupEnv("WATCH_NAMESPACE")
if found {
setupLog.Info("watching namespace(s)", "namespaces", watchNamespace)
@ -126,19 +124,11 @@ func bootstrap(ctx context.Context) manager.Manager {
d.Start()
}
if c, err := autoclean.New(mgr); err != nil {
log.Log.Error(
err,
"failed to start the background process to auto-clean the operator objects",
)
} else {
c.Start()
}
detectNamespacePermissions(ctx, mgr)
performUpgrades(ctx, mgr)
setupControllers(ctx, mgr)
setupWebhooks(ctx, mgr)
detectOAuthProxyImageStream(ctx, mgr)
err = opmetrics.Bootstrap(ctx, namespace, mgr.GetClient())
if err != nil {
log.Log.Error(err, "failed to initialize metrics")
@ -146,6 +136,81 @@ func bootstrap(ctx context.Context) manager.Manager {
return mgr
}
func detectOAuthProxyImageStream(ctx context.Context, mgr manager.Manager) {
tracer := otel.GetTracerProvider().Tracer(v1.BootstrapTracer)
ctx, span := tracer.Start(ctx, "detectOAuthProxyImageStream")
defer span.End()
if viper.GetString("platform") != v1.FlagPlatformOpenShift {
log.Log.V(-1).Info(
"Not running on OpenShift, so won't configure OAuthProxy imagestream.",
)
return
}
imageStreamNamespace := viper.GetString("openshift-oauth-proxy-imagestream-ns")
imageStreamName := viper.GetString("openshift-oauth-proxy-imagestream-name")
if imageStreamNamespace == "" || imageStreamName == "" {
log.Log.Info(
"OAuthProxy ImageStream namespace and/or name not defined",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
imageStream := &osimagev1.ImageStream{}
namespacedName := types.NamespacedName{
Name: imageStreamName,
Namespace: imageStreamNamespace,
}
if err := mgr.GetAPIReader().Get(ctx, namespacedName, imageStream); err != nil {
log.Log.Error(
err,
"Failed to obtain OAuthProxy ImageStream",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
tracing.HandleError(err, span)
return
}
if len(imageStream.Status.Tags) == 0 {
log.Log.V(6).Info(
"OAuthProxy ImageStream has no tags",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
if len(imageStream.Status.Tags[0].Items) == 0 {
log.Log.V(6).Info(
"OAuthProxy ImageStream tag has no items",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
if len(imageStream.Status.Tags[0].Items[0].DockerImageReference) == 0 {
log.Log.V(5).Info(
"OAuthProxy ImageStream tag has no DockerImageReference",
"namespace", imageStreamNamespace,
"name", imageStreamName,
)
return
}
image := imageStream.Status.Tags[0].Items[0].DockerImageReference
viper.Set("openshift-oauth-proxy-image", image)
log.Log.Info(
"Updated OAuth Proxy image flag",
"image", image,
)
}
func detectNamespacePermissions(ctx context.Context, mgr manager.Manager) {
tracer := otel.GetTracerProvider().Tracer(v1.BootstrapTracer)
ctx, span := tracer.Start(ctx, "detectNamespacePermissions")
@ -256,6 +321,8 @@ func createManager(ctx context.Context, cfg *rest.Config) manager.Manager {
probeAddr := viper.GetString("health-probe-bind-address")
webhookPort := viper.GetInt("webhook-bind-port")
namespace := viper.GetString(v1.ConfigWatchNamespace)
var tlsOpt tlsConfig
tlsOpt.minVersion = viper.GetString("tls-min-version")
tlsOpt.cipherSuites = viper.GetStringSlice("tls-cipher-suites")
@ -269,37 +336,27 @@ func createManager(ctx context.Context, cfg *rest.Config) manager.Manager {
func(config *tls.Config) { tlsConfigSetting(config, tlsOpt) },
}
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
namespace := viper.GetString(v1.ConfigWatchNamespace)
var namespaces map[string]cache.Config
if namespace != "" {
namespaces = map[string]cache.Config{}
for _, ns := range strings.Split(namespace, ",") {
namespaces[ns] = cache.Config{}
}
}
options := ctrl.Options{
Scheme: scheme,
Metrics: metricsserver.Options{
BindAddress: metricsAddr,
},
WebhookServer: webhook.NewServer(webhook.Options{
MetricsBindAddress: metricsAddr,
Port: webhookPort,
TLSOpts: optionsTlSOptsFuncs,
}),
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "31e04290.jaegertracing.io",
LeaseDuration: &leaseDuration,
RenewDeadline: &renewDeadline,
RetryPeriod: &retryPeriod,
Cache: cache.Options{
DefaultNamespaces: namespaces,
},
Namespace: namespace,
}
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
if strings.Contains(namespace, ",") {
options.Namespace = ""
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
}
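The two variants interleaved in this hunk implement the same WATCH_NAMESPACE handling against old and new controller-runtime APIs. A consolidated sketch of the newer (v0.15+) form, with assumed names:

package example

import (
	"strings"

	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// namespaceCacheConfig turns "ns1,ns2" into per-namespace cache configs;
// an empty value means "watch all namespaces" (nil map).
func namespaceCacheConfig(watchNamespace string) map[string]cache.Config {
	if watchNamespace == "" {
		return nil
	}
	namespaces := map[string]cache.Config{}
	for _, ns := range strings.Split(watchNamespace, ",") {
		namespaces[ns] = cache.Config{}
	}
	return namespaces
}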
// Create a new manager to provide shared dependencies and start components
@ -370,9 +427,8 @@ func setupWebhooks(_ context.Context, mgr manager.Manager) {
// register webhook
srv := mgr.GetWebhookServer()
decoder := admission.NewDecoder(mgr.GetScheme())
srv.Register("/mutate-v1-deployment", &webhook.Admission{
Handler: appsv1controllers.NewDeploymentInterceptorWebhook(mgr.GetClient(), decoder),
Handler: appsv1controllers.NewDeploymentInterceptorWebhook(mgr.GetClient()),
})
}

View File

@ -9,7 +9,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/version"
)
@ -28,11 +27,11 @@ func AddFlags(cmd *cobra.Command) {
cmd.Flags().String("jaeger-spark-dependencies-image", "ghcr.io/jaegertracing/spark-dependencies/spark-dependencies", "The Docker image for the Spark Dependencies Job")
cmd.Flags().String("jaeger-es-index-cleaner-image", "jaegertracing/jaeger-es-index-cleaner", "The Docker image for the Jaeger Elasticsearch Index Cleaner")
cmd.Flags().String("jaeger-es-rollover-image", "jaegertracing/jaeger-es-rollover", "The Docker image for the Jaeger Elasticsearch Rollover")
cmd.Flags().String(v1.FlagOpenShiftOauthProxyImage, "quay.io/openshift/origin-oauth-proxy:4.14", "The Docker image location definition for the OpenShift OAuth Proxy")
cmd.Flags().String("openshift-oauth-proxy-image", "quay.io/openshift/origin-oauth-proxy:4.12", "The Docker image location definition for the OpenShift OAuth Proxy")
cmd.Flags().String("openshift-oauth-proxy-imagestream-ns", "", "The namespace for the OpenShift OAuth Proxy imagestream")
cmd.Flags().String("openshift-oauth-proxy-imagestream-name", "", "The name for the OpenShift OAuth Proxy imagestream")
cmd.Flags().String("platform", v1.FlagPlatformAutoDetect, "The target platform the operator will run. Possible values: 'kubernetes', 'openshift', 'auto-detect'")
cmd.Flags().String("es-provision", v1.FlagProvisionElasticsearchAuto, "Whether to auto-provision an Elasticsearch cluster for suitable Jaeger instances. Possible values: 'yes', 'no', 'auto'. When set to 'auto' and the API name 'logging.openshift.io' is available, auto-provisioning is enabled.")
cmd.Flags().String("platform", "auto-detect", "The target platform the operator will run. Possible values: 'kubernetes', 'openshift', 'auto-detect'")
cmd.Flags().String("es-provision", "auto", "Whether to auto-provision an Elasticsearch cluster for suitable Jaeger instances. Possible values: 'yes', 'no', 'auto'. When set to 'auto' and the API name 'logging.openshift.io' is available, auto-provisioning is enabled.")
cmd.Flags().String("kafka-provision", "auto", "Whether to auto-provision a Kafka cluster for suitable Jaeger instances. Possible values: 'yes', 'no', 'auto'. When set to 'auto' and the API name 'kafka.strimzi.io' is available, auto-provisioning is enabled.")
cmd.Flags().Bool("kafka-provisioning-minimal", false, "(unsupported) Whether to provision Kafka clusters with minimal requirements, suitable for demos and tests.")
cmd.Flags().String("secure-listen-address", "", "")
@ -43,17 +42,6 @@ func AddFlags(cmd *cobra.Command) {
cmd.Flags().Bool("leader-elect", false, "Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
_ = viper.BindEnv("jaeger-agent-image", "RELATED_IMAGE_JAEGER_AGENT")
_ = viper.BindEnv("jaeger-query-image", "RELATED_IMAGE_JAEGER_QUERY")
_ = viper.BindEnv("jaeger-collector-image", "RELATED_IMAGE_JAEGER_COLLECTOR")
_ = viper.BindEnv("jaeger-ingester-image", "RELATED_IMAGE_JAEGER_INGESTER")
_ = viper.BindEnv("jaeger-all-in-one-image", "RELATED_IMAGE_JAEGER_ALL_IN_ONE")
_ = viper.BindEnv("jaeger-cassandra-schema-image", "RELATED_IMAGE_CASSANDRA_SCHEMA")
_ = viper.BindEnv("jaeger-spark-dependencies-image", "RELATED_IMAGE_SPARK_DEPENDENCIES")
_ = viper.BindEnv("jaeger-es-index-cleaner-image", "RELATED_IMAGE_JAEGER_ES_INDEX_CLEANER")
_ = viper.BindEnv("jaeger-es-rollover-image", "RELATED_IMAGE_JAEGER_ES_ROLLOVER")
_ = viper.BindEnv(v1.FlagOpenShiftOauthProxyImage, "RELATED_IMAGE_OPENSHIFT_OAUTH_PROXY")
docURL := fmt.Sprintf("https://www.jaegertracing.io/docs/%s", version.DefaultJaegerMajorMinor())
cmd.Flags().String("documentation-url", docURL, "The URL for the 'Documentation' menu item")
}
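The RELATED_IMAGE_* bindings above follow the operator convention for image overrides; a minimal standalone illustration of the viper mechanism, using a hypothetical image reference:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	_ = viper.BindEnv("jaeger-agent-image", "RELATED_IMAGE_JAEGER_AGENT")
	os.Setenv("RELATED_IMAGE_JAEGER_AGENT", "quay.io/example/agent:tip") // hypothetical
	fmt.Println(viper.GetString("jaeger-agent-image"))                   // prints the env value
}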

View File

@ -4,11 +4,11 @@ import (
"fmt"
"strings"
"github.com/spf13/viper"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
"github.com/jaegertracing/jaeger-operator/pkg/util"
)
@ -24,7 +24,7 @@ const (
// GetTrustedCABundle returns a trusted CA bundle configmap if platform is OpenShift
func GetTrustedCABundle(jaeger *v1.Jaeger) *corev1.ConfigMap {
// Only configure the trusted CA if running in OpenShift
if autodetect.OperatorConfiguration.GetPlatform() != autodetect.OpenShiftPlatform {
if viper.GetString("platform") != v1.FlagPlatformOpenShift {
return nil
}
@ -59,7 +59,7 @@ func GetTrustedCABundle(jaeger *v1.Jaeger) *corev1.ConfigMap {
// GetServiceCABundle returns a service CA configmap if platform is OpenShift
func GetServiceCABundle(jaeger *v1.Jaeger) *corev1.ConfigMap {
// Only configure the service CA if running in OpenShift
if autodetect.OperatorConfiguration.GetPlatform() != autodetect.OpenShiftPlatform {
if viper.GetString("platform") != v1.FlagPlatformOpenShift {
return nil
}
@ -93,7 +93,7 @@ func GetServiceCABundle(jaeger *v1.Jaeger) *corev1.ConfigMap {
// trusted CA bundle volume and volumeMount, if running on OpenShift
func Update(jaeger *v1.Jaeger, commonSpec *v1.JaegerCommonSpec) {
// Only configure the trusted CA if running in OpenShift
if autodetect.OperatorConfiguration.GetPlatform() != autodetect.OpenShiftPlatform {
if viper.GetString("platform") != v1.FlagPlatformOpenShift {
return
}
@ -130,7 +130,7 @@ func Update(jaeger *v1.Jaeger, commonSpec *v1.JaegerCommonSpec) {
// AddServiceCA will modify the supplied common spec, to include
// the service CA volume and volumeMount, if running on OpenShift
func AddServiceCA(jaeger *v1.Jaeger, commonSpec *v1.JaegerCommonSpec) {
if autodetect.OperatorConfiguration.GetPlatform() != autodetect.OpenShiftPlatform {
if viper.GetString("platform") != v1.FlagPlatformOpenShift {
return
}

View File

@ -9,7 +9,6 @@ import (
"k8s.io/apimachinery/pkg/types"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
)
func TestGetWithoutTrustedCA(t *testing.T) {
@ -30,7 +29,7 @@ func TestGetWithoutTrustedCA(t *testing.T) {
func TestGetWithTrustedCA(t *testing.T) {
// prepare
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"})
@ -46,7 +45,7 @@ func TestGetWithTrustedCA(t *testing.T) {
func TestGetWithServiceCA(t *testing.T) {
// prepare
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"})
@ -61,7 +60,7 @@ func TestGetWithServiceCA(t *testing.T) {
func TestGetWithExistingTrustedCA(t *testing.T) {
// prepare
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"})
@ -79,7 +78,7 @@ func TestGetWithExistingTrustedCA(t *testing.T) {
func TestGetWithExistingServiceCA(t *testing.T) {
// prepare
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"})
@ -108,13 +107,13 @@ func TestUpdateWithoutCAs(t *testing.T) {
AddServiceCA(jaeger, &commonSpec)
// verify
assert.Empty(t, commonSpec.Volumes)
assert.Empty(t, commonSpec.VolumeMounts)
assert.Len(t, commonSpec.Volumes, 0)
assert.Len(t, commonSpec.VolumeMounts, 0)
}
func TestUpdateWithTrustedCA(t *testing.T) {
// prepare
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"})
@ -131,7 +130,7 @@ func TestUpdateWithTrustedCA(t *testing.T) {
func TestUpdateWithExistingTrustedCA(t *testing.T) {
// prepare
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
jaeger := v1.NewJaeger(types.NamespacedName{Name: "my-instance"})
@ -152,6 +151,6 @@ func TestUpdateWithExistingTrustedCA(t *testing.T) {
AddServiceCA(jaeger, &commonSpec)
// verify
assert.Empty(t, commonSpec.Volumes)
assert.Empty(t, commonSpec.VolumeMounts)
assert.Len(t, commonSpec.Volumes, 0)
assert.Len(t, commonSpec.VolumeMounts, 0)
}

View File

@ -91,9 +91,9 @@ func TestUpdateWithSamplingConfigFileOption(t *testing.T) {
commonSpec := v1.JaegerCommonSpec{}
Update(jaeger, &commonSpec, &options)
assert.Empty(t, commonSpec.Volumes)
assert.Empty(t, commonSpec.VolumeMounts)
assert.Empty(t, options)
assert.Len(t, commonSpec.Volumes, 0)
assert.Len(t, commonSpec.VolumeMounts, 0)
assert.Len(t, options, 0)
}
func TestGetWithSamplingConfigFileOption(t *testing.T) {

View File

@ -3,17 +3,17 @@ package tls
import (
"fmt"
"github.com/spf13/viper"
corev1 "k8s.io/api/core/v1"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
"github.com/jaegertracing/jaeger-operator/pkg/service"
"github.com/jaegertracing/jaeger-operator/pkg/util"
)
// Update will mount the tls secret on the collector pod.
func Update(jaeger *v1.Jaeger, commonSpec *v1.JaegerCommonSpec, options *[]string) {
if autodetect.OperatorConfiguration.GetPlatform() != autodetect.OpenShiftPlatform {
if viper.GetString("platform") != v1.FlagPlatformOpenShift {
return
}

View File

@ -3,16 +3,16 @@ package tls
import (
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/types"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
)
func TestUpdateWithTLSSecret(t *testing.T) {
jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestUpdateWithTLSSecret"})
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
commonSpec := v1.JaegerCommonSpec{}
options := []string{}
@ -25,3 +25,18 @@ func TestUpdateWithTLSSecret(t *testing.T) {
assert.Equal(t, "--collector.grpc.tls.cert=/etc/tls-config/tls.crt", options[1])
assert.Equal(t, "--collector.grpc.tls.key=/etc/tls-config/tls.key", options[2])
}
func TestIgnoreDefaultTLSSecretWhenGrpcHostPortIsSet(t *testing.T) {
jaeger := v1.NewJaeger(types.NamespacedName{Name: "TestIgnoreDefaultTLSSecretWhenGrpcHostPortIsSet"})
viper.Set("platform", v1.FlagPlatformOpenShift)
commonSpec := v1.JaegerCommonSpec{}
options := []string{}
options = append(options, "--reporter.grpc.host-port=my.host-port.com")
Update(jaeger, &commonSpec, &options)
assert.Len(t, commonSpec.Volumes, 0)
assert.Len(t, commonSpec.VolumeMounts, 0)
assert.Len(t, options, 1)
assert.Equal(t, "--reporter.grpc.host-port=my.host-port.com", options[0])
}

View File

@ -49,9 +49,9 @@ func TestUpdateNoUIConfig(t *testing.T) {
options := []string{}
Update(jaeger, &commonSpec, &options)
assert.Empty(t, commonSpec.Volumes)
assert.Empty(t, commonSpec.VolumeMounts)
assert.Empty(t, options)
assert.Len(t, commonSpec.Volumes, 0)
assert.Len(t, commonSpec.VolumeMounts, 0)
assert.Len(t, options, 0)
}
func TestUpdateWithUIConfig(t *testing.T) {

View File

@ -51,7 +51,7 @@ func TestUpdateHref(t *testing.T) {
}
link := Get(jaeger, &route)
assert.Equal(t, "", link.Spec.Href)
assert.Equal(t, link.Spec.Href, "")
route.Spec.Host = "namespace.somehostname"
newLinks := UpdateHref([]corev1.Route{route}, []consolev1.ConsoleLink{*link})
assert.Equal(t, fmt.Sprintf("https://%s", route.Spec.Host), newLinks[0].Spec.Href)

View File

@ -5,11 +5,10 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
@ -20,7 +19,7 @@ func TestServiceAccountCreate(t *testing.T) {
// prepare
nsn := types.NamespacedName{Name: "my-instance"}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
}
@ -42,7 +41,7 @@ func TestServiceAccountCreate(t *testing.T) {
res, err := r.Reconcile(req)
// verify
require.NoError(t, err)
assert.NoError(t, err)
assert.False(t, res.Requeue, "We don't requeue for now")
persisted := &corev1.ServiceAccount{}
@ -52,7 +51,7 @@ func TestServiceAccountCreate(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Equal(t, persistedName.Name, persisted.Name)
require.NoError(t, err)
assert.NoError(t, err)
}
func TestServiceAccountUpdate(t *testing.T) {
@ -67,7 +66,7 @@ func TestServiceAccountUpdate(t *testing.T) {
"app.kubernetes.io/managed-by": "jaeger-operator",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
&orig,
}
@ -84,7 +83,7 @@ func TestServiceAccountUpdate(t *testing.T) {
// test
_, err := r.Reconcile(reconcile.Request{NamespacedName: nsn})
require.NoError(t, err)
assert.NoError(t, err)
// verify
persisted := &corev1.ServiceAccount{}
@ -94,7 +93,7 @@ func TestServiceAccountUpdate(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Equal(t, "new-value", persisted.Annotations["key"])
require.NoError(t, err)
assert.NoError(t, err)
}
func TestServiceAccountDelete(t *testing.T) {
@ -108,7 +107,7 @@ func TestServiceAccountDelete(t *testing.T) {
"app.kubernetes.io/managed-by": "jaeger-operator",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
&orig,
}
@ -120,7 +119,7 @@ func TestServiceAccountDelete(t *testing.T) {
// test
_, err := r.Reconcile(reconcile.Request{NamespacedName: nsn})
require.NoError(t, err)
assert.NoError(t, err)
// verify
persisted := &corev1.ServiceAccount{}
@ -130,7 +129,7 @@ func TestServiceAccountDelete(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Empty(t, persisted.Name)
require.Error(t, err) // not found
assert.Error(t, err) // not found
}
func TestAccountCreateExistingNameInAnotherNamespace(t *testing.T) {
@ -144,7 +143,7 @@ func TestAccountCreateExistingNameInAnotherNamespace(t *testing.T) {
Namespace: "tenant2",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
v1.NewJaeger(nsnExisting),
&corev1.ServiceAccount{
@ -174,18 +173,18 @@ func TestAccountCreateExistingNameInAnotherNamespace(t *testing.T) {
res, err := r.Reconcile(req)
// verify
require.NoError(t, err)
assert.NoError(t, err)
assert.False(t, res.Requeue, "We don't requeue for now")
persisted := &corev1.ServiceAccount{}
err = cl.Get(context.Background(), nsn, persisted)
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, nsn.Name, persisted.Name)
assert.Equal(t, nsn.Namespace, persisted.Namespace)
persistedExisting := &corev1.ServiceAccount{}
err = cl.Get(context.Background(), nsnExisting, persistedExisting)
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, nsnExisting.Name, persistedExisting.Name)
assert.Equal(t, nsnExisting.Namespace, persistedExisting.Namespace)
}

View File

@ -6,12 +6,11 @@ import (
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
@ -27,7 +26,7 @@ func TestClusterRoleBindingsCreate(t *testing.T) {
Name: "my-instance",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
}
@ -49,7 +48,7 @@ func TestClusterRoleBindingsCreate(t *testing.T) {
res, err := r.Reconcile(req)
// verify
require.NoError(t, err)
assert.NoError(t, err)
assert.False(t, res.Requeue, "We don't requeue for now")
persisted := &rbac.ClusterRoleBinding{}
@ -59,7 +58,7 @@ func TestClusterRoleBindingsCreate(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Equal(t, persistedName.Name, persisted.Name)
require.NoError(t, err)
assert.NoError(t, err)
}
func TestClusterRoleBindingsSkipped(t *testing.T) {
@ -71,7 +70,7 @@ func TestClusterRoleBindingsSkipped(t *testing.T) {
Name: "my-instance",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
}
@ -93,7 +92,7 @@ func TestClusterRoleBindingsSkipped(t *testing.T) {
res, err := r.Reconcile(req)
// verify
require.NoError(t, err)
assert.NoError(t, err)
assert.False(t, res.Requeue, "We don't requeue for now")
persisted := &rbac.ClusterRoleBinding{}
@ -122,7 +121,7 @@ func TestClusterRoleBindingsUpdate(t *testing.T) {
"app.kubernetes.io/managed-by": "jaeger-operator",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
&orig,
}
@ -139,7 +138,7 @@ func TestClusterRoleBindingsUpdate(t *testing.T) {
// test
_, err := r.Reconcile(reconcile.Request{NamespacedName: nsn})
require.NoError(t, err)
assert.NoError(t, err)
// verify
persisted := &rbac.ClusterRoleBinding{}
@ -149,7 +148,7 @@ func TestClusterRoleBindingsUpdate(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Equal(t, "new-value", persisted.Annotations["key"])
require.NoError(t, err)
assert.NoError(t, err)
}
func TestClusterRoleBindingsDelete(t *testing.T) {
@ -168,7 +167,7 @@ func TestClusterRoleBindingsDelete(t *testing.T) {
"app.kubernetes.io/managed-by": "jaeger-operator",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
&orig,
}
@ -180,7 +179,7 @@ func TestClusterRoleBindingsDelete(t *testing.T) {
// test
_, err := r.Reconcile(reconcile.Request{NamespacedName: nsn})
require.NoError(t, err)
assert.NoError(t, err)
// verify
persisted := &rbac.ClusterRoleBinding{}
@ -190,5 +189,5 @@ func TestClusterRoleBindingsDelete(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Empty(t, persisted.Name)
require.Error(t, err) // not found
assert.Error(t, err) // not found
}
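
Both delete tests above end by asserting only that Get returned some error, with a trailing // not found comment carrying the actual intent. Since this file's import block already pulls in k8s.io/apimachinery/pkg/api/errors, the check could be made precise with IsNotFound. A hedged sketch; the stricter assertion is an editor suggestion, not something either ref does:

package main

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	rbac "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// Sketch: tightening the "// not found" assertion with apierrors.IsNotFound
// so that an unrelated Get failure cannot pass the test by accident.
func TestClusterRoleBindingNotFoundSketch(t *testing.T) {
	scheme := runtime.NewScheme()
	require.NoError(t, clientgoscheme.AddToScheme(scheme))

	// An empty fake client: the binding was never created, so Get must
	// return a NotFound error.
	cl := fake.NewClientBuilder().WithScheme(scheme).Build()

	persisted := &rbac.ClusterRoleBinding{}
	err := cl.Get(context.Background(), types.NamespacedName{Name: "my-instance"}, persisted)

	require.Error(t, err)
	assert.True(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err)
}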

View File

@ -6,11 +6,10 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
@ -23,7 +22,7 @@ func TestConfigMapsCreate(t *testing.T) {
Name: "TestConfigMapsCreate",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
}
@ -45,7 +44,7 @@ func TestConfigMapsCreate(t *testing.T) {
res, err := r.Reconcile(req)
// verify
require.NoError(t, err)
assert.NoError(t, err)
assert.False(t, res.Requeue, "We don't requeue for now")
persisted := &corev1.ConfigMap{}
@ -55,7 +54,7 @@ func TestConfigMapsCreate(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Equal(t, persistedName.Name, persisted.Name)
require.NoError(t, err)
assert.NoError(t, err)
}
func TestConfigMapsUpdate(t *testing.T) {
@ -72,7 +71,7 @@ func TestConfigMapsUpdate(t *testing.T) {
"app.kubernetes.io/managed-by": "jaeger-operator",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
&orig,
}
@ -89,7 +88,7 @@ func TestConfigMapsUpdate(t *testing.T) {
// test
_, err := r.Reconcile(reconcile.Request{NamespacedName: nsn})
require.NoError(t, err)
assert.NoError(t, err)
// verify
persisted := &corev1.ConfigMap{}
@ -99,7 +98,7 @@ func TestConfigMapsUpdate(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Equal(t, "new-value", persisted.Annotations["key"])
require.NoError(t, err)
assert.NoError(t, err)
}
func TestConfigMapsDelete(t *testing.T) {
@ -115,7 +114,7 @@ func TestConfigMapsDelete(t *testing.T) {
"app.kubernetes.io/managed-by": "jaeger-operator",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
&orig,
}
@ -127,7 +126,7 @@ func TestConfigMapsDelete(t *testing.T) {
// test
_, err := r.Reconcile(reconcile.Request{NamespacedName: nsn})
require.NoError(t, err)
assert.NoError(t, err)
// verify
persisted := &corev1.ConfigMap{}
@ -137,7 +136,7 @@ func TestConfigMapsDelete(t *testing.T) {
}
err = cl.Get(context.Background(), persistedName, persisted)
assert.Empty(t, persisted.Name)
require.Error(t, err) // not found
assert.Error(t, err) // not found
}
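
The other recurring difference is testify flavour: one ref checks errors with require, the other with assert. The distinction is behavioural rather than cosmetic, and it matters most right before a result is dereferenced. A self-contained sketch; the error value is illustrative:

package main

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Sketch of the behavioural difference behind the require/assert pairs
// throughout these files: assert records a failure and keeps executing,
// require calls t.FailNow and aborts the test immediately.
func TestAssertVsRequireSketch(t *testing.T) {
	err := errors.New("not found")

	// Passes here; on a nil error it would mark the test failed but
	// continue, letting later lines act on incomplete state.
	assert.Error(t, err)

	// Passes here; on a nil error it would abort at once, which is the
	// safer choice before using the fetched object.
	require.Error(t, err)
}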
func TestConfigMapCreateExistingNameInAnotherNamespace(t *testing.T) {
@ -151,7 +150,7 @@ func TestConfigMapCreateExistingNameInAnotherNamespace(t *testing.T) {
Namespace: "tenant2",
}
objs := []client.Object{
objs := []runtime.Object{
v1.NewJaeger(nsn),
v1.NewJaeger(nsnExisting),
&corev1.ConfigMap{
@ -181,18 +180,18 @@ func TestConfigMapCreateExistingNameInAnotherNamespace(t *testing.T) {
res, err := r.Reconcile(req)
// verify
require.NoError(t, err)
assert.NoError(t, err)
assert.False(t, res.Requeue, "We don't requeue for now")
persisted := &corev1.ConfigMap{}
err = cl.Get(context.Background(), nsn, persisted)
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, nsn.Name, persisted.Name)
assert.Equal(t, nsn.Namespace, persisted.Namespace)
persistedExisting := &corev1.ConfigMap{}
err = cl.Get(context.Background(), nsnExisting, persistedExisting)
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, nsnExisting.Name, persistedExisting.Name)
assert.Equal(t, nsnExisting.Namespace, persistedExisting.Namespace)
}
@ -233,7 +232,7 @@ func TestConfigMapsClean(t *testing.T) {
"app.kubernetes.io/managed-by": "jaeger-operator",
}
objs := []client.Object{
objs := []runtime.Object{
trustedCAConfig,
serviceCAConfig,
serviceCAConfigExist,
@ -245,16 +244,16 @@ func TestConfigMapsClean(t *testing.T) {
// The three defined ConfigMaps exist
configMaps := &corev1.ConfigMapList{}
err := cl.List(context.Background(), configMaps)
require.NoError(t, err)
assert.NoError(t, err)
assert.Len(t, configMaps.Items, 3)
// Reconcile non-exist jaeger
_, err = r.Reconcile(reconcile.Request{NamespacedName: nsnNonExist})
require.NoError(t, err)
assert.NoError(t, err)
// Check that configmaps were clean up.
err = cl.List(context.Background(), configMaps)
require.NoError(t, err)
assert.NoError(t, err)
assert.Len(t, configMaps.Items, 1)
assert.Equal(t, fmt.Sprintf("%s-service-ca", nsnExisting.Name), configMaps.Items[0].Name)
}
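
TestConfigMapsClean above verifies garbage collection by counting every ConfigMap held by the fake client. Scoping the List call with the managed-by label from the hunk would make the same assertion less brittle. A hedged sketch; the selector-based variant is a suggestion and the object names are illustrative:

package main

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// Sketch: listing only operator-managed ConfigMaps via a label selector
// instead of counting every ConfigMap in the fake cluster.
func TestListManagedConfigMapsSketch(t *testing.T) {
	scheme := runtime.NewScheme()
	require.NoError(t, clientgoscheme.AddToScheme(scheme))

	cl := fake.NewClientBuilder().WithScheme(scheme).WithObjects(
		&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
			Name:      "my-instance-service-ca",
			Namespace: "tenant1",
			Labels:    map[string]string{"app.kubernetes.io/managed-by": "jaeger-operator"},
		}},
		&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
			Name:      "unrelated",
			Namespace: "tenant1",
		}},
	).Build()

	configMaps := &corev1.ConfigMapList{}
	err := cl.List(context.Background(), configMaps,
		client.MatchingLabels{"app.kubernetes.io/managed-by": "jaeger-operator"})

	require.NoError(t, err)
	assert.Len(t, configMaps.Items, 1)
}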

Some files were not shown because too many files have changed in this diff.