Compare commits

..

No commits in common. "main" and "v1.37.0" have entirely different histories.

324 changed files with 5440 additions and 18163 deletions

4
.ci/after-success.sh Executable file
View File

@ -0,0 +1,4 @@
#!/bin/bash
# Upload code coverage results to Codecov after a successful CI build.
#
# -f makes curl fail on HTTP errors and -S surfaces the error message,
# so a failed download aborts the step instead of piping an empty
# script into bash and silently reporting success.
set -euo pipefail

echo "Uploading code coverage results"
bash <(curl -sSf https://codecov.io/bash)

View File

@ -1,3 +1,3 @@
#!/bin/bash
./bin/goimports -local "github.com/jaegertracing/jaeger-operator" -l -w $(git ls-files "*\.go" | grep -v vendor)
${GOPATH}/bin/goimports -local "github.com/jaegertracing/jaeger-operator" -l -w $(git ls-files "*\.go" | grep -v vendor)

16
.ci/lint.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
# Run golint across the repository and fail the build if any lint
# issues are found, ignoring generated code (zz_generated*) and the
# vendored elasticsearch v1 API package.

GOLINT=golint

# Fall back to ${GOPATH}/bin/golint when golint is not on PATH.
# NOTE: the previous `[ -n ${GOPATH} ]` was a one-argument test that is
# always true when GOPATH is empty or unset; quoting fixes that.
if ! command -v "${GOLINT}" > /dev/null; then
    if [ -n "${GOPATH}" ]; then
        GOLINT="${GOPATH}/bin/golint"
    fi
fi

out=$("${GOLINT}" ./... | grep -v pkg/storage/elasticsearch/v1 | grep -v zz_generated)
if [[ -n "$out" ]]; then
    echo "$out"
    exit 1
fi

View File

@ -5,11 +5,7 @@ if [[ -z $OPERATOR_VERSION ]]; then
exit 1
fi
JAEGER_VERSION=$(echo $JAEGER_VERSION | tr -d '"')
JAEGER_AGENT_VERSION=$(echo $JAEGER_AGENT_VERSION | tr -d '"')
PREVIOUS_VERSION=$(grep operator= versions.txt | awk -F= '{print $2}')
@ -24,11 +20,11 @@ sed "s~replaces: jaeger-operator.v.*~replaces: jaeger-operator.v${PREVIOUS_VERSI
sed -i "s~all-in-one:.*~all-in-one:${JAEGER_VERSION}~gi" examples/all-in-one-with-options.yaml
# statefulset-manual-sidecar
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_AGENT_VERSION}~gi" examples/statefulset-manual-sidecar.yaml
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_VERSION}~gi" examples/statefulset-manual-sidecar.yaml
# operator-with-tracing
sed -i "s~jaeger-operator:.*~jaeger-operator:${OPERATOR_VERSION}~gi" examples/operator-with-tracing.yaml
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_AGENT_VERSION}~gi" examples/operator-with-tracing.yaml
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_VERSION}~gi" examples/operator-with-tracing.yaml
# tracegen
sed -i "s~jaeger-tracegen:.*~jaeger-tracegen:${JAEGER_VERSION}~gi" examples/tracegen.yaml

3
.ci/upload-test-coverage.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Upload test coverage results to Codecov.
#
# -f makes curl fail on HTTP errors and -S surfaces the error message,
# so a failed download aborts the step instead of piping an empty
# script into bash and silently exiting 0 (skipping the upload).
set -euo pipefail

bash <(curl -sSf https://codecov.io/bash)

15
.ci/vgot.sh Executable file
View File

@ -0,0 +1,15 @@
#!/bin/sh
# vgot: install one or more Go command packages, optionally at a
# specific version, using a throwaway module so the caller's go.mod
# (if any) is left untouched.
#
# usage: vgot cmdpackage[@version]...

if [ $# = 0 ]; then
    # Quoted: the bare `[@version]` is a glob pattern and could be
    # expanded against files in the current directory if left unquoted.
    echo "usage: vgot cmdpackage[@version]..." >&2
    exit 2
fi

# Work inside a temporary module. Abort if we cannot create or enter
# it — otherwise `go mod init` would run in the caller's directory and
# the trailing `rm -r` could remove the wrong path.
d=$(mktemp -d) || exit 1
cd "$d" || exit 1
go mod init temp >/dev/null 2>&1

for i; do
    # Strip the optional @version suffix to get the bare import path.
    pkg=$(printf '%s' "$i" | sed 's/@.*//')
    go get -d "$i" &&
    go install "$pkg" &&
    echo "installed $(go list -f '{{.ImportPath}}@{{.Module.Version}}' "$pkg")"
done
rm -r "$d"

View File

@ -1,10 +1,3 @@
coverage:
status:
project:
default:
target: auto
# this allows a 0.1% drop from the previous base commit coverage
threshold: 0.1%
ignore:
- "apis/v1/zz_generated.deepcopy.go"
- "apis/v1/zz_generated.defaults.go"

View File

@ -1,54 +1,11 @@
version: 2
updates:
- package-ecosystem: docker
directory: "/"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- docker
- ok-to-test
- package-ecosystem: docker
directory: "/tests"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- docker
- ok-to-test
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- go
- ok-to-test
groups:
golang-org-x:
patterns:
- "golang.org/x/*"
opentelemetry:
patterns:
- "go.opentelemetry.io/*"
prometheus:
patterns:
- "github.com/prometheus-operator/prometheus-operator"
- "github.com/prometheus-operator/prometheus-operator/*"
- "github.com/prometheus/prometheus"
- "github.com/prometheus/prometheus/*"
- "github.com/prometheus/client_go"
- "github.com/prometheus/client_go/*"
kubernetes:
patterns:
- "k8s.io/*"
- "sigs.k8s.io/*"
- package-ecosystem: "github-actions"
directory: "/"
@ -56,7 +13,3 @@ updates:
interval: "daily"
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- github_actions
- ok-to-test

View File

@ -10,22 +10,19 @@ on:
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
basic-checks:
runs-on: ubuntu-20.04
env:
USER: jaegertracing
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
uses: actions/setup-go@v3
with:
go-version: "1.22"
go-version: 1.17
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: "install kubebuilder"
run: ./hack/install/install-kubebuilder.sh
@ -37,6 +34,6 @@ jobs:
run: make install-tools ci
- name: "upload test coverage report"
uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574 # v5.4.0
with:
token: ${{ secrets.CODECOV_TOKEN }}
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
run: ./.ci/upload-test-coverage.sh

View File

@ -10,15 +10,8 @@ on:
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
codeql-analyze:
permissions:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
name: CodeQL Analyze
runs-on: ubuntu-latest
@ -29,24 +22,19 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "Set up Go"
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: "go.mod"
uses: actions/checkout@v3
# Disable CodeQL for tests
# https://github.com/github/codeql/issues/4786
- run: rm -rf ./tests
- name: Initialize CodeQL
uses: github/codeql-action/init@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/init@v2
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/analyze@v2

View File

@ -0,0 +1,34 @@
name: Elasticsearch E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-elasticsearch-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.24"
name: Run Elasticsearch E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: elasticsearch
kube_version: ${{ matrix.kube-version }}

34
.github/workflows/e2e-examples.yaml vendored Normal file
View File

@ -0,0 +1,34 @@
name: Examples E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-examples-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.24"
name: Run examples E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: examples
kube_version: ${{ matrix.kube-version }}

34
.github/workflows/e2e-generate.yaml vendored Normal file
View File

@ -0,0 +1,34 @@
name: Generate E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-generate-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.24"
name: Run generate E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: generate
kube_version: ${{ matrix.kube-version }}

View File

@ -0,0 +1,34 @@
name: Miscellaneous E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-miscellaneous-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.24"
name: Run miscellaneous E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: miscellaneous
kube_version: ${{ matrix.kube-version }}

34
.github/workflows/e2e-sidecar.yaml vendored Normal file
View File

@ -0,0 +1,34 @@
name: Sidecar E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-sidecar-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.24"
name: Run sidecar E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: sidecar
kube_version: ${{ matrix.kube-version }}

34
.github/workflows/e2e-streaming.yaml vendored Normal file
View File

@ -0,0 +1,34 @@
name: Streaming E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-streaming-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.24"
name: Run streaming E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: streaming
kube_version: ${{ matrix.kube-version }}

34
.github/workflows/e2e-ui.yaml vendored Normal file
View File

@ -0,0 +1,34 @@
name: UI E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-allinone-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.24"
name: UI E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: ui
kube_version: ${{ matrix.kube-version }}

34
.github/workflows/e2e-upgrade.yaml vendored Normal file
View File

@ -0,0 +1,34 @@
name: Upgrade E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
run-e2e-upgrade-test-suite:
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.24"
name: Run upgrade E2E tests
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: ./hack/actions/e2e
with:
testsuite_name: upgrade
kube_version: ${{ matrix.kube-version }}

View File

@ -1,84 +0,0 @@
name: E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
permissions:
contents: read
jobs:
e2e:
name: "Run ${{ matrix.testsuite.label }} E2E tests (${{ matrix.kube-version }})"
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
kube-version:
- "1.19"
- "1.30"
testsuite:
- { name: "elasticsearch", label: "Elasticsearch" }
- { name: "examples", label: "Examples" }
- { name: "generate", label: "Generate" }
- { name: "miscellaneous", label: "Miscellaneous" }
- { name: "sidecar", label: "Sidecar" }
- { name: "streaming", label: "Streaming" }
- { name: "ui", label: "UI" }
- { name: "upgrade", label: "Upgrade" }
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: "Set up Go"
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: "1.22"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
with:
install: true
- name: Cache Docker layers
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
with:
path: /tmp/.buildx-cache
key: e2e-${{ github.sha }}
restore-keys: |
e2e-
- name: "Install KIND"
run: ./hack/install/install-kind.sh
shell: bash
- name: "Install KUTTL"
run: ./hack/install/install-kuttl.sh
shell: bash
- name: "Install gomplate"
run: ./hack/install/install-gomplate.sh
shell: bash
- name: "Install dependencies"
run: make install-tools
shell: bash
- name: "Run ${{ matrix.testsuite.label }} E2E test suite on Kube ${{ matrix.kube-version }}"
env:
VERBOSE: "true"
KUBE_VERSION: "${{ matrix.kube-version }}"
DOCKER_BUILD_OPTIONS: "--cache-from type=local,src=/tmp/.buildx-cache --cache-to type=local,dest=/tmp/.buildx-cache-new,mode=max --load"
run: make run-e2e-tests-${{ matrix.testsuite.name }}
shell: bash
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
shell: bash

View File

@ -1,54 +0,0 @@
name: Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '45 13 * * 1'
push:
branches: [ "main" ]
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
# Uncomment the permissions below if installing in a private repository.
# contents: read
# actions: read
steps:
- name: "Checkout code"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
with:
results_file: results.sarif
results_format: sarif
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
with:
sarif_file: results.sarif

View File

@ -6,22 +6,19 @@ on:
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
publish:
runs-on: ubuntu-latest
env:
USER: jaegertracing
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- uses: actions/checkout@v3
- uses: docker/setup-qemu-action@v2.0.0
- uses: docker/setup-buildx-action@v2.0.0
- name: "publishes the images"
env:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
OPERATOR_VERSION: main

View File

@ -6,25 +6,26 @@ on:
- 'v*'
jobs:
release:
unit-tests:
runs-on: ubuntu-20.04
env:
USER: jaegertracing
steps:
- name: Set up Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
uses: actions/setup-go@v3
with:
go-version: "1.22"
go-version: 1.17
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v3
- name: "install kubebuilder"
run: ./hack/install/install-kubebuilder.sh
- name: "install kustomize"
run: ./hack/install/install-kustomize.sh
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- uses: docker/setup-qemu-action@v2.0.0
- uses: docker/setup-buildx-action@v2.0.0
- name: "generate release resources"
run: make release-artifacts USER=jaegertracing

View File

@ -10,16 +10,13 @@ on:
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
operator-sdk-scorecard:
name: "Operator-SDK Scorecard"
runs-on: ubuntu-latest
steps:
- name: "Check out code"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v3
- name: "Install KIND"
run: ./hack/install/install-kind.sh
- name: "Install KUTTL"

View File

@ -1,33 +0,0 @@
issues:
# Excluding configuration per-path, per-linter, per-text and per-source
exclude-rules:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gosec
- linters:
- staticcheck
text: "SA1019:"
linters-settings:
goimports:
local-prefixes: github.com/jaegertracing/jaeger-operator
gosimple:
go: "1.22"
linters:
enable:
- bidichk
- errorlint
- gofumpt
- goimports
- gosec
- govet
- misspell
- testifylint
disable:
- errcheck
run:
go: '1.22'
timeout: 10m

View File

@ -1,142 +1,5 @@
Changes by Version
==================
## v1.65.0 (2025-01-22)
* Pin agent version to 1.62.0 ([#2790](https://github.com/jaegertracing/jaeger-operator/pull/2790), [@rubenvp8510](https://github.com/rubenvp8510))
* Added compatibility for Jaeger Operator v1.61.x and v1.62.x ([#2725](https://github.com/jaegertracing/jaeger-operator/pull/2725), [@mooneeb](https://github.com/mooneeb))
## v1.62.0 (2024-10-10)
* TRACING-4238 | Fix gateway 502 timeout ([#2694](https://github.com/jaegertracing/jaeger-operator/pull/2694), [@pavolloffay](https://github.com/pavolloffay))
* feat: added missing test for elasticsearch reconciler ([#2662](https://github.com/jaegertracing/jaeger-operator/pull/2662), [@Ankit152](https://github.com/Ankit152))
## v1.61.0 (2024-09-16)
* Bump google.golang.org/grpc from 1.66.0 to 1.66.1 ([#2675](https://github.com/jaegertracing/jaeger-operator/pull/2675), [@dependabot[bot]](https://github.com/apps/dependabot))
* Bump google.golang.org/grpc from 1.65.0 to 1.66.0 ([#2670](https://github.com/jaegertracing/jaeger-operator/pull/2670), [@dependabot[bot]](https://github.com/apps/dependabot))
* Bump the opentelemetry group with 9 updates ([#2668](https://github.com/jaegertracing/jaeger-operator/pull/2668), [@dependabot[bot]](https://github.com/apps/dependabot))
## v1.60.0 (2024-08-13)
* Fix Golang version in go.mod ([#2652](https://github.com/jaegertracing/jaeger-operator/pull/2652), [@iblancasa](https://github.com/iblancasa))
## v1.60.0 (2024-08-09)
* Test on k8s 1.30 ([#2647](https://github.com/jaegertracing/jaeger-operator/pull/2647), [@pavolloffay](https://github.com/pavolloffay))
* Bump go to 1.22 and controller-gen to 1.14 ([#2646](https://github.com/jaegertracing/jaeger-operator/pull/2646), [@pavolloffay](https://github.com/pavolloffay))
## v1.59.0 (2024-08-06)
* Update compatibility matrix for v1.57.x ([#2594](https://github.com/jaegertracing/jaeger-operator/pull/2594), [@mooneeb](https://github.com/mooneeb))
* imagePullSecrets is not set for agent DaemonSet ([#2563](https://github.com/jaegertracing/jaeger-operator/pull/2563), [@antoniomerlin](https://github.com/antoniomerlin))
## v1.57.0 (2024-05-06)
## v1.55.0 (2024-03-22)
* Add server URL to JaegerMetricsStorageSpec ([#2481](https://github.com/jaegertracing/jaeger-operator/pull/2481), [@antoniomerlin](https://github.com/antoniomerlin))
* Use the host set in the Ingress field for the OpenShift Route ([#2409](https://github.com/jaegertracing/jaeger-operator/pull/2409), [@iblancasa](https://github.com/iblancasa))
* Add minimum Kubernetes and OpenShift versions ([#2492](https://github.com/jaegertracing/jaeger-operator/pull/2492), [@andreasgerstmayr](https://github.com/andreasgerstmayr))
## v1.54.0 (2024-02-14)
* apis/v1: add jaeger agent deprecation warning ([#2471](https://github.com/jaegertracing/jaeger-operator/pull/2471), [@frzifus](https://github.com/frzifus))
## V1.53.0 (2024-01-17)
* Choose the newer autoscaling version by default ([#2374](https://github.com/jaegertracing/jaeger-operator/pull/2374), [@iblancasa](https://github.com/iblancasa))
* Upgrade operator-sdk to 1.32.0 ([#2388](https://github.com/jaegertracing/jaeger-operator/pull/2388), [@iblancasa](https://github.com/iblancasa))
* Fix containerImage field and remove statement about failing CI ([#2386](https://github.com/jaegertracing/jaeger-operator/pull/2386), [@iblancasa](https://github.com/iblancasa))
* Fix injection: prefer jaeger in the same namespace ([#2383](https://github.com/jaegertracing/jaeger-operator/pull/2383), [@pavolloffay](https://github.com/pavolloffay))
## v1.52.0 (2023-12-07)
* Add missing container security context settings and tests ([#2354](https://github.com/jaegertracing/jaeger-operator/pull/2354), [@tingeltangelthomas](https://github.com/tingeltangelthomas))
## v1.51.0 (2023-11-17)
* Support configuring images via RELATED_IMAGE_ environment variables ([#2355](https://github.com/jaegertracing/jaeger-operator/pull/2355), [@andreasgerstmayr](https://github.com/andreasgerstmayr))
* Regenerate ES certificated when is close to 1 day for expire ([#2356](https://github.com/jaegertracing/jaeger-operator/pull/2356), [@rubenvp8510](https://github.com/rubenvp8510))
* Bump actions/checkout from 3 to 4 ([#2316](https://github.com/jaegertracing/jaeger-operator/pull/2316), [@dependabot[bot]](https://github.com/apps/dependabot))
* bump grpc to 1.58.3 ([#2346](https://github.com/jaegertracing/jaeger-operator/pull/2346), [@rubenvp8510](https://github.com/rubenvp8510))
* Bump golang version to 1.21 ([#2347](https://github.com/jaegertracing/jaeger-operator/pull/2347), [@rubenvp8510](https://github.com/rubenvp8510))
* Ensure oauth-proxy ImageStream is detected eventually ([#2340](https://github.com/jaegertracing/jaeger-operator/pull/2340), [@bverschueren](https://github.com/bverschueren))
* Check if envFrom has ConfigMapRef set ([#2342](https://github.com/jaegertracing/jaeger-operator/pull/2342), [@edwardecook](https://github.com/edwardecook))
* Bump golang.org/x/net from 0.13.0 to 0.17.0 ([#2343](https://github.com/jaegertracing/jaeger-operator/pull/2343), [@dependabot[bot]](https://github.com/apps/dependabot))
* Fix issue related to new encoding in oauth-proxy image ([#2345](https://github.com/jaegertracing/jaeger-operator/pull/2345), [@iblancasa](https://github.com/iblancasa))
* Always generate new oauth-proxy password ([#2333](https://github.com/jaegertracing/jaeger-operator/pull/2333), [@pavolloffay](https://github.com/pavolloffay))
* Add v1.48.x and v1.49.x to the support map ([#2332](https://github.com/jaegertracing/jaeger-operator/pull/2332), [@ishaqkhattana](https://github.com/ishaqkhattana))
* Pass proxy env vars to operands ([#2330](https://github.com/jaegertracing/jaeger-operator/pull/2330), [@pavolloffay](https://github.com/pavolloffay))
* Protect auth delegator behind a mutex ([#2318](https://github.com/jaegertracing/jaeger-operator/pull/2318), [@iblancasa](https://github.com/iblancasa))
## v1.49.1 (2023-09-07)
* fix: protect the kafka-profision setting behind a mutex ([#2308](https://github.com/jaegertracing/jaeger-operator/pull/2308), [@iblancasa](https://github.com/iblancasa))
## v1.48.1 (2023-09-04)
* Use base image that does not require subscription (centos 9 stream) ([#2313](https://github.com/jaegertracing/jaeger-operator/pull/2313), [@pavolloffay](https://github.com/pavolloffay))
* Update go dependencies to Kubernetes 0.28.1 ([#2301](https://github.com/jaegertracing/jaeger-operator/pull/2301), [@pavolloffay](https://github.com/pavolloffay))
* Protect the ESProvisioning setting behind a mutex ([#2287](https://github.com/jaegertracing/jaeger-operator/pull/2287), [@iblancasa](https://github.com/iblancasa))
## v1.48.0 (2023-08-28)
* Remove the TokenReview after checking we can create it ([#2286](https://github.com/jaegertracing/jaeger-operator/pull/2286), [@iblancasa](https://github.com/iblancasa))
* Fix apiVersion and kind are missing in jaeger-operator generate output ([#2281](https://github.com/jaegertracing/jaeger-operator/pull/2281), [@hiteshwani29](https://github.com/hiteshwani29))
* Fix custom labels for the deployable components in production strategy ([#2277](https://github.com/jaegertracing/jaeger-operator/pull/2277), [@hiteshwani29](https://github.com/hiteshwani29))
* Ensure the OAuth Proxy image detection is run after the platform detection ([#2280](https://github.com/jaegertracing/jaeger-operator/pull/2280), [@iblancasa](https://github.com/iblancasa))
* Added changes to respect env variable set from envFrom configMaps ([#2272](https://github.com/jaegertracing/jaeger-operator/pull/2272), [@hiteshwani29](https://github.com/hiteshwani29))
* Refactor the autodetect module to reduce the number of writes/reads in viper configuration ([#2274](https://github.com/jaegertracing/jaeger-operator/pull/2274), [@iblancasa](https://github.com/iblancasa))
## v1.47.0 (2023-07-12)
* Expose admin ports for agent, collector, and query Deployments via the equivalent Service ([#2262](https://github.com/jaegertracing/jaeger-operator/pull/2262), [@thomaspaulin](https://github.com/thomaspaulin))
* update otel sdk to v1.16.0/v0.39.0 ([#2261](https://github.com/jaegertracing/jaeger-operator/pull/2261), [@frzifus](https://github.com/frzifus))
* Extended compatibility matrix ([#2255](https://github.com/jaegertracing/jaeger-operator/pull/2255), [@shazib-summar](https://github.com/shazib-summar))
* Add support for Kubernetes 1.27 ([#2235](https://github.com/jaegertracing/jaeger-operator/pull/2235), [@iblancasa](https://github.com/iblancasa))
* Jaeger Collector Config: `Lifecycle` and `TerminationGracePeriodSeconds` ([#2242](https://github.com/jaegertracing/jaeger-operator/pull/2242), [@taj-p](https://github.com/taj-p))
## v1.46.0 (2023-06-16)
* Missing exposed port 16685 in query deployments ([#2239](https://github.com/jaegertracing/jaeger-operator/pull/2239), [@iblancasa](https://github.com/iblancasa))
* Use Golang 1.20 ([#2205](https://github.com/jaegertracing/jaeger-operator/pull/2205), [@iblancasa](https://github.com/iblancasa))
* [BugFix] Properly set imagePullPolicy and containerSecurityContext for EsIndexCleaner cronjob container ([#2224](https://github.com/jaegertracing/jaeger-operator/pull/2224), [@michalschott](https://github.com/michalschott))
* Remove resource limitation for the operator pod ([#2221](https://github.com/jaegertracing/jaeger-operator/pull/2221), [@iblancasa](https://github.com/iblancasa))
* Add PriorityClass for AllInOne strategy ([#2218](https://github.com/jaegertracing/jaeger-operator/pull/2218), [@sonofgibs](https://github.com/sonofgibs))
## v1.45.0 (2023-05-16)
## v1.44.0 (2023-04-13)
* Feat: add `NodeSelector` to jaeger collector, query, and ingestor ([#2200](https://github.com/jaegertracing/jaeger-operator/pull/2200), [@AhmedGrati](https://github.com/AhmedGrati))
## v1.43.0 (2023-02-07)
* update operator-sdk to 1.27.0 ([#2178](https://github.com/jaegertracing/jaeger-operator/pull/2178), [@iblancasa](https://github.com/iblancasa))
* Support JaegerCommonSpec in JaegerCassandraCreateSchemaSpec ([#2176](https://github.com/jaegertracing/jaeger-operator/pull/2176), [@haanhvu](https://github.com/haanhvu))
## v1.42.0 (2023-02-07)
* Upgrade Kafka Operator default version to 0.32.0 ([#2150](https://github.com/jaegertracing/jaeger-operator/pull/2150), [@iblancasa](https://github.com/iblancasa))
* Upgrade Kind, Kind images and add Kubernetes 1.26 ([#2161](https://github.com/jaegertracing/jaeger-operator/pull/2161), [@iblancasa](https://github.com/iblancasa))
1.41.1 (2023-01-23)
-------------------
* Fix the Jaeger version for the Jaeger Operator 1.41.x ([#2157](https://github.com/jaegertracing/jaeger-operator/pull/2157), [@iblancasa](https://github.com/iblancasa))
1.40.0 (2022-12-23)
-------------------
* Support e2e tests on multi architecture environment ([#2139](https://github.com/jaegertracing/jaeger-operator/pull/2139), [@jkandasa](https://github.com/jkandasa))
* limit the get of deployments to WATCH_NAMESPACE on sync ([#2126](https://github.com/jaegertracing/jaeger-operator/pull/2126), [@rubenvp8510](https://github.com/rubenvp8510))
* choose first server address ([#2087](https://github.com/jaegertracing/jaeger-operator/pull/2087), [@Efrat19](https://github.com/Efrat19))
* Fix query ingress when using streaming strategy ([#2120](https://github.com/jaegertracing/jaeger-operator/pull/2120), [@kevinearls](https://github.com/kevinearls))
* Fix Liveness Probe for Ingester and Query ([#2122](https://github.com/jaegertracing/jaeger-operator/pull/2122), [@ricoberger](https://github.com/ricoberger))
* Fix for min tls version to v1.2 ([#2119](https://github.com/jaegertracing/jaeger-operator/pull/2119), [@kangsheng89](https://github.com/kangsheng89))
1.39.0 (2022-11-03)
-------------------
* Fix: svc port doesnt match istio convention ([#2101](https://github.com/jaegertracing/jaeger-operator/pull/2101), [@frzifus](https://github.com/frzifus))
1.38.1 (2022-10-11)
-------------------
* Add ability to specify es proxy resources ([#2079](https://github.com/jaegertracing/jaeger-operator/pull/2079), [@rubenvp8510](https://github.com/rubenvp8510))
* Fix: CVE-2022-27664 ([#2081](https://github.com/jaegertracing/jaeger-operator/pull/2081), [@albertlockett](https://github.com/albertlockett))
* Add liveness and readiness probes to injected sidecar ([#2077](https://github.com/jaegertracing/jaeger-operator/pull/2077), [@MacroPower](https://github.com/MacroPower))
* Add http- port prefix to follow istio naming conventions ([#2075](https://github.com/jaegertracing/jaeger-operator/pull/2075), [@cnvergence](https://github.com/cnvergence))
1.38.0 (2022-09-19)
-------------------
* added pathType to ingress ([#2066](https://github.com/jaegertracing/jaeger-operator/pull/2066), [@giautm](https://github.com/giautm))
* set alias enable variable for spark cronjob ([#2061](https://github.com/jaegertracing/jaeger-operator/pull/2061), [@miyunari](https://github.com/miyunari))
* migrate autoscaling v2beta2 to v2 for Kubernetes 1.26 ([#2055](https://github.com/jaegertracing/jaeger-operator/pull/2055), [@iblancasa](https://github.com/iblancasa))
* add container security context support ([#2033](https://github.com/jaegertracing/jaeger-operator/pull/2033), [@mjnagel](https://github.com/mjnagel))
* change verbosity level and message of the log for autoprovisioned kafka ([#2026](https://github.com/jaegertracing/jaeger-operator/pull/2026), [@iblancasa](https://github.com/iblancasa))
1.37.0 (2022-08-11)
-------------------

View File

@ -1,34 +1,18 @@
The following table shows the compatibility of Jaeger Operator with three different components: Kubernetes, Strimzi Operator, and Cert-Manager.
The following table shows the compatibility of the Jaeger Operator with different components; in this particular case we show Kubernetes and Strimzi Operator compatibility.
| Jaeger Operator | Kubernetes | Strimzi Operator | Cert-Manager |
|-----------------|----------------|--------------------|--------------|
| v1.62.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.61.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.60.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.59.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.58.x | skipped | skipped | skipped |
| v1.57.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.56.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.55.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.54.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.53.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.52.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.51.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.50.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.49.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.48.x | v1.19 to v1.27 | v0.32 | v1.6.1 |
| v1.47.x | v1.19 to v1.27 | v0.32 | v1.6.1 |
| v1.46.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.45.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.44.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.43.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.42.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.41.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.40.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.39.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.38.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.37.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.36.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.35.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.34.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.33.x | v1.19 to v1.23 | v0.23 | v1.6.1 |
| Jaeger Operator | Kubernetes | Strimzi Operator | Cert-Manager |
|-----------------|-----------------|--------------------|--------------|
| v1.34.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.33.x | v1.19 to v1.23 | v0.23 | v1.6.1 |
| v1.32.x | v1.19 to v1.21 | v0.23 | v1.6.1 |
| v1.31.x | v1.19 to v1.21 | v0.23 | v1.6.1 |
| v1.30.x | v1.19 to v1.21 | v0.23 | |
| v1.29.x | v1.19 to v1.21 | v0.23 | |
| v1.28.x | v1.19 to v1.21 | v0.23 | |
| v1.27.x | v1.19 to v1.21 | v0.23 | |
| v1.26.x | v1.19 to v1.21 | v0.23 | |
| v1.25.x | v1.19 to v1.21 | v0.23 | |
| v1.24.x | v1.19 to v1.21 | v0.23 | |
| v1.23.x | v1.19 to v1.21 | v0.19, v0.20 | |
| v1.22.x | v1.18 to v1.20 | v0.19 | |

View File

@ -183,8 +183,9 @@ difference are:
* You need to log in your Kubernetes cluster before running the E2E tests
* You need to provide the `USE_KIND_CLUSTER=false` parameter when calling `make`
For instance, to run the `examples` E2E test suite in OpenShift, the command is:
```sh
$ make run-e2e-tests USE_KIND_CLUSTER=false
$ make run-e2e-tests-examples USE_KIND_CLUSTER=false
```
### Developing new E2E tests

View File

@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22@sha256:f43c6f049f04cbbaeb28f0aad3eea15274a7d0a7899a617d0037aec48d7ab010 as builder
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.17 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
@ -21,7 +21,6 @@ COPY pkg/ pkg/
COPY versions.txt versions.txt
ARG JAEGER_VERSION
ARG JAEGER_AGENT_VERSION
ARG VERSION_PKG
ARG VERSION
ARG VERSION_DATE
@ -33,17 +32,17 @@ ARG VERSION_DATE
# see last part of https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
ARG TARGETARCH
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -ldflags="-X ${VERSION_PKG}.version=${VERSION} -X ${VERSION_PKG}.buildDate=${VERSION_DATE} -X ${VERSION_PKG}.defaultJaeger=${JAEGER_VERSION} -X ${VERSION_PKG}.defaultAgent=${JAEGER_AGENT_VERSION}" -a -o jaeger-operator main.go
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -ldflags="-X ${VERSION_PKG}.version=${VERSION} -X ${VERSION_PKG}.buildDate=${VERSION_DATE} -X ${VERSION_PKG}.defaultJaeger=${JAEGER_VERSION}" -a -o jaeger-operator main.go
FROM quay.io/centos/centos:stream9
FROM registry.access.redhat.com/ubi8/ubi
ENV USER_UID=1001 \
USER_NAME=jaeger-operator
RUN INSTALL_PKGS="openssl" && \
dnf install -y $INSTALL_PKGS && \
yum install -y $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
dnf clean all && \
yum clean all && \
mkdir /tmp/_working_dir && \
chmod og+w /tmp/_working_dir

View File

@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22@sha256:f43c6f049f04cbbaeb28f0aad3eea15274a7d0a7899a617d0037aec48d7ab010 as builder
FROM golang:1.17 as builder
WORKDIR /workspace
@ -17,17 +17,15 @@ COPY tests tests
ENV CGO_ENABLED=0
# Build
ARG TARGETOS
ARG TARGETARCH
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./reporter -a ./tests/assert-jobs/reporter/main.go
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./reporter-otlp -a ./tests/assert-jobs/reporter-otlp/main.go
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./query -a ./tests/assert-jobs/query/main.go
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./index -a ./tests/assert-jobs/index/main.go
RUN go build -o ./reporter -a ./tests/assert-jobs/reporter/main.go
RUN go build -o ./reporter-otlp -a ./tests/assert-jobs/reporter-otlp/main.go
RUN go build -o ./query -a ./tests/assert-jobs/query/main.go
RUN go build -o ./index -a ./tests/assert-jobs/index/main.go
# Use the curl container image to ensure we have curl installed. Also, it is a
# minimal container image
FROM curlimages/curl@sha256:94e9e444bcba979c2ea12e27ae39bee4cd10bc7041a472c4727a558e213744e6
FROM curlimages/curl:7.81.0
WORKDIR /
COPY --from=builder /workspace/reporter .
COPY --from=builder /workspace/reporter-otlp .

182
Makefile
View File

@ -29,37 +29,21 @@ BUNDLE_IMG ?= ${IMG_PREFIX}/${OPERATOR_NAME}-bundle:$(addprefix v,${VERSION})
OUTPUT_BINARY ?= "$(BIN_DIR)/jaeger-operator"
VERSION_PKG ?= "github.com/jaegertracing/jaeger-operator/pkg/version"
export JAEGER_VERSION ?= "$(shell grep jaeger= versions.txt | awk -F= '{print $$2}')"
# agent was removed in jaeger 1.62.0, and the new versions of jaeger doesn't distribute the images anymore
# for that reason the last version of the agent is 1.62.0 and is pined here so we can update jaeger and maintain
# the latest agent image.
export JAEGER_AGENT_VERSION ?= "1.62.0"
# Kafka and Kafka Operator variables
STORAGE_NAMESPACE ?= "${shell kubectl get sa default -o jsonpath='{.metadata.namespace}' || oc project -q}"
KAFKA_NAMESPACE ?= "kafka"
KAFKA_VERSION ?= 0.32.0
KAFKA_EXAMPLE ?= "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/${KAFKA_VERSION}/examples/kafka/kafka-persistent-single.yaml"
KAFKA_YAML ?= "https://github.com/strimzi/strimzi-kafka-operator/releases/download/${KAFKA_VERSION}/strimzi-cluster-operator-${KAFKA_VERSION}.yaml"
KAFKA_EXAMPLE ?= "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/0.23.0/examples/kafka/kafka-persistent-single.yaml"
KAFKA_YAML ?= "https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.23.0/strimzi-cluster-operator-0.23.0.yaml"
# Prometheus Operator variables
PROMETHEUS_OPERATOR_TAG ?= v0.39.0
PROMETHEUS_BUNDLE ?= https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/${PROMETHEUS_OPERATOR_TAG}/bundle.yaml
# Metrics server variables
METRICS_SERVER_TAG ?= v0.6.1
METRICS_SERVER_YAML ?= https://github.com/kubernetes-sigs/metrics-server/releases/download/${METRICS_SERVER_TAG}/components.yaml
# Ingress controller variables
INGRESS_CONTROLLER_TAG ?= v1.0.1
INGRESS_CONTROLLER_YAML ?= https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-${INGRESS_CONTROLLER_TAG}/deploy/static/provider/kind/deploy.yaml
## Location to install tool dependencies
LOCALBIN ?= $(shell pwd)/bin
# Istio binary path and version
ISTIOCTL="bin/istioctl"
# Cert manager version to use
CERTMANAGER_VERSION ?= 1.6.1
CMCTL ?= $(LOCALBIN)/cmctl
# Operator SDK
OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk
OPERATOR_SDK_VERSION ?= 1.32.0
# Minimum Kubernetes and OpenShift versions
MIN_KUBERNETES_VERSION ?= 1.19.0
MIN_OPENSHIFT_VERSION ?= 4.12
CMCTL=$(shell pwd)/bin/cmctl
# Operator SDK version to use
OPERATOR_SDK_VERSION ?= 1.22.2
# Use a KIND cluster for the E2E tests
USE_KIND_CLUSTER ?= true
# Is Jaeger Operator installed via OLM?
@ -68,17 +52,8 @@ JAEGER_OLM ?= false
KAFKA_OLM ?= false
# Is Prometheus Operator installed via OLM?
PROMETHEUS_OLM ?= false
# Istio binary path and version
ISTIOCTL ?= $(LOCALBIN)/istioctl
# Tools
CRDOC ?= $(LOCALBIN)/crdoc
KIND ?= $(LOCALBIN)/kind
KUSTOMIZE ?= $(LOCALBIN)/kustomize
$(LOCALBIN):
mkdir -p $(LOCALBIN)
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
@ -86,13 +61,12 @@ else
GOBIN=$(shell go env GOBIN)
endif
LD_FLAGS ?= "-X $(VERSION_PKG).version=$(VERSION) -X $(VERSION_PKG).buildDate=$(VERSION_DATE) -X $(VERSION_PKG).defaultJaeger=$(JAEGER_VERSION) -X $(VERSION_PKG).defaultAgent=$(JAEGER_AGENT_VERSION)"
LD_FLAGS ?= "-X $(VERSION_PKG).version=$(VERSION) -X $(VERSION_PKG).buildDate=$(VERSION_DATE) -X $(VERSION_PKG).defaultJaeger=$(JAEGER_VERSION)"
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST ?= $(LOCALBIN)/setup-envtest
ENVTEST_K8S_VERSION = 1.30
ENVTEST_K8S_VERSION = 1.24
# Options for KIND version to use
export KUBE_VERSION ?= 1.30
export KUBE_VERSION ?= 1.20
KIND_CONFIG ?= kind-$(KUBE_VERSION).yaml
SCORECARD_TEST_IMG ?= quay.io/operator-framework/scorecard-test:v$(OPERATOR_SDK_VERSION)
@ -121,9 +95,9 @@ endif
all: manager
.PHONY: check
check: install-tools
check:
$(ECHO) Checking...
$(VECHO)./.ci/format.sh > $(FMT_LOG)
$(VECHO)GOPATH=${GOPATH} .ci/format.sh > $(FMT_LOG)
$(VECHO)[ ! -s "$(FMT_LOG)" ] || (echo "Go fmt, license check, or import ordering failures, run 'make format'" | cat - $(FMT_LOG) && false)
ensure-generate-is-noop: VERSION=$(OPERATOR_VERSION)
@ -136,19 +110,24 @@ ensure-generate-is-noop: set-image-controller generate bundle
.PHONY: format
format: install-tools
format:
$(ECHO) Formatting code...
$(VECHO)./.ci/format.sh
$(VECHO)GOPATH=${GOPATH} .ci/format.sh
PHONY: lint
lint: install-tools
lint:
$(ECHO) Linting...
$(VECHO)$(LOCALBIN)/golangci-lint -v run
$(VECHO)GOPATH=${GOPATH} ./.ci/lint.sh
.PHONY: vet
vet: ## Run go vet against code.
go vet ./...
.PHONY: security
security:
$(ECHO) Security...
$(VECHO)./bin/gosec -quiet -exclude=G104 ./... 2>/dev/null
.PHONY: build
build: format
$(ECHO) Building...
@ -157,11 +136,11 @@ build: format
.PHONY: docker
docker:
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=JAEGER_AGENT_VERSION=${JAEGER_AGENT_VERSION} --build-arg=TARGETARCH=$(GOARCH) --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} -t "$(IMG)" . ${DOCKER_BUILD_OPTIONS}
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=TARGETARCH=$(GOARCH) --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} -t "$(IMG)" . ${DOCKER_BUILD_OPTIONS}
.PHONY: dockerx
dockerx:
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker buildx build --push --progress=plain --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=JAEGER_AGENT_VERSION=${JAEGER_AGENT_VERSION} --build-arg=GOPROXY=${GOPROXY} --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} --platform=$(PLATFORMS) $(IMAGE_TAGS) .
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker buildx build --push --progress=plain --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=GOPROXY=${GOPROXY} --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} --platform=$(PLATFORMS) $(IMAGE_TAGS) .
.PHONY: push
push:
@ -175,7 +154,14 @@ endif
.PHONY: unit-tests
unit-tests: envtest
@echo Running unit tests...
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -p 1 ${GOTEST_OPTS} ./... -cover -coverprofile=cover.out -ldflags $(LD_FLAGS)
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ${GOTEST_OPTS} ./... -cover -coverprofile=cover.out -ldflags $(LD_FLAGS)
.PHONY: set-max-map-count
set-max-map-count:
# This is not required in OCP 4.1. The node tuning operator configures the property automatically
# when label tuned.openshift.io/elasticsearch=true label is present on the ES pod. The label
# is configured by ES operator.
$(VECHO)minishift ssh -- 'sudo sysctl -w vm.max_map_count=262144' > /dev/null 2>&1 || true
.PHONY: set-node-os-linux
set-node-os-linux:
@ -190,8 +176,7 @@ cert-manager: cmctl
undeploy-cert-manager:
kubectl delete --ignore-not-found=true -f https://github.com/jetstack/cert-manager/releases/download/v${CERTMANAGER_VERSION}/cert-manager.yaml
cmctl: $(CMCTL)
$(CMCTL): $(LOCALBIN)
cmctl:
./hack/install/install-cmctl.sh $(CERTMANAGER_VERSION)
.PHONY: es
@ -229,13 +214,13 @@ deploy-kafka-operator:
ifeq ($(KAFKA_OLM),true)
$(ECHO) Skipping kafka-operator deployment, assuming it has been installed via OperatorHub
else
$(VECHO)curl --fail --location https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.32.0/strimzi-0.32.0.tar.gz --output tests/_build/kafka-operator.tar.gz --create-dirs
$(VECHO)tar xf tests/_build/kafka-operator.tar.gz
$(VECHO)${SED} -i 's/namespace: .*/namespace: ${KAFKA_NAMESPACE}/' strimzi-${KAFKA_VERSION}/install/cluster-operator/*RoleBinding*.yaml
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/020-RoleBinding-strimzi-cluster-operator.yaml -n ${KAFKA_NAMESPACE}
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/023-RoleBinding-strimzi-cluster-operator.yaml -n ${KAFKA_NAMESPACE}
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml -n ${KAFKA_NAMESPACE}
$(VECHO)kubectl apply -f strimzi-${KAFKA_VERSION}/install/cluster-operator/ -n ${KAFKA_NAMESPACE}
$(VECHO)kubectl create clusterrolebinding strimzi-cluster-operator-namespaced --clusterrole=strimzi-cluster-operator-namespaced --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
$(VECHO)kubectl create clusterrolebinding strimzi-cluster-operator-entity-operator-delegation --clusterrole=strimzi-entity-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
$(VECHO)kubectl create clusterrolebinding strimzi-cluster-operator-topic-operator-delegation --clusterrole=strimzi-topic-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
$(VECHO)curl --fail --location $(KAFKA_YAML) --output tests/_build/kafka-operator.yaml --create-dirs
$(VECHO)${SED} -i 's/namespace: .*/namespace: $(KAFKA_NAMESPACE)/' tests/_build/kafka-operator.yaml
$(VECHO) kubectl -n $(KAFKA_NAMESPACE) apply -f tests/_build/kafka-operator.yaml | grep -v "already exists" || true
$(VECHO)kubectl set env deployment strimzi-cluster-operator -n ${KAFKA_NAMESPACE} STRIMZI_NAMESPACE="*"
endif
.PHONY: undeploy-kafka-operator
@ -303,10 +288,10 @@ generate: controller-gen api-docs ## Generate code containing DeepCopy, DeepCopy
test: unit-tests run-e2e-tests
.PHONY: all
all: check format lint build test
all: check format lint security build test
.PHONY: ci
ci: install-tools ensure-generate-is-noop check format lint build unit-tests
ci: ensure-generate-is-noop check format lint security build unit-tests
##@ Deployment
@ -350,23 +335,16 @@ CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(VECHO)./hack/install/install-controller-gen.sh
.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
test -s $(ENVTEST) || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
ENVTEST = $(shell pwd)/bin/setup-envtest
envtest: ## Download envtest-setup locally if necessary.
$(VECHO) GOBIN=$(shell pwd)/bin go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
.PHONY: bundle
bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files.
$(SED) -i "s#containerImage: quay.io/jaegertracing/jaeger-operator:$(OPERATOR_VERSION)#containerImage: quay.io/jaegertracing/jaeger-operator:$(VERSION)#g" config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
$(SED) -i 's/minKubeVersion: .*/minKubeVersion: $(MIN_KUBERNETES_VERSION)/' config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
$(SED) -i 's/com.redhat.openshift.versions=.*/com.redhat.openshift.versions=v$(MIN_OPENSHIFT_VERSION)/' bundle.Dockerfile
$(SED) -i 's/com.redhat.openshift.versions: .*/com.redhat.openshift.versions: v$(MIN_OPENSHIFT_VERSION)/' bundle/metadata/annotations.yaml
$(OPERATOR_SDK) generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --manifests --version $(VERSION) $(BUNDLE_METADATA_OPTS)
$(OPERATOR_SDK) bundle validate ./bundle
./hack/ignore-createdAt-bundle.sh
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
@ -419,6 +397,7 @@ catalog-push: ## Push a catalog image.
.PHONY: start-kind
start-kind: kind
echo $(USE_KIND_CLUSTER)
ifeq ($(USE_KIND_CLUSTER),true)
$(ECHO) Starting KIND cluster...
# Instead of letting KUTTL create the Kind cluster (using the CLI or in the kuttl-tests.yaml
@ -427,28 +406,17 @@ ifeq ($(USE_KIND_CLUSTER),true)
# * Some KUTTL versions are not able to start properly a Kind cluster
# * The cluster will be removed after running KUTTL (this can be disabled). Sometimes,
# the cluster teardown is not done properly and KUTTL can not be run with the --start-kind flag
# When the Kind cluster is not created by Kuttl, the kindContainers parameter
# from kuttl-tests.yaml has no effect, so the container images need to be
# loaded here.
# When the Kind cluster is not created by Kuttl, the
# kindContainers parameter from kuttl-tests.yaml has not effect so, it is needed to load the
# container images here.
$(VECHO)$(KIND) create cluster --config $(KIND_CONFIG) 2>&1 | grep -v "already exists" || true
# Install metrics-server for HPA
$(ECHO)"Installing the metrics-server in the kind cluster"
$(VECHO)kubectl apply -f $(METRICS_SERVER_YAML)
$(VECHO)kubectl patch deployment -n kube-system metrics-server --type "json" -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": --kubelet-insecure-tls}]'
# Install the ingress-controller
$(ECHO)"Installing the Ingress controller in the kind cluster"
$(VECHO)kubectl apply -f $(INGRESS_CONTROLLER_YAML)
# Check the deployments were done properly
$(ECHO)"Checking the metrics-server was deployed properly"
$(VECHO)kubectl wait --for=condition=available deployment/metrics-server -n kube-system --timeout=5m
$(ECHO)"Checking the Ingress controller deployment was done successfully"
$(VECHO)kubectl wait --for=condition=available deployment ingress-nginx-controller -n ingress-nginx --timeout=5m
$(VECHO)kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.1/deploy/static/provider/kind/deploy.yaml
else
$(ECHO)"KIND cluster creation disabled. Skipping..."
$(ECHO)KIND cluster creation disabled. Skipping...
endif
stop-kind:
$(ECHO)"Stopping the kind cluster"
$(ECHO)Stopping the kind cluster
$(VECHO)kind delete cluster
.PHONY: install-git-hooks
@ -469,26 +437,33 @@ tools: kustomize controller-gen operator-sdk
.PHONY: install-tools
install-tools: operator-sdk
$(VECHO)./hack/install/install-golangci-lint.sh
$(VECHO)./hack/install/install-goimports.sh
$(VECHO)${GO_FLAGS} ./.ci/vgot.sh \
golang.org/x/lint/golint \
golang.org/x/tools/cmd/goimports
$(VECHO)./hack/install/install-gosec.sh
.PHONY: kustomize
kustomize: $(KUSTOMIZE)
$(KUSTOMIZE): $(LOCALBIN)
kustomize:
./hack/install/install-kustomize.sh
$(eval KUSTOMIZE=$(shell echo ${PWD}/bin/kustomize))
.PHONY: kuttl
kuttl:
./hack/install/install-kuttl.sh
$(eval KUTTL=$(shell echo ${PWD}/bin/kubectl-kuttl))
.PHONY: kind
kind: $(KIND)
$(KIND): $(LOCALBIN)
kind:
./hack/install/install-kind.sh
$(eval KIND=$(shell echo ${PWD}/bin/kind))
.PHONY: prepare-release
prepare-release:
$(VECHO)./.ci/prepare-release.sh
scorecard-tests: operator-sdk
echo "Operator sdk is $(OPERATOR_SDK)"
$(OPERATOR_SDK) scorecard bundle -w 10m || (echo "scorecard test failed" && exit 1)
echo "Operator sdk is " $(OPERATOR_SDK)
$(OPERATOR_SDK) scorecard bundle -w 600s || (echo "scorecard test failed" && exit 1)
scorecard-tests-local: kind
$(VECHO)$(KIND) create cluster --config $(KIND_CONFIG) 2>&1 | grep -v "already exists" || true
@ -497,12 +472,18 @@ scorecard-tests-local: kind
$(VECHO)kubectl wait --timeout=5m --for=condition=available deployment/coredns -n kube-system
$(VECHO)$(MAKE) scorecard-tests
OPERATOR_SDK = $(shell pwd)/bin/operator-sdk
.PHONY: operator-sdk
operator-sdk: $(OPERATOR_SDK)
$(OPERATOR_SDK): $(LOCALBIN)
test -s $(OPERATOR_SDK) || curl -sLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/v${OPERATOR_SDK_VERSION}/operator-sdk_`go env GOOS`_`go env GOARCH`
@chmod +x $(OPERATOR_SDK)
operator-sdk:
@{ \
set -e ;\
[ -d bin ] || mkdir bin ;\
curl -L -o $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/v${OPERATOR_SDK_VERSION}/operator-sdk_`go env GOOS`_`go env GOARCH`;\
chmod +x $(OPERATOR_SDK) ;\
}
BIN_LOCAL = $(shell pwd)/bin
CRDOC = $(BIN_LOCAL)/crdoc
api-docs: crdoc kustomize
@{ \
set -e ;\
@ -511,8 +492,9 @@ api-docs: crdoc kustomize
$(CRDOC) --resources $$TMP_DIR/crd-output.yaml --output docs/api.md ;\
}
.PHONY: crdoc
crdoc: $(CRDOC)
$(CRDOC): $(LOCALBIN)
test -s $(CRDOC) || GOBIN=$(LOCALBIN) go install fybrik.io/crdoc@v0.5.2
@chmod +x $(CRDOC)
# Find or download crdoc
crdoc:
ifeq (, $(shell which $(CRDOC)))
@GOBIN=$(BIN_LOCAL) go install fybrik.io/crdoc@v0.5.2
endif

136
README.md
View File

@ -1,4 +1,4 @@
[![Build Status][ci-img]][ci] [![Go Report Card][goreport-img]][goreport] [![Code Coverage][cov-img]][cov] [![GoDoc][godoc-img]][godoc] [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/jaegertracing/jaeger-operator/badge)](https://securityscorecards.dev/viewer/?uri=github.com/jaegertracing/jaeger-operator)
[![Build Status][ci-img]][ci] [![Go Report Card][goreport-img]][goreport] [![Code Coverage][cov-img]][cov] [![GoDoc][godoc-img]][godoc]
# Jaeger Operator for Kubernetes
@ -67,143 +67,11 @@ The jaeger Operator *might* work on other untested versions of Strimzi Operator,
Sometimes it is preferable to generate plain manifests files instead of running an operator in a cluster. `jaeger-operator generate` generates kubernetes manifests from a given CR. In this example we apply the manifest generated by [examples/simplest.yaml](https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml) to the namespace `jaeger-test`:
```bash
curl https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml | docker run -i --rm jaegertracing/jaeger-operator:main generate | kubectl apply -n jaeger-test -f -
curl https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml | docker run -i --rm jaegertracing/jaeger-operator:master generate | kubectl apply -n jaeger-test -f -
```
It is recommended to deploy the operator instead of generating a static manifest.
## Jaeger V2 Operator
As Jaeger V2 is released, it has been decided that Jaeger V2 will be deployed on Kubernetes using the [OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-operator). This will benefit the users of both Jaeger and OpenTelemetry. To use Jaeger V2 with the OpenTelemetry Operator, the steps are as follows:
* Install the cert-manager in the existing cluster with the command:
```bash
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.1/cert-manager.yaml
```
Please verify all the resources (e.g., Pods and Deployments) are in a ready state in the `cert-manager` namespace.
* Install the OpenTelemetry Operator by running:
```bash
kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
```
Please verify all the resources (e.g., Pods and Deployments) are in a ready state in the `opentelemetry-operator-system` namespace.
### Using Jaeger with in-memory storage
Once all the resources are ready, create a Jaeger instance as follows:
```yaml
kubectl apply -f - <<EOF
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
name: jaeger-inmemory-instance
spec:
image: jaegertracing/jaeger:latest
ports:
- name: jaeger
port: 16686
config:
service:
extensions: [jaeger_storage, jaeger_query]
pipelines:
traces:
receivers: [otlp]
exporters: [jaeger_storage_exporter]
extensions:
jaeger_query:
storage:
traces: memstore
jaeger_storage:
backends:
memstore:
memory:
max_traces: 100000
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
exporters:
jaeger_storage_exporter:
trace_storage: memstore
EOF
```
To use the in-memory storage ui for Jaeger V2, expose the pod, deployment or the service as follows:
```bash
kubectl port-forward deployment/jaeger-inmemory-instance-collector 8080:16686
```
Or
```bash
kubectl port-forward service/jaeger-inmemory-instance-collector 8080:16686
```
Once done, type `localhost:8080` in the browser to interact with the UI.
[Note] There's an ongoing development in OpenTelemetry Operator where users will be able to interact directly with the UI.
### Using Jaeger with database to store traces
To use Jaeger V2 with the supported database, it is mandatory to create database deployments and they should be in `ready` state [(ref)](https://www.jaegertracing.io/docs/2.0/storage/).
Create a Kubernetes Service that exposes the database pods enabling communication between the database and Jaeger pods.
This can be achieved by creating a service in two ways, first by creating it [manually](https://kubernetes.io/docs/concepts/services-networking/service/) or second by creating it using imperative command.
```bash
kubectl expose pods <pod-name> --port=<port-number> --name=<name-of-the-service>
```
Or
```bash
kubectl expose deployment <deployment-name> --port=<port-number> --name=<name-of-the-service>
```
After the service is created, add the name of the service as an endpoint in their respective config as follows:
* [Cassandra DB](https://github.com/jaegertracing/jaeger/blob/main/cmd/jaeger/config-cassandra.yaml):
```yaml
jaeger_storage:
backends:
some_storage:
cassandra:
connection:
servers: [<name-of-the-service>]
```
* [ElasticSearch](https://github.com/jaegertracing/jaeger/blob/main/cmd/jaeger/config-elasticsearch.yaml):
```yaml
jaeger_storage:
backends:
some_storage:
      elasticsearch:
servers: [<name-of-the-service>]
```
Use the modified config to create Jaeger instance with the help of OpenTelemetry Operator.
```yaml
kubectl apply -f - <<EOF
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
name: jaeger-storage-instance # name of your choice
spec:
image: jaegertracing/jaeger:latest
ports:
- name: jaeger
port: 16686
config:
# modified config
EOF
```
## Contributing and Developing
Please see [CONTRIBUTING.md](CONTRIBUTING.md).

View File

@ -16,16 +16,12 @@ Steps to release a new version of the Jaeger Operator:
1. Change the `versions.txt` so that it lists the target version of Jaeger (if required). **Don't touch the operator version**: it will be changed automatically in the next step.
2. Confirm that `MIN_KUBERNETES_VERSION` and `MIN_OPENSHIFT_VERSION` in the `Makefile` are still up-to-date, and update them if required.
2. Run `OPERATOR_VERSION=1.30.0 make prepare-release`, using the operator version that will be released.
3. Run the E2E tests in OpenShift as described in [the CONTRIBUTING.md](CONTRIBUTING.md#an-external-cluster-like-openshift) file. The tests will be executed automatically in Kubernetes by the GitHub Actions CI later.
4. Prepare a changelog since last release.
4. Update the release manager schedule.
5. Commit the changes and create a pull request:
```sh
@ -59,14 +55,3 @@ After the PRs have been made it must be ensured that:
- Images listed in the ClusterServiceVersion (CSV) have a versions tag [#1682](https://github.com/jaegertracing/jaeger-operator/issues/1682)
- No `bundle` folder is included in the release
- No foreign CRs like prometheus are in the manifests
## Release managers
The operator should be released within a week after the [Jaeger release](https://github.com/jaegertracing/jaeger/blob/main/RELEASE.md#release-managers).
| Version | Release Manager |
|---------| -------------------------------------------------------- |
| 1.63.0 | [Benedikt Bongartz](https://github.com/frzifus) |
| 1.64.0 | [Pavol Loffay](https://github.com/pavolloffay) |
| 1.65.0 | [Israel Blancas](https://github.com/iblancasa) |
| 1.66.0 | [Ruben Vargas](https://github.com/rubenvp8510) |

View File

@ -5,7 +5,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestUnmarshalJSON(t *testing.T) {
@ -29,7 +28,7 @@ func TestUnmarshalJSON(t *testing.T) {
t.Run(name, func(t *testing.T) {
ds := DeploymentStrategy("")
err := json.Unmarshal([]byte(tc.json), &ds)
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, tc.expected, ds)
})
}
@ -49,7 +48,7 @@ func TestMarshalJSON(t *testing.T) {
for name, tc := range tcs {
t.Run(name, func(t *testing.T) {
data, err := json.Marshal(tc.strategy)
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, tc.expected, string(data))
})
}

View File

@ -4,7 +4,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFreeForm(t *testing.T) {
@ -17,7 +16,7 @@ func TestFreeForm(t *testing.T) {
},
})
json, err := o.MarshalJSON()
require.NoError(t, err)
assert.NoError(t, err)
assert.NotNil(t, json)
assert.Equal(t, uiconfig, string(*o.json))
}
@ -27,7 +26,7 @@ func TestFreeFormUnmarhalMarshal(t *testing.T) {
o := NewFreeForm(nil)
o.UnmarshalJSON([]byte(uiconfig))
json, err := o.MarshalJSON()
require.NoError(t, err)
assert.NoError(t, err)
assert.NotNil(t, json)
assert.Equal(t, uiconfig, string(*o.json))
}
@ -67,9 +66,9 @@ func TestToMap(t *testing.T) {
f := NewFreeForm(test.m)
got, err := f.GetMap()
if test.err != "" {
require.EqualError(t, err, test.err)
assert.EqualError(t, err, test.err)
} else {
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, test.expected, got)
}
}

View File

@ -1,6 +1,6 @@
// Package v1 contains API Schema definitions for the jaegertracing.io v1 API group
// +kubebuilder:object:generate=true
// +groupName=jaegertracing.io
//+kubebuilder:object:generate=true
//+groupName=jaegertracing.io
package v1
import (

View File

@ -4,7 +4,6 @@ import (
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -21,51 +20,42 @@ const (
// FlagCronJobsVersion represents the version of the Kubernetes CronJob API
FlagCronJobsVersion = "cronjobs-version"
// FlagCronJobsVersionBatchV1 represents the batch/v1 version of the Kubernetes CronJob API, available as of 1.21
// FlagCronJobsVersionBatchV1 represents the batch/v1 version of the kubernetes CronJob API, available as of 1.21
FlagCronJobsVersionBatchV1 = "batch/v1"
// FlagCronJobsVersionBatchV1Beta1 represents the batch/v1beta1 version of the Kubernetes CronJob API, no longer available as of 1.25
// FlagCronJobsVersionBatchV1Beta1 represents the batch/v1beta1 version of the kubernetes CronJob API, no longer available as of 1.25
FlagCronJobsVersionBatchV1Beta1 = "batch/v1beta1"
// FlagAutoscalingVersion represents the version of the Kubernetes Autoscaling API
FlagAutoscalingVersion = "autoscaling-version"
// FlagPlatformKubernetes represents the value for the 'platform' flag for Kubernetes
FlagPlatformKubernetes = "kubernetes"
// FlagAutoscalingVersionV2 represents the v2 version of the Kubernetes Autoscaling API, available as of 1.23
FlagAutoscalingVersionV2 = "autoscaling/v2"
// FlagAutoscalingVersionV2Beta2 represents the v2beta2 version of the Kubernetes Autoscaling API, no longer available as of 1.26
FlagAutoscalingVersionV2Beta2 = "autoscaling/v2beta2"
// FlagPlatform represents the flag to set the platform
FlagPlatform = "platform"
// FlagPlatformOpenShift represents the value for the 'platform' flag for OpenShift
FlagPlatformOpenShift = "openshift"
// FlagPlatformAutoDetect represents the "auto-detect" value for the platform flag
FlagPlatformAutoDetect = "auto-detect"
// FlagESProvision represents the 'es-provision' flag
FlagESProvision = "es-provision"
// FlagProvisionElasticsearchAuto represents the 'auto' value for the 'es-provision' flag
FlagProvisionElasticsearchAuto = "auto"
// FlagProvisionElasticsearchYes represents the value 'yes' for the 'es-provision' flag
FlagProvisionElasticsearchYes = "yes"
// FlagProvisionElasticsearchNo represents the value 'no' for the 'es-provision' flag
FlagProvisionElasticsearchNo = "no"
// FlagProvisionKafkaAuto represents the 'auto' value for the 'kafka-provision' flag
FlagProvisionKafkaAuto = "auto"
// FlagKafkaProvision represents the 'kafka-provision' flag.
FlagKafkaProvision = "kafka-provision"
// FlagProvisionKafkaYes represents the value 'yes' for the 'kafka-provision' flag
FlagProvisionKafkaYes = "yes"
// FlagAuthDelegatorAvailability represents the 'auth-delegator-available' flag.
FlagAuthDelegatorAvailability = "auth-delegator-available"
// FlagOpenShiftOauthProxyImage represents the 'openshift-oauth-proxy-image' flag.
FlagOpenShiftOauthProxyImage = "openshift-oauth-proxy-image"
// FlagProvisionKafkaNo represents the value 'no' for the 'kafka-provision' flag
FlagProvisionKafkaNo = "no"
// IngressSecurityNone disables any form of security for ingress objects (default)
IngressSecurityNone IngressSecurityType = ""
// FlagDefaultIngressClass represents the default Ingress class from the cluster
FlagDefaultIngressClass = "default-ingressclass"
// IngressSecurityNoneExplicit used when the user specifically set it to 'none'
IngressSecurityNoneExplicit IngressSecurityType = "none"
@ -175,8 +165,8 @@ type JaegerStatus struct {
}
// Jaeger is the Schema for the jaegers API
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Jaeger"
// +operator-sdk:csv:customresourcedefinitions:resources={{CronJob,v1beta1},{Pod,v1},{Deployment,apps/v1}, {Ingress,networking/v1},{DaemonSets,apps/v1},{StatefulSets,apps/v1},{ConfigMaps,v1},{Service,v1}}
// +kubebuilder:subresource:status
@ -229,9 +219,6 @@ type JaegerCommonSpec struct {
// +optional
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
// +optional
ContainerSecurityContext *v1.SecurityContext `json:"containerSecurityContext,omitempty"`
// +optional
ServiceAccount string `json:"serviceAccount,omitempty"`
@ -283,7 +270,7 @@ type JaegerQuerySpec struct {
// +optional
// TracingEnabled if set to false adds the JAEGER_DISABLED environment flag and removes the injected
// agent container from the query component to disable tracing requests to the query service.
// The default, if omitted, is true
// The default, if ommited, is true
TracingEnabled *bool `json:"tracingEnabled,omitempty"`
// +optional
@ -292,10 +279,6 @@ type JaegerQuerySpec struct {
// +optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Strategy"
Strategy *appsv1.DeploymentStrategy `json:"strategy,omitempty"`
// +optional
// +nullable
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}
// JaegerUISpec defines the options to be used to configure the UI
@ -327,9 +310,6 @@ type JaegerIngressSpec struct {
// +listType=atomic
Hosts []string `json:"hosts,omitempty"`
// +optional
PathType networkingv1.PathType `json:"pathType,omitempty"`
// +optional
// +listType=atomic
TLS []JaegerIngressTLSSpec `json:"tls,omitempty"`
@ -374,10 +354,6 @@ type JaegerIngressOpenShiftSpec struct {
// SkipLogout tells the operator to not automatically add a "Log Out" menu option to the custom Jaeger configuration
// +optional
SkipLogout *bool `json:"skipLogout,omitempty"`
// Timeout defines client timeout from oauth-proxy to jaeger.
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
}
// JaegerAllInOneSpec defines the options to be used when deploying the query
@ -402,15 +378,12 @@ type JaegerAllInOneSpec struct {
// +optional
// TracingEnabled if set to false adds the JAEGER_DISABLED environment flag and removes the injected
// agent container from the query component to disable tracing requests to the query service.
// The default, if omitted, is true
// The default, if ommited, is true
TracingEnabled *bool `json:"tracingEnabled,omitempty"`
// +optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Strategy"
Strategy *appsv1.DeploymentStrategy `json:"strategy,omitempty"`
// +optional
PriorityClassName string `json:"priorityClassName,omitempty"`
}
// AutoScaleSpec defines the common elements used for create HPAs
@ -429,6 +402,7 @@ type AutoScaleSpec struct {
// JaegerCollectorSpec defines the options to be used when deploying the collector
type JaegerCollectorSpec struct {
// +optional
AutoScaleSpec `json:",inline,omitempty"`
@ -466,16 +440,6 @@ type JaegerCollectorSpec struct {
// +optional
KafkaSecretName string `json:"kafkaSecretName"`
// +optional
// +nullable
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// +optional
Lifecycle *v1.Lifecycle `json:"lifecycle,omitempty"`
// +optional
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
}
// JaegerIngesterSpec defines the options to be used when deploying the ingester
@ -506,10 +470,6 @@ type JaegerIngesterSpec struct {
// +optional
KafkaSecretName string `json:"kafkaSecretName"`
// +optional
// +nullable
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}
// JaegerAgentSpec defines the options to be used when deploying the agent
@ -580,9 +540,6 @@ type JaegerStorageSpec struct {
type JaegerMetricsStorageSpec struct {
// +optional
Type JaegerStorageType `json:"type,omitempty"`
// +optional
ServerUrl string `json:"server-url,omitempty"`
}
// ElasticsearchSpec represents the ES configuration options that we pass down to the OpenShift Elasticsearch operator.
@ -622,9 +579,6 @@ type ElasticsearchSpec struct {
// +optional
// +listType=atomic
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// +optional
ProxyResources *v1.ResourceRequirements `json:"proxyResources,omitempty"`
}
// JaegerCassandraCreateSchemaSpec holds the options related to the create-schema batch job

View File

@ -12,7 +12,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
const (
@ -35,6 +35,8 @@ func (j *Jaeger) SetupWebhookWithManager(mgr ctrl.Manager) error {
//+kubebuilder:webhook:path=/mutate-jaegertracing-io-v1-jaeger,mutating=true,failurePolicy=fail,sideEffects=None,groups=jaegertracing.io,resources=jaegers,verbs=create;update,versions=v1,name=mjaeger.kb.io,admissionReviewVersions={v1}
var _ webhook.Defaulter = &Jaeger{}
func (j *Jaeger) objsWithOptions() []*Options {
return []*Options{
&j.Spec.AllInOne.Options, &j.Spec.Query.Options, &j.Spec.Collector.Options,
@ -45,7 +47,6 @@ func (j *Jaeger) objsWithOptions() []*Options {
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (j *Jaeger) Default() {
jaegerlog.Info("default", "name", j.Name)
jaegerlog.Info("WARNING jaeger-agent is deprecated and will removed in v1.55.0. See https://github.com/jaegertracing/jaeger/issues/4739", "component", "agent")
if j.Spec.Storage.Elasticsearch.Name == "" {
j.Spec.Storage.Elasticsearch.Name = defaultElasticsearchName
@ -82,14 +83,16 @@ func (j *Jaeger) Default() {
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-jaegertracing-io-v1-jaeger,mutating=false,failurePolicy=fail,sideEffects=None,groups=jaegertracing.io,resources=jaegers,verbs=create;update,versions=v1,name=vjaeger.kb.io,admissionReviewVersions={v1}
var _ webhook.Validator = &Jaeger{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateCreate() (admission.Warnings, error) {
func (j *Jaeger) ValidateCreate() error {
jaegerlog.Info("validate create", "name", j.Name)
return j.ValidateUpdate(nil)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
func (j *Jaeger) ValidateUpdate(_ runtime.Object) error {
jaegerlog.Info("validate update", "name", j.Name)
if ShouldInjectOpenShiftElasticsearchConfiguration(j.Spec.Storage) && j.Spec.Storage.Elasticsearch.DoNotProvision {
@ -100,24 +103,24 @@ func (j *Jaeger) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
Name: j.Spec.Storage.Elasticsearch.Name,
}, es)
if errors.IsNotFound(err) {
return nil, fmt.Errorf("elasticsearch instance not found: %w", err)
return fmt.Errorf("elasticsearch instance not found: %v", err)
}
}
for _, opt := range j.objsWithOptions() {
got := opt.DeepCopy().ToArgs()
if f := getAdditionalTLSFlags(got); f != nil {
return nil, fmt.Errorf("tls flags incomplete, got: %v", got)
return fmt.Errorf("tls flags incomplete, got: %v", got)
}
}
return nil, nil
return nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateDelete() (admission.Warnings, error) {
func (j *Jaeger) ValidateDelete() error {
jaegerlog.Info("validate delete", "name", j.Name)
return nil, nil
return nil
}
// OpenShiftElasticsearchNodeCount returns total node count of Elasticsearch nodes.

View File

@ -4,21 +4,15 @@ import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
var (
_ webhook.Defaulter = &Jaeger{}
_ webhook.Validator = &Jaeger{}
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
)
func TestDefault(t *testing.T) {
@ -171,8 +165,8 @@ func TestDefault(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
require.NoError(t, esv1.AddToScheme(scheme.Scheme))
require.NoError(t, AddToScheme(scheme.Scheme))
esv1.AddToScheme(scheme.Scheme)
AddToScheme(scheme.Scheme)
fakeCl := fake.NewClientBuilder().WithRuntimeObjects(test.objs...).Build()
cl = fakeCl
@ -183,9 +177,7 @@ func TestDefault(t *testing.T) {
}
func TestValidateDelete(t *testing.T) {
warnings, err := new(Jaeger).ValidateDelete()
assert.Nil(t, warnings)
require.NoError(t, err)
assert.Nil(t, new(Jaeger).ValidateDelete())
}
func TestValidate(t *testing.T) {
@ -278,19 +270,18 @@ func TestValidate(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
require.NoError(t, esv1.AddToScheme(scheme.Scheme))
require.NoError(t, AddToScheme(scheme.Scheme))
esv1.AddToScheme(scheme.Scheme)
AddToScheme(scheme.Scheme)
fakeCl := fake.NewClientBuilder().WithRuntimeObjects(test.objsToCreate...).Build()
cl = fakeCl
warnings, err := test.current.ValidateCreate()
err := test.current.ValidateCreate()
if test.err != "" {
require.Error(t, err)
assert.NotNil(t, err)
assert.Equal(t, test.err, err.Error())
} else {
require.NoError(t, err)
assert.Nil(t, err)
}
assert.Nil(t, warnings)
})
}
}

View File

@ -1,14 +1,11 @@
package v1
import (
"github.com/go-logr/logr"
logf "sigs.k8s.io/controller-runtime/pkg/log"
)
import log "github.com/sirupsen/logrus"
// Logger returns a logger filled with context-related fields, such as Name and Namespace
func (j *Jaeger) Logger() logr.Logger {
return logf.Log.WithValues(
"instance", j.Name,
"namespace", j.Namespace,
)
func (j *Jaeger) Logger() *log.Entry {
return log.WithFields(log.Fields{
"instance": j.Name,
"namespace": j.Namespace,
})
}

View File

@ -14,12 +14,12 @@ type Values map[string]interface{}
func (v *Values) DeepCopy() *Values {
out := make(Values, len(*v))
for key, val := range *v {
switch val := val.(type) {
switch val.(type) {
case string:
out[key] = val
case []string:
out[key] = append([]string(nil), val...)
out[key] = append([]string(nil), val.([]string)...)
}
}
return &out
@ -132,11 +132,11 @@ func (o *Options) ToArgs() []string {
if len(o.opts) > 0 {
args := make([]string, 0, len(o.opts))
for k, v := range o.opts {
switch v := v.(type) {
switch v.(type) {
case string:
args = append(args, fmt.Sprintf("--%s=%v", k, v))
case []string:
for _, vv := range v {
for _, vv := range v.([]string) {
args = append(args, fmt.Sprintf("--%s=%v", k, vv))
}
}
@ -157,9 +157,9 @@ func (o *Options) Map() map[string]interface{} {
func (o *Options) StringMap() map[string]string {
smap := make(map[string]string)
for k, v := range o.opts {
switch v := v.(type) {
switch v.(type) {
case string:
smap[k] = v
smap[k] = v.(string)
}
}
return smap

View File

@ -18,7 +18,7 @@ func TestSimpleOption(t *testing.T) {
func TestNoOptions(t *testing.T) {
o := Options{}
assert.Empty(t, o.ToArgs())
assert.Len(t, o.ToArgs(), 0)
}
func TestNestedOption(t *testing.T) {
@ -40,7 +40,7 @@ func TestMarshalling(t *testing.T) {
})
b, err := json.Marshal(o)
require.NoError(t, err)
assert.NoError(t, err)
s := string(b)
assert.Contains(t, s, `"es.password":"changeme"`)
assert.Contains(t, s, `"es.server-urls":"http://elasticsearch.default.svc:9200"`)
@ -85,9 +85,9 @@ func TestUnmarshalToArgs(t *testing.T) {
opts := Options{}
err := opts.UnmarshalJSON([]byte(test.in))
if test.err != "" {
require.EqualError(t, err, test.err)
assert.EqualError(t, err, test.err)
} else {
require.NoError(t, err)
assert.NoError(t, err)
args := opts.ToArgs()
sort.SliceStable(args, func(i, j int) bool {
return args[i] < args[j]
@ -129,7 +129,7 @@ func TestMarshallRaw(t *testing.T) {
o := NewOptions(nil)
o.json = &json
bytes, err := o.MarshalJSON()
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, bytes, json)
}
@ -137,7 +137,7 @@ func TestMarshallEmpty(t *testing.T) {
o := NewOptions(nil)
json := []byte(`{}`)
bytes, err := o.MarshalJSON()
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, bytes, json)
}
@ -151,7 +151,7 @@ func TestUpdate(t *testing.T) {
o.Map()["key"] = "new"
// verify
assert.Equal(t, "new", o.opts["key"])
assert.Equal(t, o.opts["key"], "new")
}
func TestStringMap(t *testing.T) {
@ -170,7 +170,7 @@ func TestDeepCopy(t *testing.T) {
require.NoError(t, err)
copy := o1.opts.DeepCopy()
assert.Equal(t, &(o1.opts), copy)
assert.Equal(t, copy, &(o1.opts))
}
func TestRepetitiveArguments(t *testing.T) {
@ -186,4 +186,5 @@ func TestRepetitiveArguments(t *testing.T) {
assert.Len(t, args, 3)
assert.Equal(t, expected, args)
}

View File

@ -1,4 +1,5 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by controller-gen. DO NOT EDIT.
@ -7,7 +8,6 @@ package v1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@ -69,11 +69,6 @@ func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ProxyResources != nil {
in, out := &in.ProxyResources, &out.ProxyResources
*out = new(corev1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSpec.
@ -256,23 +251,6 @@ func (in *JaegerCollectorSpec) DeepCopyInto(out *JaegerCollectorSpec) {
*out = new(appsv1.DeploymentStrategy)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Lifecycle != nil {
in, out := &in.Lifecycle, &out.Lifecycle
*out = new(corev1.Lifecycle)
(*in).DeepCopyInto(*out)
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JaegerCollectorSpec.
@ -334,11 +312,6 @@ func (in *JaegerCommonSpec) DeepCopyInto(out *JaegerCommonSpec) {
*out = new(corev1.PodSecurityContext)
(*in).DeepCopyInto(*out)
}
if in.ContainerSecurityContext != nil {
in, out := &in.ContainerSecurityContext, &out.ContainerSecurityContext
*out = new(corev1.SecurityContext)
(*in).DeepCopyInto(*out)
}
if in.LivenessProbe != nil {
in, out := &in.LivenessProbe, &out.LivenessProbe
*out = new(corev1.Probe)
@ -496,13 +469,6 @@ func (in *JaegerIngesterSpec) DeepCopyInto(out *JaegerIngesterSpec) {
*out = new(appsv1.DeploymentStrategy)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JaegerIngesterSpec.
@ -528,11 +494,6 @@ func (in *JaegerIngressOpenShiftSpec) DeepCopyInto(out *JaegerIngressOpenShiftSp
*out = new(bool)
**out = **in
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JaegerIngressOpenShiftSpec.
@ -673,13 +634,6 @@ func (in *JaegerQuerySpec) DeepCopyInto(out *JaegerQuerySpec) {
*out = new(appsv1.DeploymentStrategy)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JaegerQuerySpec.

View File

@ -11,9 +11,6 @@ LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.13.0+git
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
# OpenShift specific labels.
LABEL com.redhat.openshift.versions=v4.12
# Copy files to locations specified by labels.
COPY bundle/manifests /manifests/
COPY bundle/metadata /metadata/

View File

@ -18,16 +18,16 @@ metadata:
capabilities: Deep Insights
categories: Logging & Tracing
certified: "false"
containerImage: quay.io/jaegertracing/jaeger-operator:1.62.0
createdAt: "2025-01-22T20:40:19Z"
containerImage: quay.io/jaegertracing/jaeger-operator
createdAt: "2019-09-04T13:28:40+00:00"
description: Provides tracing, monitoring and troubleshooting for microservices-based
distributed systems
operators.openshift.io/infrastructure-features: '["disconnected"]'
operators.operatorframework.io/builder: operator-sdk-v1.32.0
operators.operatorframework.io/builder: operator-sdk-v1.22.2
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
repository: https://github.com/jaegertracing/jaeger-operator
support: Jaeger Community
name: jaeger-operator.v1.65.0
name: jaeger-operator.v1.37.0
namespace: placeholder
spec:
apiservicedefinitions: {}
@ -336,7 +336,6 @@ spec:
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
- ingresses
verbs:
- create
@ -370,12 +369,6 @@ spec:
- patch
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create
serviceAccountName: jaeger-operator
deployments:
- label:
@ -410,11 +403,7 @@ spec:
fieldPath: metadata.namespace
- name: OPERATOR_NAME
value: jaeger-operator
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
image: quay.io/jaegertracing/jaeger-operator:1.65.0
image: quay.io/jaegertracing/jaeger-operator:1.37.0
livenessProbe:
httpGet:
path: /healthz
@ -433,6 +422,9 @@ spec:
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
@ -447,7 +439,7 @@ spec:
- --upstream=http://127.0.0.1:8383/
- --logtostderr=true
- --v=0
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
@ -524,14 +516,13 @@ spec:
- email: jaeger-tracing@googlegroups.com
name: Jaeger Google Group
maturity: alpha
minKubeVersion: 1.19.0
provider:
name: CNCF
replaces: jaeger-operator.v1.62.0
replaces: jaeger-operator.v1.36.0
selector:
matchLabels:
name: jaeger-operator
version: 1.65.0
version: 1.37.0
webhookdefinitions:
- admissionReviewVersions:
- v1

File diff suppressed because it is too large Load Diff

View File

@ -9,6 +9,3 @@ annotations:
operators.operatorframework.io.metrics.builder: operator-sdk-v1.13.0+git
operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
# OpenShift annotations
com.redhat.openshift.versions: v4.12

View File

@ -8,7 +8,7 @@ stages:
- entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: basic
test: basic-check-spec-test
@ -18,7 +18,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-bundle-validation-test
@ -28,7 +28,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-crds-have-validation-test
@ -38,7 +38,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-crds-have-resources-test
@ -48,7 +48,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-spec-descriptors-test
@ -58,7 +58,7 @@ stages:
- entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.32.0
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-status-descriptors-test

File diff suppressed because it is too large Load Diff

View File

@ -9,7 +9,7 @@ spec:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8383/"

View File

@ -5,4 +5,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/jaegertracing/jaeger-operator
newTag: 1.65.0
newTag: 1.37.0

View File

@ -15,69 +15,44 @@ spec:
securityContext:
runAsNonRoot: true
containers:
- command:
- /jaeger-operator
args:
- start
- --leader-elect
image: controller:latest
name: jaeger-operator
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 128Mi
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.annotations['olm.targetNamespaces']
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: OPERATOR_NAME
value: "jaeger-operator"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
- command:
- /jaeger-operator
args:
- start
- --leader-elect
image: controller:latest
name: jaeger-operator
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.annotations['olm.targetNamespaces']
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: OPERATOR_NAME
value: "jaeger-operator"
serviceAccountName: jaeger-operator
terminationGracePeriodSeconds: 10

View File

@ -15,8 +15,8 @@ metadata:
capabilities: Deep Insights
categories: Logging & Tracing
certified: "false"
containerImage: quay.io/jaegertracing/jaeger-operator:1.62.0
createdAt: "2023-05-16T04:47:12Z"
containerImage: quay.io/jaegertracing/jaeger-operator
createdAt: "2019-09-04T13:28:40+00:00"
description: Provides tracing, monitoring and troubleshooting for microservices-based
distributed systems
operators.openshift.io/infrastructure-features: '["disconnected"]'
@ -122,10 +122,9 @@ spec:
- email: jaeger-tracing@googlegroups.com
name: Jaeger Google Group
maturity: alpha
minKubeVersion: 1.19.0
provider:
name: CNCF
replaces: jaeger-operator.v1.62.0
replaces: jaeger-operator.v1.36.0
selector:
matchLabels:
name: jaeger-operator

View File

@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../default
components:
- ./patch

View File

@ -1,40 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
$patch: delete
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: jaeger-operator-metrics-reader
- patch: |
- op: replace
path: /kind
value: Role
target:
group: rbac.authorization.k8s.io
kind: ClusterRole
- patch: |
- op: replace
path: /roleRef/kind
value: Role
target:
group: rbac.authorization.k8s.io
kind: ClusterRoleBinding
- patch: |
- op: replace
path: /kind
value: RoleBinding
target:
group: rbac.authorization.k8s.io
kind: ClusterRoleBinding
- target:
group: apps
version: v1
name: jaeger-operator
kind: Deployment
patch: |-
- op: replace
path: /spec/template/spec/containers/0/env/0/valueFrom/fieldRef/fieldPath
value: metadata.namespace

View File

@ -2,6 +2,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
@ -221,7 +222,6 @@ rules:
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
- ingresses
verbs:
- create
@ -255,9 +255,3 @@ rules:
- patch
- update
- watch
- apiGroups:
- route.openshift.io
resources:
- routes/custom-host
verbs:
- create

View File

@ -2,6 +2,7 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
@ -48,6 +49,7 @@ webhooks:
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:

View File

@ -4,8 +4,8 @@ import (
"context"
"encoding/json"
"net/http"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
@ -15,7 +15,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@ -25,13 +24,15 @@ import (
"github.com/jaegertracing/jaeger-operator/pkg/tracing"
)
var _ webhook.AdmissionHandler = (*deploymentInterceptor)(nil)
var (
_ admission.DecoderInjector = (*deploymentInterceptor)(nil)
_ webhook.AdmissionHandler = (*deploymentInterceptor)(nil)
)
// NewDeploymentInterceptorWebhook creates a new deployment mutating webhook to be registered
func NewDeploymentInterceptorWebhook(c client.Client, decoder *admission.Decoder) webhook.AdmissionHandler {
func NewDeploymentInterceptorWebhook(c client.Client) webhook.AdmissionHandler {
return &deploymentInterceptor{
client: c,
decoder: decoder,
client: c,
}
}
@ -49,18 +50,6 @@ type deploymentInterceptor struct {
decoder *admission.Decoder
}
func (d *deploymentInterceptor) shouldHandleDeployment(req admission.Request) bool {
if namespaces := viper.GetString(v1.ConfigWatchNamespace); namespaces != v1.WatchAllNamespaces {
for _, ns := range strings.Split(namespaces, ",") {
if strings.EqualFold(ns, req.Namespace) {
return true
}
}
return false
}
return true
}
// Handle adds a label to a generated pod if deployment or namespace provide annotaion
func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Request) admission.Response {
tracer := otel.GetTracerProvider().Tracer(v1.ReconciliationTracer)
@ -70,26 +59,22 @@ func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Reques
attribute.String("name", req.Name),
attribute.String("namespace", req.Namespace),
)
if !d.shouldHandleDeployment(req) {
return admission.Allowed("not watching in namespace, we do not touch the deployment")
}
defer span.End()
logger := log.Log.WithValues("namespace", req.Namespace)
logger.V(-1).Info("verify deployment")
logger := log.WithField("namespace", req.Namespace)
logger.Debug("verify deployment")
dep := &appsv1.Deployment{}
err := d.decoder.Decode(req, dep)
if err != nil {
logger.Error(err, "failed to decode deployment")
logger.WithError(err).Error("failed to decode deployment")
return admission.Errored(http.StatusBadRequest, err)
}
if dep.Labels["app"] == "jaeger" && dep.Labels["app.kubernetes.io/component"] != "query" {
// Don't touch jaeger deployments
return admission.Allowed("is jaeger deployment, we do not touch it")
}
ns := &corev1.Namespace{}
@ -97,7 +82,7 @@ func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Reques
// we shouldn't fail if the namespace object can't be obtained
if err != nil {
msg := "failed to get the namespace for the deployment, skipping injection based on namespace annotation"
logger.Error(err, msg)
logger.WithError(err).Debug(msg)
span.AddEvent(msg, trace.WithAttributes(attribute.String("error", err.Error())))
}
@ -109,21 +94,21 @@ func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Reques
}
if err := d.client.List(ctx, jaegers, opts...); err != nil {
logger.Error(err, "failed to get the available Jaeger pods")
logger.WithError(err).Error("failed to get the available Jaeger pods")
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))
}
if inject.Needed(dep, ns) {
jaeger := inject.Select(dep, ns, jaegers)
if jaeger != nil && jaeger.GetDeletionTimestamp() == nil {
logger := logger.WithValues(
"jaeger", jaeger.Name,
"jaeger-namespace", jaeger.Namespace,
)
logger := logger.WithFields(log.Fields{
"jaeger": jaeger.Name,
"jaeger-namespace": jaeger.Namespace,
})
if jaeger.Namespace != dep.Namespace {
if err := reconcileConfigMaps(ctx, d.client, jaeger, dep); err != nil {
const msg = "failed to reconcile config maps for the namespace"
logger.Error(err, msg)
logger.WithError(err).Error(msg)
span.AddEvent(msg)
}
}
@ -136,9 +121,7 @@ func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Reques
span.AddEvent(msg)
}
envConfigMaps := corev1.ConfigMapList{}
d.client.List(ctx, &envConfigMaps, client.InNamespace(dep.Namespace))
dep = inject.Sidecar(jaeger, dep, inject.WithEnvFromConfigMaps(inject.GetConfigMapsMatchedEnvFromInDeployment(*dep, envConfigMaps.Items)))
dep = inject.Sidecar(jaeger, dep)
marshaledDeploy, err := json.Marshal(dep)
if err != nil {
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))
@ -149,7 +132,7 @@ func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Reques
const msg = "no suitable Jaeger instances found to inject a sidecar"
span.AddEvent(msg)
logger.V(-1).Info(msg)
logger.Debug(msg)
return admission.Allowed(msg)
}
@ -166,6 +149,7 @@ func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Reques
return admission.PatchResponseFromRaw(req.Object.Raw, marshaledDeploy)
}
}
return admission.Allowed("no action needed")
}

View File

@ -24,7 +24,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
"github.com/jaegertracing/jaeger-operator/pkg/inject"
)
@ -94,7 +93,8 @@ func TestReconcileConfigMaps(t *testing.T) {
errors: tC.errors,
}
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
viper.Set("platform", v1.FlagPlatformOpenShift)
defer viper.Reset()
// test
err := reconcileConfigMaps(context.Background(), cl, jaeger, &dep)
@ -130,11 +130,11 @@ func (u *failingClient) List(ctx context.Context, list client.ObjectList, opts .
return u.WithWatch.List(ctx, list, opts...)
}
func (u *failingClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
func (u *failingClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
if u.errors.getErr != nil {
return u.errors.getErr
}
return u.WithWatch.Get(ctx, key, obj, opts...)
return u.WithWatch.Get(ctx, key, obj)
}
func (u *failingClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
@ -166,7 +166,6 @@ func TestReconcilieDeployment(t *testing.T) {
resp admission.Response
errors errorGroup
emptyRequest bool
watch_ns string
}{
{
desc: "no content to decode",
@ -244,8 +243,8 @@ func TestReconcilieDeployment(t *testing.T) {
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "is jaeger deployment, we do not touch it",
Code: 200,
Reason: "is jaeger deployment, we do not touch it",
Code: 200,
},
},
},
@ -314,8 +313,8 @@ func TestReconcilieDeployment(t *testing.T) {
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "no suitable Jaeger instances found to inject a sidecar",
Code: 200,
Reason: "no suitable Jaeger instances found to inject a sidecar",
Code: 200,
},
},
},
@ -352,36 +351,10 @@ func TestReconcilieDeployment(t *testing.T) {
},
jaeger: &v1.Jaeger{},
},
{
desc: "should not touch deployment on other namespaces != watch_namespaces",
dep: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
Annotations: map[string]string{},
Labels: map[string]string{
"app": "not jaeger",
},
},
Spec: appsv1.DeploymentSpec{},
},
resp: admission.Response{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "not watching in namespace, we do not touch the deployment",
Code: 200,
},
},
},
watch_ns: "my-other-ns, other-ns-2",
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
viper.Set(v1.ConfigWatchNamespace, tc.watch_ns)
defer viper.Reset()
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Namespace,
@ -398,8 +371,7 @@ func TestReconcilieDeployment(t *testing.T) {
errors: tc.errors,
}
decoder := admission.NewDecoder(scheme.Scheme)
r := NewDeploymentInterceptorWebhook(cl, decoder)
r := NewDeploymentInterceptorWebhook(cl)
req := admission.Request{}
if !tc.emptyRequest {
@ -420,6 +392,9 @@ func TestReconcilieDeployment(t *testing.T) {
}
}
decoder, err := admission.NewDecoder(s)
require.NoError(t, err)
admission.InjectDecoderInto(decoder, r)
resp := r.Handle(context.Background(), req)
assert.Len(t, resp.Patches, len(tc.resp.Patches))
@ -431,6 +406,8 @@ func TestReconcilieDeployment(t *testing.T) {
})
assert.Equal(t, tc.resp, resp)
require.NoError(t, err)
})
}
}

View File

@ -1,16 +1,13 @@
package appsv1_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/controllers/appsv1"
)
@ -30,26 +27,5 @@ func TestNamespaceControllerRegisterWithManager(t *testing.T) {
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
}
func TestNewNamespaceInstance(t *testing.T) {
// prepare
nsn := types.NamespacedName{Name: "my-instance", Namespace: "default"}
reconciler := appsv1.NewNamespaceReconciler(
k8sClient,
k8sClient,
testScheme,
)
instance := v1.NewJaeger(nsn)
err := k8sClient.Create(context.Background(), instance)
require.NoError(t, err)
req := k8sreconcile.Request{
NamespacedName: nsn,
}
_, err = reconciler.Reconcile(context.Background(), req)
require.NoError(t, err)
assert.NoError(t, err)
}

View File

@ -15,11 +15,9 @@ import (
// +kubebuilder:scaffold:imports
)
var (
k8sClient client.Client
testEnv *envtest.Environment
testScheme *runtime.Scheme = scheme.Scheme
)
var k8sClient client.Client
var testEnv *envtest.Environment
var testScheme *runtime.Scheme = scheme.Scheme
func TestMain(m *testing.M) {
testEnv = &envtest.Environment{

View File

@ -1,77 +0,0 @@
package elasticsearch_test
import (
"context"
"testing"
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/manager"
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/controllers/elasticsearch"
)
func TestElasticSearchSetupWithManager(t *testing.T) {
t.Skip("this test requires a real cluster, otherwise the GetConfigOrDie will die")
// prepare
mgr, err := manager.New(k8sconfig.GetConfigOrDie(), manager.Options{})
require.NoError(t, err)
reconciler := elasticsearch.NewReconciler(
k8sClient,
k8sClient,
)
// test
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
}
func TestNewElasticSearchInstance(t *testing.T) {
// prepare
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ns",
},
}
es := &esv1.Elasticsearch{
ObjectMeta: metav1.ObjectMeta{
Name: "test-es",
Namespace: "test-ns",
},
}
jaeger := v1.NewJaeger(types.NamespacedName{
Name: "test-jaeger",
Namespace: "test-jaeger",
})
esv1.AddToScheme(testScheme)
v1.AddToScheme(testScheme)
client := fake.NewClientBuilder().WithRuntimeObjects(ns, es, jaeger).Build()
reconciler := elasticsearch.NewReconciler(
client,
client,
)
req := k8sreconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-es",
Namespace: "test-ns",
},
}
_, err := reconciler.Reconcile(context.Background(), req)
require.NoError(t, err)
}

View File

@ -1,57 +0,0 @@
package elasticsearch_test
import (
"fmt"
"os"
"path/filepath"
"testing"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
// +kubebuilder:scaffold:imports
)
var (
k8sClient client.Client
testEnv *envtest.Environment
testScheme *runtime.Scheme = scheme.Scheme
)
func TestMain(m *testing.M) {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
}
cfg, err := testEnv.Start()
if err != nil {
fmt.Printf("failed to start testEnv: %v", err)
os.Exit(1)
}
if err := v1.AddToScheme(scheme.Scheme); err != nil {
fmt.Printf("failed to register scheme: %v", err)
os.Exit(1)
}
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme})
if err != nil {
fmt.Printf("failed to setup a Kubernetes client: %v", err)
os.Exit(1)
}
code := m.Run()
err = testEnv.Stop()
if err != nil {
fmt.Printf("failed to stop testEnv: %v", err)
os.Exit(1)
}
os.Exit(code)
}

View File

@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/operator-framework/operator-lib/handler"
@ -48,9 +49,8 @@ func NewReconciler(client client.Client, clientReader client.Reader, scheme *run
// +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses;ingressclasses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes/custom-host,verbs=create
// +kubebuilder:rbac:groups=console.openshift.io,resources=consolelinks,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=jobs;cronjobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete
@ -69,10 +69,7 @@ func (r *JaegerReconciler) Reconcile(ctx context.Context, request ctrl.Request)
func (r *JaegerReconciler) SetupWithManager(mgr ctrl.Manager) error {
err := ctrl.NewControllerManagedBy(mgr).
For(&v1.Jaeger{}).
Watches(
&v1.Jaeger{},
&handler.InstrumentedEnqueueRequestForObject{},
).
Watches(&source.Kind{Type: &v1.Jaeger{}}, &handler.InstrumentedEnqueueRequestForObject{}).
Complete(r)
return err
}

View File

@ -4,6 +4,7 @@ import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
@ -51,5 +52,5 @@ func TestRegisterWithManager(t *testing.T) {
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
assert.NoError(t, err)
}

View File

@ -15,11 +15,9 @@ import (
// +kubebuilder:scaffold:imports
)
var (
k8sClient client.Client
testEnv *envtest.Environment
testScheme *runtime.Scheme = scheme.Scheme
)
var k8sClient client.Client
var testEnv *envtest.Environment
var testScheme *runtime.Scheme = scheme.Scheme
func TestMain(m *testing.M) {
testEnv = &envtest.Environment{

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,7 @@ metadata:
spec:
strategy: allInOne
allInOne:
image: jaegertracing/all-in-one:1.65.0
image: jaegertracing/all-in-one:1.37.0
options:
log-level: debug
query:

View File

@ -1,17 +0,0 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: high-priority # priorityClassName here
value: 1000000
globalDefault: false
description: "This priority class should be used for XYZ service pods only."
---
apiVersion: jaegertracing.io/v1
kind: "Jaeger"
metadata:
name: "my-jaeger"
spec:
strategy: allInOne
allInOne:
image: jaegertracing/all-in-one:1.30.0
priorityClassName: high-priority # priorityClassName here

View File

@ -1,13 +0,0 @@
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
name: ingress-with-hosts
spec:
query:
options:
base-path: "/"
ingress:
enabled: true
pathType: Prefix
hosts:
- mesh-jaeger.xxx.com #your domain name.

View File

@ -17,7 +17,7 @@ spec:
serviceAccountName: jaeger-operator
containers:
- name: jaeger-operator
image: jaegertracing/jaeger-operator:1.65.0
image: jaegertracing/jaeger-operator:1.37.0
ports:
- containerPort: 8383
name: http-metrics
@ -41,7 +41,7 @@ spec:
- name: OPERATOR_NAME
value: "jaeger-operator"
- name: jaeger-agent
image: jaegertracing/jaeger-agent:1.62.0
image: jaegertracing/jaeger-agent:1.37.0
env:
- name: POD_NAMESPACE
valueFrom:

View File

@ -23,7 +23,7 @@ spec:
- containerPort: 8080
protocol: TCP
- name: jaeger-agent
image: jaegertracing/jaeger-agent:1.62.0
image: jaegertracing/jaeger-agent:1.37.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 5775

View File

@ -20,7 +20,7 @@ spec:
spec:
containers:
- name: tracegen
image: jaegertracing/jaeger-tracegen:1.65.0
image: jaegertracing/jaeger-tracegen:1.37.0
args:
- -duration=30m
- -workers=10

172
go.mod
View File

@ -1,116 +1,108 @@
module github.com/jaegertracing/jaeger-operator
go 1.22.0
go 1.17
require (
github.com/Masterminds/semver v1.5.0
github.com/go-logr/logr v1.4.2
github.com/google/gnostic-models v0.6.9
github.com/google/go-cmp v0.7.0
github.com/google/gnostic v0.6.9
github.com/google/go-cmp v0.5.8
github.com/mitchellh/go-homedir v1.1.0
github.com/openshift/api v0.0.0-20231206170337-f356bd9e2ff6
github.com/openshift/elasticsearch-operator v0.0.0-20231013125000-a5c132efd4e0
github.com/openshift/library-go v0.0.0-20231130204458-653f82d961a1
github.com/openshift/api v0.0.0-20220124143425-d74727069f6f
github.com/openshift/elasticsearch-operator v0.0.0-20220708171007-a87102296ded
github.com/opentracing/opentracing-go v1.2.0
github.com/operator-framework/operator-lib v0.13.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
github.com/operator-framework/operator-lib v0.10.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.13.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.12.0
github.com/stretchr/testify v1.8.0
github.com/uber/jaeger-client-go v2.30.0+incompatible
go.opentelemetry.io/otel v1.35.0
go.opentelemetry.io/otel/exporters/jaeger v1.17.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0
go.opentelemetry.io/otel/exporters/prometheus v0.57.0
go.opentelemetry.io/otel/metric v1.35.0
go.opentelemetry.io/otel/sdk v1.35.0
go.opentelemetry.io/otel/sdk/metric v1.35.0
go.opentelemetry.io/otel/trace v1.35.0
go.uber.org/zap v1.27.0
gomodules.xyz/jsonpatch/v2 v2.4.0
google.golang.org/grpc v1.71.0
k8s.io/api v0.29.3
k8s.io/apimachinery v0.29.3
k8s.io/client-go v0.29.3
k8s.io/component-base v0.29.3
sigs.k8s.io/controller-runtime v0.17.3
go.opentelemetry.io/otel v0.20.0
go.opentelemetry.io/otel/exporters/metric/prometheus v0.20.0
go.opentelemetry.io/otel/exporters/otlp v0.20.0
go.opentelemetry.io/otel/exporters/trace/jaeger v0.20.0
go.opentelemetry.io/otel/metric v0.20.0
go.opentelemetry.io/otel/oteltest v0.20.0
go.opentelemetry.io/otel/sdk v0.20.0
go.opentelemetry.io/otel/sdk/export/metric v0.20.0
go.opentelemetry.io/otel/sdk/metric v0.20.0
go.opentelemetry.io/otel/trace v0.20.0
gomodules.xyz/jsonpatch/v2 v2.2.0
google.golang.org/grpc v1.48.0
k8s.io/api v0.24.3
k8s.io/apimachinery v0.24.3
k8s.io/client-go v0.24.3
sigs.k8s.io/controller-runtime v0.12.3
)
require (
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/codahale/hdrhistogram v0.0.0-00010101000000-000000000000 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.2 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.8.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.20.2 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.3.0 // indirect
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
golang.org/x/net v0.35.0 // indirect
golang.org/x/oauth2 v0.26.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/term v0.29.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/protobuf v1.36.5 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.1 // indirect
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.29.2 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20240221221325-2ac9dc51f3f1 // indirect
k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/component-base v0.24.2 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 // indirect
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace github.com/codahale/hdrhistogram => github.com/HdrHistogram/hdrhistogram-go v1.1.2

1657
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,66 @@
# GitHub action to run the E2E tests.
# For this purpose, it would be a better idea to use a reusable workflow. There
# is some documentation about how to use a local reusable workflow:
# https://github.blog/changelog/2022-01-25-github-actions-reusable-workflows-can-be-referenced-locally/
# But it seems it doesn't work properly:
# https://github.community/t/allow-reusable-workflows-to-be-located-at-arbitrary-locations-and-be-local/212745/7
# So, the CI uses a local GitHub action as a template to run all the tests.
name: Run E2E tests
description: "Run an E2E test suite"
inputs:
testsuite_name:
description: "Name of the test suite to run"
required: true
kube_version:
description: "Kubernetes version to use"
required: true
runs:
using: "composite"
steps:
- name: "Set up Go"
uses: actions/setup-go@v2.1.4
with:
go-version: 1.17
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
install: true
-
name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: e2e-${{ github.sha }}
restore-keys: |
e2e-
- name: "Install KIND"
run: ./hack/install/install-kind.sh
shell: bash
- name: "Install KUTTL"
run: ./hack/install/install-kuttl.sh
shell: bash
- name: "Install gomplate"
run: ./hack/install/install-gomplate.sh
shell: bash
- name: "Install dependencies"
run: make install-tools
shell: bash
- name: "Run E2E ${{ inputs.testsuite_name }} test suite on ${{ inputs.kube_version }}"
env:
VERBOSE: "true"
KUBE_VERSION: "${{ inputs.kube_version }}"
DOCKER_BUILD_OPTIONS: "--cache-from type=local,src=/tmp/.buildx-cache --cache-to type=local,dest=/tmp/.buildx-cache-new,mode=max --load"
run: make run-e2e-tests-${{ inputs.testsuite_name }}
shell: bash
-
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
shell: bash

View File

@ -9,4 +9,3 @@ export EXAMPLES_DIR=$ROOT_DIR/examples
export GOMPLATE=$ROOT_DIR/bin/gomplate
export YQ=$ROOT_DIR/bin/yq
export KUTTL=$ROOT_DIR/bin/kubectl-kuttl
export KUSTOMIZE=$ROOT_DIR/bin/kustomize

View File

@ -1,11 +0,0 @@
#!/bin/bash
# Since operator-sdk 1.26.0, `make bundle` changes the `createdAt` field from the bundle
# even if it is patched:
# https://github.com/operator-framework/operator-sdk/pull/6136
# This code checks if only the createdAt field. If is the only change, it is ignored.
# Else, it will do nothing.
# https://github.com/operator-framework/operator-sdk/issues/6285#issuecomment-1415350333
git diff --quiet -I'^ createdAt: ' bundle
if ((! $?)) ; then
git checkout bundle
fi

View File

@ -19,3 +19,5 @@ tar -xzf $tar_file -C /tmp/
cp /tmp/$PROGRAM $BIN/
chmod +x $BIN/$PROGRAM
export PATH=$PATH:$BIN

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="0.14.0"
VERSION="0.9.0"
echo "Installing controller-gen"

View File

@ -1,24 +0,0 @@
#!/bin/bash
VERSION=3.4.20
echo "Installing etcd"
current_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source $current_dir/install-utils.sh
PROGRAM="etcd"
create_bin
check_tool "$BIN/$PROGRAM" $VERSION "--version"
url="https://github.com/etcd-io/etcd/releases/download/v${VERSION}/etcd-v${VERSION}-linux-amd64.tar.gz -o /tmp/etcd-v${VERSION}-linux-amd64.tar.gz"
retry "curl -L $url -o /tmp/etcd-v${VERSION}-linux-amd64.tar.gz"
mkdir /tmp/etcd-download-test
tar xzvf /tmp/etcd-v${VERSION}-linux-amd64.tar.gz -C /tmp/etcd-download-test --strip-components=1
rm -f /tmp/etcd-v${VERSION}-linux-amd64.tar.gz
mv /tmp/etcd-download-test/etcd $BIN
mv /tmp/etcd-download-test/etcdctl $BIN

View File

@ -1,12 +0,0 @@
#!/bin/bash
VERSION="0.1.12"
echo "Installing goimports"
current_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source $current_dir/install-utils.sh
create_bin
export GOBIN=$BIN
retry "go install golang.org/x/tools/cmd/goimports@v${VERSION}"

View File

@ -1,16 +0,0 @@
#!/bin/bash
VERSION="1.55.2"
echo "Installing golangci-lint"
current_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source $current_dir/install-utils.sh
create_bin
export PROGRAM="golangci-lint"
check_tool "$BIN/$PROGRAM" "$VERSION" "version"
export GOBIN=$BIN
retry "go install github.com/golangci/golangci-lint/cmd/golangci-lint@v${VERSION}"

23
hack/install/install-gosec.sh Executable file
View File

@ -0,0 +1,23 @@
#!/bin/bash

# Installs gosec (the Go security checker) into the local $BIN directory
# using the official installer script from the securego/gosec repository.
# Skips the download when the requested version is already installed.

# NOTE: no trailing whitespace — a trailing space here breaks the
# check_tool version match and produces a malformed "v2.12.0 " tag.
VERSION="2.12.0"

echo "Installing gosec"

current_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source $current_dir/install-utils.sh

create_bin

PROGRAM="gosec"
# Exits early (status 0) if this exact version is already in $BIN
check_tool "$BIN/$PROGRAM" "$VERSION" "version"

# Download the official installer
retry "curl -sLo $BIN/install-gosec.sh https://raw.githubusercontent.com/securego/gosec/master/install.sh"
chmod +x $BIN/install-gosec.sh

# Run the installer, pinning the requested release tag
retry "$BIN/install-gosec.sh v${VERSION}"

export PATH=$PATH:$BIN

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="1.15.0"
VERSION="1.11.2"
echo "Installing istioctl"
@ -23,3 +23,5 @@ cd $BIN
retry "$BIN/downloadIstio"
mv $BIN/istio-${VERSION}/bin/istioctl $BIN/
export PATH=$PATH:$BIN

View File

@ -1,11 +1,7 @@
#!/bin/bash
echo "Installing kind"
VERSION="0.14.0"
VERSION="0.20.0"
# Kubernetes 1.19 and 1.20 are supported by Kind until 0.17.0
if [ "$KUBE_VERSION" == "1.19" ] || [ "$KUBE_VERSION" == "1.20" ]; then
VERSION="0.17.0"
fi
echo "Installing kind"
current_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source $current_dir/install-utils.sh

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="3.10.0"
VERSION="2.3.1"
echo "Installing kubebuilder"
@ -12,11 +12,12 @@ create_bin
check_tool "$BIN/$PROGRAM" $VERSION "version"
url="https://github.com/kubernetes-sigs/kubebuilder/releases/download/v$VERSION/kubebuilder_$(go env GOOS)_$(go env GOARCH)"
url="https://github.com/kubernetes-sigs/kubebuilder/releases/download/v$VERSION/kubebuilder_${VERSION}_$(go env GOOS)_amd64.tar.gz"
tar_file="/tmp/kubebuilder.tar.gz"
retry "curl -sLo $tar_file $url"
tar -xzf $tar_file -C /tmp/
retry "curl -sLo $BIN/kubebuilder $url"
cp /tmp/kubebuilder_${VERSION}_$(go env GOOS)_amd64/bin/* $BIN/
chmod +x $BIN/kubebuilder
$current_dir/install-etcd.sh
export PATH=$PATH:$BIN

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="4.5.7"
VERSION="4.2.0"
echo "Installing kustomize"
@ -17,3 +17,5 @@ url="https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F
tar_file="/tmp/kustomize.tar.gz"
retry "curl -sLo $tar_file $url"
tar -xzf $tar_file -C $BIN
export PATH=$PATH:$BIN

View File

@ -1,5 +1,5 @@
#!/bin/bash
VERSION="0.13.0"
VERSION="0.12.1"
echo "Installing kuttl"

View File

@ -59,7 +59,7 @@ function check_tool() {
# If the program is there and uses the correct version, do nothing
if [[ -f "$tool" ]]; then
if [[ "$($tool $parameter)" =~ .*"$version".* ]]; then
echo "$(basename -- $tool) $version is installed already"
echo "$(basename -- $tool) $version is already installed"
exit 0
fi
fi
@ -87,4 +87,5 @@ function download() {
retry "curl -sLo $tool_path $url"
chmod +x $tool_path
export PATH=$PATH:$BIN
}

View File

@ -1,8 +1,6 @@
#!/bin/bash
current_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
export ROOT_DIR=$current_dir/../
source $current_dir/common.sh
set -e
# Enable verbosity
@ -15,36 +13,48 @@ if [ "$#" -ne 3 ]; then
exit 1
fi
test_suite_name="$1"
use_kind_cluster="$2"
jaeger_olm="$3"
test_suite_name=$1
use_kind_cluster=$2
jaeger_olm=$3
timeout="5m"
# Prepare the cluster
if [ "$use_kind_cluster" == true ]; then
kubectl wait --timeout=5m --for=condition=available deployment ingress-nginx-controller -n ingress-nginx
kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=5m
make prepare-e2e-tests USE_KIND_CLUSTER=$use_kind_cluster JAEGER_OLM=$jaeger_olm
# Install metrics-server for scalability tests
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
kubectl patch deployment -n kube-system metrics-server --type "json" -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": --kubelet-insecure-tls}]'
kubectl wait --for=condition=available deployment/metrics-server -n kube-system --timeout=5m
fi
if [ "$jaeger_olm" = true ]; then
echo "Skipping Jaeger Operator installation because JAEGER_OLM=true"
else
echo "Installing Jaeger Operator..."
echo Installing Jaeger Operator...
# JAEGER_OPERATOR_VERBOSITY enables verbosity in the Jaeger Operator
# JAEGER_OPERATOR_KAFKA_MINIMAL enables minimal deployment of Kafka clusters
make cert-manager deploy JAEGER_OPERATOR_VERBOSITY=DEBUG JAEGER_OPERATOR_KAFKA_MINIMAL=true
kubectl wait --for=condition=available deployment jaeger-operator -n observability --timeout=$timeout
kubectl wait --timeout=5m --for=condition=available deployment jaeger-operator -n observability
fi
# Prepare reports folder
root_dir=$current_dir/../
reports_dir=$root_dir/reports
mkdir -p $reports_dir
rm -f $reports_dir/$test_suite_name.xml
# Ensure KUTTL is installed
$current_dir/install/install-kuttl.sh
export KUTTL=$root_dir/bin/kubectl-kuttl
mkdir -p $reports_dir
cd $root_dir
$root_dir/hack/install/install-kuttl.sh
make render-e2e-tests-$test_suite_name
echo "Running $test_suite_name E2E tests"
echo Running $test_suite_name E2E tests
cd tests/e2e/$test_suite_name/_build
# Don't stop if something fails because we want to process the
@ -58,7 +68,7 @@ set -e
# The output XML needs some work because it adds "artifacts" as a test case.
# Also, the suites doesn't have a name so, we need to add one.
go install github.com/RH-QE-Distributed-Tracing/junitcli/cmd/junitcli@v1.0.6
go install github.com/iblancasa/junitcli/cmd/junitcli@v1.0.1
junitcli --suite-name $test_suite_name --report --output $reports_dir/$test_suite_name.xml ./artifacts/kuttl-test.xml
if [ "$KIND_KEEP_CLUSTER" != true ] && [ "$use_kind_cluster" == true ]; then

46
hack/run-e2e-tests.sh Executable file
View File

@ -0,0 +1,46 @@
#!/bin/bash
# Runs the given E2E test suites sequentially via `make run-e2e-tests-<suite>`,
# collecting per-suite logs under ./logs and JUnit reports under ./reports.
# Exits non-zero if any suite fails or fails to produce its report.
#
# Usage: run-e2e-tests.sh <suite> [<suite> ...]
# Env:   VERBOSE=true enables shell tracing.

current_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

# Enable verbosity
if [ "$VERBOSE" = true ]; then
    set -o xtrace
fi

test_suites=$@

# NOTE: `set -e` does not abort on a failing `make` here because the pipeline's
# exit status is tee's; each suite's real status is read from PIPESTATUS below
# so every suite gets a chance to run before we report failure.
set -e

rm -rf logs reports
mkdir -p logs
mkdir -p reports

failed=false

for test_suite in $test_suites; do
    echo "============================================================"
    echo "Running test suite $test_suite"
    echo "============================================================"
    make run-e2e-tests-$test_suite 2>&1 | tee -a "./logs/$test_suite.txt"
    # Exit code of `make`, not of `tee`
    exit_code=${PIPESTATUS[0]}

    # A missing report means the suite crashed before producing results
    if [ ! -e "./reports/$test_suite.xml" ]; then
        echo "Test $test_suite failed with code $exit_code and the report was not generated" >> ./logs/failures.txt
        failed=true
    fi

    if [ "$exit_code" -ne 0 ]; then
        echo "Test $test_suite failed with code $exit_code" >> ./logs/failures.txt
        failed=true
    fi
done

go install github.com/iblancasa/junitcli/cmd/junitcli@v1.0.1
junitcli --report reports

# BUG FIX: the original tested the literal string `failed` ([ failed = true ]),
# which is never equal to "true", so the script always exited 0 even when
# suites failed. Expand the variable so CI actually reports the failure.
if [ "$failed" = true ]; then
    echo "Something failed while running the E2E tests!!!"
    exit 1
fi

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.19.16@sha256:476cb3269232888437b61deca013832fee41f9f074f9bed79f57e4280f7c48b7
image: kindest/node:v1.19.16@sha256:d9c819e8668de8d5030708e484a9fdff44d95ec4675d136ef0a0a584e587f65c
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.20.15@sha256:a32bf55309294120616886b5338f95dd98a2f7231519c7dedcec32ba29699394
image: kindest/node:v1.20.15@sha256:6f2d011dffe182bad80b85f6c00e8ca9d86b5b8922cdf433d53575c4c5212248
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.21.14@sha256:8a4e9bb3f415d2bb81629ce33ef9c76ba514c14d707f9797a01e3216376ba093
image: kindest/node:v1.21.12@sha256:f316b33dd88f8196379f38feb80545ef3ed44d9197dca1bfd48bcb1583210207
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.22.17@sha256:f5b2e5698c6c9d6d0adc419c0deae21a425c07d81bbf3b6a6834042f25d4fba2
image: kindest/node:v1.22.9@sha256:8135260b959dfe320206eb36b3aeda9cffcb262f4b44cda6b33f7bb73f453105
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.23.17@sha256:59c989ff8a517a93127d4a536e7014d28e235fb3529d9fba91b3951d461edfdb
image: kindest/node:v1.23.6@sha256:b1fa224cc6c7ff32455e0b1fd9cbfd3d3bc87ecaa8fcb06961ed1afb3db0f9ae
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -2,7 +2,7 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab
image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
kubeadmConfigPatches:
- |
kind: InitConfiguration

View File

@ -1,18 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP

View File

@ -1,18 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP

View File

@ -1,18 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP

Some files were not shown because too many files have changed in this diff Show More