Compare commits

No commits in common. "main" and "v1.25.0" have entirely different histories.

652 changed files with 20,800 additions and 89,732 deletions

.ci/after-success.sh Executable file

@@ -0,0 +1,4 @@
#!/bin/bash
echo "Uploading code coverage results"
bash <(curl -s https://codecov.io/bash)

@@ -1,10 +0,0 @@
#!/bin/bash
OPERATOR_VERSION=$(git describe --tags)
echo "${GITHUB_TOKEN}" | gh auth login --with-token
gh config set prompt disabled
gh release create \
-t "Release ${OPERATOR_VERSION}" \
"${OPERATOR_VERSION}" \
'dist/jaeger-operator.yaml#Installation manifest for Kubernetes'

@@ -1,3 +1,3 @@
#!/bin/bash
./bin/goimports -local "github.com/jaegertracing/jaeger-operator" -l -w $(git ls-files "*\.go" | grep -v vendor)
${GOPATH}/bin/goimports -local "github.com/jaegertracing/jaeger-operator" -l -w $(git ls-files "*\.go" | grep -v vendor)

.ci/install-kuttl.sh Executable file

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
sudo curl -Lo /usr/local/bin/kubectl-kuttl https://github.com/kudobuilder/kuttl/releases/download/v0.10.0/kubectl-kuttl_0.10.0_linux_x86_64
sudo chmod +x /usr/local/bin/kubectl-kuttl
export PATH=$PATH:/usr/local/bin

.ci/install-sdk.sh Executable file

@@ -0,0 +1,24 @@
#!/bin/bash
DEST="${GOPATH}/bin/operator-sdk"
function install_sdk() {
echo "Downloading the operator-sdk ${SDK_VERSION} into ${DEST}"
if [[ "$OSTYPE" == "darwin"* ]]; then
curl https://github.com/operator-framework/operator-sdk/releases/download/${SDK_VERSION}/operator-sdk-${SDK_VERSION}-x86_64-apple-darwin -sLo ${DEST}
else
curl https://github.com/operator-framework/operator-sdk/releases/download/${SDK_VERSION}/operator-sdk-${SDK_VERSION}-x86_64-linux-gnu -sLo ${DEST}
fi
chmod +x ${DEST}
}
mkdir -p ${GOPATH}/bin
if [ ! -f ${DEST} ]; then
install_sdk
fi
${DEST} version | grep -q ${SDK_VERSION}
if [ $? != 0 ]; then
install_sdk
fi
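A minimal invocation sketch for this installer; the script reads both variables from the environment, and the `v0.18.2` pin is an assumption taken from the workflow files elsewhere in this compare:

```sh
# assumed values — the script requires GOPATH and SDK_VERSION to be set
export GOPATH="$(go env GOPATH)"
SDK_VERSION=v0.18.2 ./.ci/install-sdk.sh
```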

.ci/install.sh Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
make install
RT=$?
if [ ${RT} != 0 ]; then
echo "Failed to install the operator dependencies."
exit ${RT}
fi

.ci/lint.sh Executable file

@@ -0,0 +1,16 @@
#!/bin/bash
GOLINT=golint
command -v ${GOLINT} > /dev/null
if [ $? != 0 ]; then
if [ -n ${GOPATH} ]; then
GOLINT="${GOPATH}/bin/golint"
fi
fi
out=$(${GOLINT} ./... | grep -v pkg/storage/elasticsearch/v1 | grep -v zz_generated)
if [[ $out ]]; then
echo "$out"
exit 1
fi

@@ -20,6 +20,15 @@ fi
OLD_PWD=$(pwd)
VERSION=$(grep operator= versions.txt | awk -F= '{print $2}')
PKG_FILE=deploy/olm-catalog/jaeger-operator/jaeger-operator.package.yaml
CSV_FILE=deploy/olm-catalog/jaeger-operator/manifests/jaeger-operator.clusterserviceversion.yaml
CRD_FILE=deploy/crds/jaegertracing.io_jaegers_crd.yaml
# once we get a clarification on the following item, we might not need to have different file names
# https://github.com/operator-framework/community-operators/issues/701
DEST_PKG_FILE=jaeger.package.yaml
DEST_CSV_FILE=jaeger.v${VERSION}.clusterserviceversion.yaml
for dest in ${COMMUNITY_OPERATORS_REPOSITORY} ${UPSTREAM_REPOSITORY}; do
cd "${LOCAL_REPOSITORIES_PATH}/${dest}"
git remote | grep upstream > /dev/null
@@ -32,7 +41,11 @@ for dest in ${COMMUNITY_OPERATORS_REPOSITORY} ${UPSTREAM_REPOSITORY}; do
git checkout -q main
git rebase -q upstream/main
cp -r "${OLD_PWD}/bundle" "operators/jaeger/${VERSION}"
mkdir -p "${dest}/operators/jaeger/${VERSION}"
cp "${OLD_PWD}/${PKG_FILE}" "${dest}/operators/jaeger/${DEST_PKG_FILE}"
cp "${OLD_PWD}/${CSV_FILE}" "${dest}/operators/jaeger/${VERSION}/${DEST_CSV_FILE}"
cp "${OLD_PWD}/${CRD_FILE}" "${dest}/operators/jaeger/${VERSION}"
git checkout -q -b Update-Jaeger-to-${VERSION}
if [[ $? != 0 ]]; then
@@ -40,7 +53,7 @@ for dest in ${COMMUNITY_OPERATORS_REPOSITORY} ${UPSTREAM_REPOSITORY}; do
exit 1
fi
git add .
git add ${dest}
git commit -sqm "Update Jaeger to v${VERSION}"
@@ -52,6 +65,7 @@ for dest in ${COMMUNITY_OPERATORS_REPOSITORY} ${UPSTREAM_REPOSITORY}; do
echo "Submitting PR on your behalf via 'hub'"
gh pr create --title "Update Jaeger to v${VERSION}" --body-file "${OLD_PWD}/.ci/.checked-pr-template.md"
rm ${tmpfile}
done
cd ${OLD_PWD}
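The loop above assumes three environment variables pointing at local clones of the catalog repositories. A hedged sketch of that setup — the paths and repository names below are assumptions, not confirmed by this compare:

```sh
# illustrative values only
export LOCAL_REPOSITORIES_PATH="${HOME}/repositories"
export COMMUNITY_OPERATORS_REPOSITORY="community-operators"
export UPSTREAM_REPOSITORY="upstream-community-operators"
```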

@@ -1,37 +0,0 @@
#!/bin/bash
if [[ -z $OPERATOR_VERSION ]]; then
echo "OPERATOR_VERSION isn't set. Skipping process."
exit 1
fi
JAEGER_VERSION=$(echo $JAEGER_VERSION | tr -d '"')
JAEGER_AGENT_VERSION=$(echo $JAEGER_AGENT_VERSION | tr -d '"')
PREVIOUS_VERSION=$(grep operator= versions.txt | awk -F= '{print $2}')
# change the versions.txt, bump only operator version.
sed "s~operator=${PREVIOUS_VERSION}~operator=${OPERATOR_VERSION}~gi" -i versions.txt
# changes to deploy/operator.yaml
sed "s~replaces: jaeger-operator.v.*~replaces: jaeger-operator.v${PREVIOUS_VERSION}~i" -i config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
# Update the examples according to the release
sed -i "s~all-in-one:.*~all-in-one:${JAEGER_VERSION}~gi" examples/all-in-one-with-options.yaml
# statefulset-manual-sidecar
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_AGENT_VERSION}~gi" examples/statefulset-manual-sidecar.yaml
# operator-with-tracing
sed -i "s~jaeger-operator:.*~jaeger-operator:${OPERATOR_VERSION}~gi" examples/operator-with-tracing.yaml
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_AGENT_VERSION}~gi" examples/operator-with-tracing.yaml
# tracegen
sed -i "s~jaeger-tracegen:.*~jaeger-tracegen:${JAEGER_VERSION}~gi" examples/tracegen.yaml
VERSION=${OPERATOR_VERSION} USER=jaegertracing make bundle
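A sketch of how this version-bump script appears to be driven, judging only from the variables it reads; the file name and the version values are hypothetical:

```sh
# hypothetical script name and placeholder versions
OPERATOR_VERSION=1.25.0 JAEGER_VERSION=1.25.0 JAEGER_AGENT_VERSION=1.25.0 \
  ./.ci/prepare-release.sh
```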

@@ -4,7 +4,7 @@ BASE_BUILD_IMAGE=${BASE_BUILD_IMAGE:-"jaegertracing/jaeger-operator"}
OPERATOR_VERSION=${OPERATOR_VERSION:-$(git describe --tags)}
## if we are on a release tag, let's extract the version number
## the other possible value, currently, is 'main' (or another branch name)
## the other possible value, currently, is 'master' (or another branch name)
## if we are not running in the CI, it fallsback to the `git describe` above
if [[ $OPERATOR_VERSION == v* ]]; then
OPERATOR_VERSION=$(echo ${OPERATOR_VERSION} | grep -Po "([\d\.]+)")
@@ -12,7 +12,6 @@ if [[ $OPERATOR_VERSION == v* ]]; then
fi
BUILD_IMAGE=${BUILD_IMAGE:-"${BASE_BUILD_IMAGE}:${OPERATOR_VERSION}"}
DOCKER_USERNAME=${DOCKER_USERNAME:-"jaegertracingbot"}
if [ "${DOCKER_PASSWORD}x" != "x" -a "${DOCKER_USERNAME}x" != "x" ]; then
echo "Performing a 'docker login'"
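The `grep -Po "([\d\.]+)"` extraction above (also used in `.ci/release.sh` below) strips the leading `v` from a release tag. A quick illustration, assuming GNU grep:

```sh
# prints "1.25.0" — only the digits-and-dots run survives
echo "v1.25.0" | grep -Po "([\d\.]+)"
```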

.ci/release.sh Executable file

@@ -0,0 +1,90 @@
#!/bin/bash
git diff -s --exit-code
if [[ $? != 0 ]]; then
echo "The repository isn't clean. We won't proceed, as we don't know if we should commit those changes or not."
exit 1
fi
BASE_BUILD_IMAGE=${BASE_BUILD_IMAGE:-"jaegertracing/jaeger-operator"}
BASE_TAG=${BASE_TAG:-$(git describe --tags)}
OPERATOR_VERSION=${OPERATOR_VERSION:-${BASE_TAG}}
OPERATOR_VERSION=$(echo ${OPERATOR_VERSION} | grep -Po "([\d\.]+)")
JAEGER_VERSION=$(echo ${OPERATOR_VERSION} | grep -Po "([\d]+\.[\d]+\.[\d]+)" | head -n 1)
TAG=${TAG:-"v${OPERATOR_VERSION}"}
BUILD_IMAGE=${BUILD_IMAGE:-"${BASE_BUILD_IMAGE}:${OPERATOR_VERSION}"}
CREATED_AT=$(date -u -Isecond)
PREVIOUS_VERSION=$(grep operator= versions.txt | awk -F= '{print $2}')
if [[ ${BASE_TAG} =~ ^release/v.[[:digit:].]+(\-.*)?$ ]]; then
echo "Releasing ${OPERATOR_VERSION} from ${BASE_TAG}"
else
echo "The release tag does not match the expected format: ${BASE_TAG}"
exit 1
fi
if [ "${GH_WRITE_TOKEN}x" == "x" ]; then
echo "The GitHub write token isn't set. Skipping release process."
exit 1
fi
# changes to deploy/operator.yaml
sed "s~image: jaegertracing/jaeger-operator.*~image: ${BUILD_IMAGE}~gi" -i deploy/operator.yaml
sed "s~image: jaegertracing/jaeger-agent:.*~image: jaegertracing/jaeger-agent:${JAEGER_VERSION}~gi" -i deploy/operator.yaml
# changes to test/operator.yaml
sed "s~image: jaegertracing/jaeger-operator.*~image: ${BUILD_IMAGE}~gi" -i test/operator.yaml
# change the versions.txt, bump only operator version.
sed "s~operator=${PREVIOUS_VERSION}~operator=${OPERATOR_VERSION}~gi" -i versions.txt
mkdir -p deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}
cp deploy/olm-catalog/jaeger-operator/manifests/jaeger-operator.clusterserviceversion.yaml \
deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
operator-sdk generate csv \
--csv-channel=stable \
--make-manifests=false \
--csv-version=${OPERATOR_VERSION}
# changes to deploy/olm-catalog/jaeger-operator/manifests
sed "s~containerImage: docker.io/jaegertracing/jaeger-operator:${PREVIOUS_VERSION}~containerImage: docker.io/jaegertracing/jaeger-operator:${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
sed "s~image: jaegertracing/jaeger-operator:${PREVIOUS_VERSION}~image: jaegertracing/jaeger-operator:${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
sed "s~replaces: jaeger-operator.v.*~replaces: jaeger-operator.v${PREVIOUS_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
sed "s~version: ${PREVIOUS_VERSION}~version: ${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
sed "s~name: jaeger-operator.v${PREVIOUS_VERSION}~name: jaeger-operator.v${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
# changes to deploy/olm-catalog/jaeger-operator/jaeger-operator.package.yaml
sed "s~currentCSV: jaeger-operator.v${PREVIOUS_VERSION}~currentCSV: jaeger-operator.v${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/jaeger-operator.package.yaml
cp deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml \
deploy/olm-catalog/jaeger-operator/manifests/jaeger-operator.clusterserviceversion.yaml
git diff -s --exit-code
if [[ $? == 0 ]]; then
echo "No changes detected. Skipping."
else
git add \
deploy/operator.yaml \
deploy/olm-catalog/jaeger-operator/jaeger-operator.package.yaml \
deploy/olm-catalog/jaeger-operator/manifests/jaeger-operator.clusterserviceversion.yaml \
test/operator.yaml \
versions.txt
git diff -s --exit-code
if [[ $? != 0 ]]; then
echo "There are more changes than expected. Skipping the release."
git diff
exit 1
fi
git config user.email "jaeger-release@jaegertracing.io"
git config user.name "Jaeger Release"
git commit -qm "Release ${TAG}"
git tag ${TAG}
git push --repo=https://${GH_WRITE_TOKEN}@github.com/jaegertracing/jaeger-operator.git --tags
git push https://${GH_WRITE_TOKEN}@github.com/jaegertracing/jaeger-operator.git refs/tags/${TAG}:master
fi
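The `BASE_TAG` guard near the top of this script accepts tags of the form `release/v<semver>`. A small self-contained check of that pattern, for illustration:

```sh
# exercising the tag-format guard from release.sh
BASE_TAG="release/v1.25.0"
if [[ ${BASE_TAG} =~ ^release/v.[[:digit:].]+(\-.*)?$ ]]; then
  echo "tag format accepted"
fi
```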

.ci/run-e2e-tests.sh Executable file

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
set -x
[[ -z "$TEST_GROUP" ]] && { echo "TEST_GROUP is undefined, exiting" ; exit 1; }
## Since we're running MiniKube with --vm-driver none, change imagePullPolicy to get the image locally
sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' test/operator.yaml
## remove this once #947 is fixed
export VERBOSE='-v -timeout 20m'
if [ "${TEST_GROUP}" = "es" ]; then
echo "Running elasticsearch tests"
make es
make e2e-tests-es
elif [ "${TEST_GROUP}" = "es-self-provisioned" ]; then
echo "Running self provisioned elasticsearch tests"
make e2e-tests-self-provisioned-es
res=$?
if [[ ${res} -ne 0 ]]; then
kubectl logs deploy/elasticsearch-operator -n openshift-logging
fi
exit ${res}
elif [ "${TEST_GROUP}" = "smoke" ]
then
echo "Running Smoke Tests"
make e2e-tests-smoke
elif [ "${TEST_GROUP}" = "cassandra" ]
then
echo "Running Cassandra Tests"
make cassandra
make e2e-tests-cassandra
elif [ "${TEST_GROUP}" = "streaming" ]
then
echo "Running Streaming Tests"
make e2e-tests-streaming
elif [ "${TEST_GROUP}" = "examples1" ]
then
echo "Running Examples1 Tests"
make e2e-tests-examples1
elif [ "${TEST_GROUP}" = "examples2" ]
then
echo "Running Examples2 Tests"
make e2e-tests-examples2
elif [ "${TEST_GROUP}" = "es-token-propagation" ]
then
echo "Running token propagation tests"
make e2e-tests-token-propagation-es
elif [ "${TEST_GROUP}" = "generate" ]
then
echo "Running CLI manifest generation tests"
make e2e-tests-generate
elif [ "${TEST_GROUP}" = "upgrade" ]
then
echo "Running upgrade tests"
make e2e-tests-upgrade
elif [ "${TEST_GROUP}" = "istio" ]
then
echo "Running Smoke Tests with istio"
make e2e-tests-istio
else
echo "Unknown TEST_GROUP [${TEST_GROUP}]"; exit 1
fi
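A hedged example of driving the dispatcher above; `smoke` is one of the groups recognized by the `elif` chain, and a running cluster plus the repository's make targets are assumed:

```sh
TEST_GROUP=smoke ./.ci/run-e2e-tests.sh
```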

.ci/script.sh Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
make install-tools ci
RT=$?
if [ ${RT} != 0 ]; then
echo "Failed to build the operator."
exit ${RT}
fi

.ci/setup-docker.sh Executable file

@@ -0,0 +1,11 @@
#!/bin/bash
## this script is meant to be executed in a CI executor based on Ubuntu 18.04 and hasn't been tested anywhere else
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get update
sudo apt-get install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io socat

.ci/upload-test-coverage.sh Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
bash <(curl -s https://codecov.io/bash)

.ci/vgot.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/sh
if [ $# = 0 ]; then
echo 'usage: vgot cmdpackage[@version]...' >&2
exit 2
fi
d=`mktemp -d`
cd "$d"
go mod init temp >/dev/null 2>&1
for i; do
pkg=`echo $i | sed 's/@.*//'`
go get "$i" &&
go install "$pkg" &&
echo installed `go list -f '{{.ImportPath}}@{{.Module.Version}}' "$pkg"`
done
rm -r "$d"
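A usage sketch for `vgot.sh`: each argument is a package path with an optional `@version`, matching the `sed 's/@.*//'` split above. The tool and version here are illustrative:

```sh
# installs the pinned binary from a throwaway temp module
./.ci/vgot.sh golang.org/x/tools/cmd/goimports@v0.1.5
```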

@@ -1,16 +1,8 @@
coverage:
status:
project:
default:
target: auto
# this allows a 0.1% drop from the previous base commit coverage
threshold: 0.1%
ignore:
- "apis/v1/zz_generated.deepcopy.go"
- "apis/v1/zz_generated.defaults.go"
- "apis/v1/zz_generated.openapi.go"
- "apis/v1/groupversion_info.go"
- "pkg/kafka/v1beta2/zz_generated.deepcopy.go"
- "pkg/kafka/v1beta2/zz_generated.openapi.go"
- "pkg/kafka/v1beta2/groupversion_info.go"
- "pkg/util/k8s_utils.go"
- "pkg/apis/io/v1alpha1/zz_generated.deepcopy.go"
- "pkg/apis/jaegertracing/v1/zz_generated.deepcopy.go"
- "pkg/apis/io/v1alpha1/zz_generated.defaults.go"
- "pkg/apis/jaegertracing/v1/zz_generated.defaults.go"
- "pkg/apis/jaegertracing/v1/zz_generated.openapi.go"
- "pkg/apis/kafka/v1beta2/zz_generated.deepcopy.go"
- "pkg/apis/kafka/v1beta2/zz_generated.openapi.go"

@@ -1,4 +0,0 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/
testbin/

@@ -1,62 +1,11 @@
version: 2
updates:
- package-ecosystem: docker
directory: "/"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- docker
- ok-to-test
- package-ecosystem: docker
directory: "/tests"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- docker
- ok-to-test
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- go
- ok-to-test
groups:
golang-org-x:
patterns:
- "golang.org/x/*"
opentelemetry:
patterns:
- "go.opentelemetry.io/*"
prometheus:
patterns:
- "github.com/prometheus-operator/prometheus-operator"
- "github.com/prometheus-operator/prometheus-operator/*"
- "github.com/prometheus/prometheus"
- "github.com/prometheus/prometheus/*"
- "github.com/prometheus/client_go"
- "github.com/prometheus/client_go/*"
kubernetes:
patterns:
- "k8s.io/*"
- "sigs.k8s.io/*"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
time: "03:00"
timezone: "Europe/Berlin"
labels:
- dependencies
- github_actions
- ok-to-test

@@ -2,41 +2,26 @@ name: "CI Workflow"
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
branches: [ master ]
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
permissions:
contents: read
branches: [ master ]
jobs:
basic-checks:
runs-on: ubuntu-20.04
env:
USER: jaegertracing
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
- uses: actions/setup-go@v2.1.3
with:
go-version: "1.22"
- name: "install kubebuilder"
run: ./hack/install/install-kubebuilder.sh
- name: "install kustomize"
run: ./hack/install/install-kustomize.sh
go-version: 1.16
- uses: actions/checkout@v2.3.4
- uses: jpkrohling/setup-operator-sdk@v1.0.2
with:
operator-sdk-version: v0.18.2
- name: "basic checks"
run: make install-tools ci
run: ./.ci/script.sh
- name: "upload test coverage report"
uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574 # v5.4.0
with:
token: ${{ secrets.CODECOV_TOKEN }}
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
run: ./.ci/upload-test-coverage.sh

@@ -2,23 +2,12 @@ name: "CodeQL"
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
branches: [ master ]
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
permissions:
contents: read
branches: [ master ]
jobs:
codeql-analyze:
permissions:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
name: CodeQL Analyze
runs-on: ubuntu-latest
@@ -29,24 +18,15 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "Set up Go"
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: "go.mod"
# Disable CodeQL for tests
# https://github.com/github/codeql/issues/4786
- run: rm -rf ./tests
uses: actions/checkout@v2.3.4
- name: Initialize CodeQL
uses: github/codeql-action/init@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/init@v1
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/autobuild@v1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
uses: github/codeql-action/analyze@v1

.github/workflows/e2e-kubernetes.yaml vendored Normal file

@@ -0,0 +1,41 @@
name: "Kubernetes end-to-end tests"
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
end-to-end:
runs-on: ubuntu-20.04
strategy:
matrix:
TEST_GROUP: [smoke, es, cassandra, streaming, examples1, examples2, generate, upgrade, istio]
steps:
- uses: actions/setup-go@v2.1.3
with:
go-version: 1.16
- uses: actions/checkout@v2.3.4
- name: "setup docker"
run: ./.ci/setup-docker.sh
- uses: manusa/actions-setup-minikube@v2.4.2
with:
minikube version: 'v1.20.0'
kubernetes version: 'v1.19.1'
driver: none
github token: ${{ secrets.GITHUB_TOKEN }}
start args: '--addons=ingress --wait=all'
- uses: jpkrohling/setup-kubectl@v1.0.2
- uses: jpkrohling/setup-operator-sdk@v1.0.2
with:
operator-sdk-version: v0.18.2
- name: "install go tools"
run: make install-tools
- name: "running end to end test"
env:
CI: true
TEST_GROUP: ${{ matrix.TEST_GROUP }}
run: ./.ci/run-e2e-tests.sh

.github/workflows/e2e-kuttl.yaml vendored Normal file

@@ -0,0 +1,35 @@
name: "End-to-end tests (kuttl)"
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
e2e-tests:
name: End-to-end tests
runs-on: ubuntu-20.04
strategy:
matrix:
kube-version:
- "1.19"
- "1.20"
- "1.21"
steps:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: "install kuttl"
run: ./.ci/install-kuttl.sh
- name: "run tests"
env:
KUBE_VERSION: ${{ matrix.kube-version }}
run: make install kuttl-e2e KUBE_VERSION=$KUBE_VERSION

@@ -1,84 +0,0 @@
name: E2E tests
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
concurrency:
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
permissions:
contents: read
jobs:
e2e:
name: "Run ${{ matrix.testsuite.label }} E2E tests (${{ matrix.kube-version }})"
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
kube-version:
- "1.19"
- "1.30"
testsuite:
- { name: "elasticsearch", label: "Elasticsearch" }
- { name: "examples", label: "Examples" }
- { name: "generate", label: "Generate" }
- { name: "miscellaneous", label: "Miscellaneous" }
- { name: "sidecar", label: "Sidecar" }
- { name: "streaming", label: "Streaming" }
- { name: "ui", label: "UI" }
- { name: "upgrade", label: "Upgrade" }
steps:
- name: "Check out code into the Go module directory"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: "Set up Go"
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: "1.22"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
with:
install: true
- name: Cache Docker layers
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
with:
path: /tmp/.buildx-cache
key: e2e-${{ github.sha }}
restore-keys: |
e2e-
- name: "Install KIND"
run: ./hack/install/install-kind.sh
shell: bash
- name: "Install KUTTL"
run: ./hack/install/install-kuttl.sh
shell: bash
- name: "Install gomplate"
run: ./hack/install/install-gomplate.sh
shell: bash
- name: "Install dependencies"
run: make install-tools
shell: bash
- name: "Run ${{ matrix.testsuite.label }} E2E test suite on Kube ${{ matrix.kube-version }}"
env:
VERBOSE: "true"
KUBE_VERSION: "${{ matrix.kube-version }}"
DOCKER_BUILD_OPTIONS: "--cache-from type=local,src=/tmp/.buildx-cache --cache-to type=local,dest=/tmp/.buildx-cache-new,mode=max --load"
run: make run-e2e-tests-${{ matrix.testsuite.name }}
shell: bash
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
shell: bash

@@ -1,54 +0,0 @@
name: Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '45 13 * * 1'
push:
branches: [ "main" ]
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
# Uncomment the permissions below if installing in a private repository.
# contents: read
# actions: read
steps:
- name: "Checkout code"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
with:
results_file: results.sarif
results_format: sarif
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
with:
sarif_file: results.sarif

@@ -2,27 +2,20 @@ name: "Publish images"
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
permissions:
contents: read
branches: [ master ]
jobs:
publish:
runs-on: ubuntu-latest
env:
USER: jaegertracing
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- uses: actions/checkout@v2.3.4
- uses: docker/setup-qemu-action@v1.2.0
- uses: docker/setup-buildx-action@v1.5.1
- name: "publishes the images"
env:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
OPERATOR_VERSION: main
OPERATOR_VERSION: master
run: ./.ci/publish-images.sh

@@ -1,43 +1,30 @@
name: "Prepare the release"
name: "Release"
on:
push:
tags:
- 'v*'
- 'release/v*'
jobs:
release:
runs-on: ubuntu-20.04
env:
USER: jaegertracing
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
- uses: actions/setup-go@v2.1.3
with:
go-version: "1.22"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "install kubebuilder"
run: ./hack/install/install-kubebuilder.sh
- name: "install kustomize"
run: ./hack/install/install-kustomize.sh
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- name: "generate release resources"
run: make release-artifacts USER=jaegertracing
- name: "create the release in GitHub"
go-version: 1.16
- uses: actions/checkout@v2.3.4
- uses: jpkrohling/setup-operator-sdk@v1.0.2
with:
operator-sdk-version: v0.18.2
- uses: docker/setup-qemu-action@v1.2.0
- uses: docker/setup-buildx-action@v1.5.1
- name: "perform the release"
env:
GITHUB_TOKEN: ${{ github.token }}
run: ./.ci/create-release-github.sh
GH_WRITE_TOKEN: ${{ secrets.GH_WRITE_TOKEN }}
run: ./.ci/release.sh
- name: "publishes the images"
env:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
run: ./.ci/publish-images.sh

@@ -1,30 +0,0 @@
name: "Operator-SDK Scorecard"
on:
push:
branches: [ main ]
paths-ignore:
- '**.md'
pull_request:
branches: [ main ]
paths-ignore:
- '**.md'
permissions:
contents: read
jobs:
operator-sdk-scorecard:
name: "Operator-SDK Scorecard"
runs-on: ubuntu-latest
steps:
- name: "Check out code"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "Install KIND"
run: ./hack/install/install-kind.sh
- name: "Install KUTTL"
run: ./hack/install/install-kuttl.sh
- name: "Run Operator-SDK scorecard test"
env:
DOCKER_BUILD_OPTIONS: "--cache-from type=local,src=/tmp/.buildx-cache --cache-to type=local,dest=/tmp/.buildx-cache-new,mode=max --load"
run: make scorecard-tests-local

.gitignore vendored

@@ -3,10 +3,7 @@ build/_output
build/_test
deploy/test
vendor
bin
tests/_build
_build
logs
# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
### Emacs ###
# -*- mode: gitignore; -*-
@@ -87,12 +84,3 @@ tags
# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
fmt.log
import.log
### Kubernetes ###
kubeconfig
bin
### Timestamp files to avoid rebuilding Docker images if not needed ###
build-assert-job
docker-e2e-upgrade-image
build-e2e-upgrade-image
### Reports for E2E tests
reports

@@ -1,33 +0,0 @@
issues:
# Excluding configuration per-path, per-linter, per-text and per-source
exclude-rules:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gosec
- linters:
- staticcheck
text: "SA1019:"
linters-settings:
goimports:
local-prefixes: github.com/jaegertracing/jaeger-operator
gosimple:
go: "1.22"
linters:
enable:
- bidichk
- errorlint
- gofumpt
- goimports
- gosec
- govet
- misspell
- testifylint
disable:
- errcheck
run:
go: '1.22'
timeout: 10m

@@ -1,253 +1,5 @@
Changes by Version
==================
## v1.65.0 (2025-01-22)
* Pin agent version to 1.62.0 ([#2790](https://github.com/jaegertracing/jaeger-operator/pull/2790), [@rubenvp8510](https://github.com/rubenvp8510))
* Added compatibility for Jaeger Operator v1.61.x and v1.62.x ([#2725](https://github.com/jaegertracing/jaeger-operator/pull/2725), [@mooneeb](https://github.com/mooneeb))
## v1.62.0 (2024-10-10)
* TRACING-4238 | Fix gateway 502 timeout ([#2694](https://github.com/jaegertracing/jaeger-operator/pull/2694), [@pavolloffay](https://github.com/pavolloffay))
* feat: added missing test for elasticsearch reconciler ([#2662](https://github.com/jaegertracing/jaeger-operator/pull/2662), [@Ankit152](https://github.com/Ankit152))
## v1.61.0 (2024-09-16)
* Bump google.golang.org/grpc from 1.66.0 to 1.66.1 ([#2675](https://github.com/jaegertracing/jaeger-operator/pull/2675), [@dependabot[bot]](https://github.com/apps/dependabot))
* Bump google.golang.org/grpc from 1.65.0 to 1.66.0 ([#2670](https://github.com/jaegertracing/jaeger-operator/pull/2670), [@dependabot[bot]](https://github.com/apps/dependabot))
* Bump the opentelemetry group with 9 updates ([#2668](https://github.com/jaegertracing/jaeger-operator/pull/2668), [@dependabot[bot]](https://github.com/apps/dependabot))
## v1.60.0 (2024-08-13)
* Fix Golang version in go.mod ([#2652](https://github.com/jaegertracing/jaeger-operator/pull/2652), [@iblancasa](https://github.com/iblancasa))
## v1.60.0 (2024-08-09)
* Test on k8s 1.30 ([#2647](https://github.com/jaegertracing/jaeger-operator/pull/2647), [@pavolloffay](https://github.com/pavolloffay))
* Bump go to 1.22 and controller-gen to 1.14 ([#2646](https://github.com/jaegertracing/jaeger-operator/pull/2646), [@pavolloffay](https://github.com/pavolloffay))
## v1.59.0 (2024-08-06)
* Update compatibility matrix for v1.57.x ([#2594](https://github.com/jaegertracing/jaeger-operator/pull/2594), [@mooneeb](https://github.com/mooneeb))
* imagePullSecrets is not set for agent DaemonSet ([#2563](https://github.com/jaegertracing/jaeger-operator/pull/2563), [@antoniomerlin](https://github.com/antoniomerlin))
## v1.57.0 (2024-05-06)
## v1.55.0 (2024-03-22)
* Add server URL to JaegerMetricsStorageSpec ([#2481](https://github.com/jaegertracing/jaeger-operator/pull/2481), [@antoniomerlin](https://github.com/antoniomerlin))
* Use the host set in the Ingess field for the OpenShift Route ([#2409](https://github.com/jaegertracing/jaeger-operator/pull/2409), [@iblancasa](https://github.com/iblancasa))
* Add minimum Kubernetes and OpenShift versions ([#2492](https://github.com/jaegertracing/jaeger-operator/pull/2492), [@andreasgerstmayr](https://github.com/andreasgerstmayr))
## v1.54.0 (2024-02-14)
* apis/v1: add jaeger agent deprecation warning ([#2471](https://github.com/jaegertracing/jaeger-operator/pull/2471), [@frzifus](https://github.com/frzifus))
## v1.53.0 (2024-01-17)
* Choose the newer autoscaling version by default ([#2374](https://github.com/jaegertracing/jaeger-operator/pull/2374), [@iblancasa](https://github.com/iblancasa))
* Upgrade operator-sdk to 1.32.0 ([#2388](https://github.com/jaegertracing/jaeger-operator/pull/2388), [@iblancasa](https://github.com/iblancasa))
* Fix containerImage field and remove statement about failing CI ([#2386](https://github.com/jaegertracing/jaeger-operator/pull/2386), [@iblancasa](https://github.com/iblancasa))
* Fix injection: prefer jaeger in the same namespace ([#2383](https://github.com/jaegertracing/jaeger-operator/pull/2383), [@pavolloffay](https://github.com/pavolloffay))
## v1.52.0 (2023-12-07)
* Add missing container security context settings and tests ([#2354](https://github.com/jaegertracing/jaeger-operator/pull/2354), [@tingeltangelthomas](https://github.com/tingeltangelthomas))
## v1.51.0 (2023-11-17)
* Support configuring images via RELATED_IMAGE_ environment variables ([#2355](https://github.com/jaegertracing/jaeger-operator/pull/2355), [@andreasgerstmayr](https://github.com/andreasgerstmayr))
* Regenerate ES certificated when is close to 1 day for expire ([#2356](https://github.com/jaegertracing/jaeger-operator/pull/2356), [@rubenvp8510](https://github.com/rubenvp8510))
* Bump actions/checkout from 3 to 4 ([#2316](https://github.com/jaegertracing/jaeger-operator/pull/2316), [@dependabot[bot]](https://github.com/apps/dependabot))
* bump grpc to 1.58.3 ([#2346](https://github.com/jaegertracing/jaeger-operator/pull/2346), [@rubenvp8510](https://github.com/rubenvp8510))
* Bump golang version to 1.21 ([#2347](https://github.com/jaegertracing/jaeger-operator/pull/2347), [@rubenvp8510](https://github.com/rubenvp8510))
* Ensure oauth-proxy ImageStream is detected eventually ([#2340](https://github.com/jaegertracing/jaeger-operator/pull/2340), [@bverschueren](https://github.com/bverschueren))
* Check if envFrom has ConfigMapRef set ([#2342](https://github.com/jaegertracing/jaeger-operator/pull/2342), [@edwardecook](https://github.com/edwardecook))
* Bump golang.org/x/net from 0.13.0 to 0.17.0 ([#2343](https://github.com/jaegertracing/jaeger-operator/pull/2343), [@dependabot[bot]](https://github.com/apps/dependabot))
* Fix issue related to new encoding in oauth-proxy image ([#2345](https://github.com/jaegertracing/jaeger-operator/pull/2345), [@iblancasa](https://github.com/iblancasa))
* Always generate new oauth-proxy password ([#2333](https://github.com/jaegertracing/jaeger-operator/pull/2333), [@pavolloffay](https://github.com/pavolloffay))
* Add v1.48.x and v1.49.x to the support map ([#2332](https://github.com/jaegertracing/jaeger-operator/pull/2332), [@ishaqkhattana](https://github.com/ishaqkhattana))
* Pass proxy env vars to operands ([#2330](https://github.com/jaegertracing/jaeger-operator/pull/2330), [@pavolloffay](https://github.com/pavolloffay))
* Protect auth delegator behind a mutex ([#2318](https://github.com/jaegertracing/jaeger-operator/pull/2318), [@iblancasa](https://github.com/iblancasa))
## v1.49.1 (2023-09-07)
* fix: protect the kafka-profision setting behind a mutex ([#2308](https://github.com/jaegertracing/jaeger-operator/pull/2308), [@iblancasa](https://github.com/iblancasa))
## v1.48.1 (2023-09-04)
* Use base image that does not require subscription (centos 9 stream) ([#2313](https://github.com/jaegertracing/jaeger-operator/pull/2313), [@pavolloffay](https://github.com/pavolloffay))
* Update go dependencies to Kubernetes 0.28.1 ([#2301](https://github.com/jaegertracing/jaeger-operator/pull/2301), [@pavolloffay](https://github.com/pavolloffay))
* Protect the ESProvisioning setting behind a mutex ([#2287](https://github.com/jaegertracing/jaeger-operator/pull/2287), [@iblancasa](https://github.com/iblancasa))
## v1.48.0 (2023-08-28)
* Remove the TokenReview after checking we can create it ([#2286](https://github.com/jaegertracing/jaeger-operator/pull/2286), [@iblancasa](https://github.com/iblancasa))
* Fix apiVersion and kind are missing in jaeger-operator generate output ([#2281](https://github.com/jaegertracing/jaeger-operator/pull/2281), [@hiteshwani29](https://github.com/hiteshwani29))
* Fix custom labels for the deployable components in production strategy ([#2277](https://github.com/jaegertracing/jaeger-operator/pull/2277), [@hiteshwani29](https://github.com/hiteshwani29))
* Ensure the OAuth Proxy image detection is run after the platform detection ([#2280](https://github.com/jaegertracing/jaeger-operator/pull/2280), [@iblancasa](https://github.com/iblancasa))
* Added changes to respect env variable set from envFrom configMaps ([#2272](https://github.com/jaegertracing/jaeger-operator/pull/2272), [@hiteshwani29](https://github.com/hiteshwani29))
* Refactor the autodetect module to reduce the number of writes/reads in viper configuration ([#2274](https://github.com/jaegertracing/jaeger-operator/pull/2274), [@iblancasa](https://github.com/iblancasa))
## v1.47.0 (2023-07-12)
* Expose admin ports for agent, collector, and query Deployments via the equivalent Service ([#2262](https://github.com/jaegertracing/jaeger-operator/pull/2262), [@thomaspaulin](https://github.com/thomaspaulin))
* update otel sdk to v1.16.0/v0.39.0 ([#2261](https://github.com/jaegertracing/jaeger-operator/pull/2261), [@frzifus](https://github.com/frzifus))
* Extended compatibility matrix ([#2255](https://github.com/jaegertracing/jaeger-operator/pull/2255), [@shazib-summar](https://github.com/shazib-summar))
* Add support for Kubernetes 1.27 ([#2235](https://github.com/jaegertracing/jaeger-operator/pull/2235), [@iblancasa](https://github.com/iblancasa))
* Jaeger Collector Config: `Lifecycle` and `TerminationGracePeriodSeconds` ([#2242](https://github.com/jaegertracing/jaeger-operator/pull/2242), [@taj-p](https://github.com/taj-p))
## v1.46.0 (2023-06-16)
* Missing exposed port 16685 in query deployments ([#2239](https://github.com/jaegertracing/jaeger-operator/pull/2239), [@iblancasa](https://github.com/iblancasa))
* Use Golang 1.20 ([#2205](https://github.com/jaegertracing/jaeger-operator/pull/2205), [@iblancasa](https://github.com/iblancasa))
* [BugFix] Properly set imagePullPolicy and containerSecurityContext for EsIndexCleaner cronjob container ([#2224](https://github.com/jaegertracing/jaeger-operator/pull/2224), [@michalschott](https://github.com/michalschott))
* Remove resource limitation for the operator pod ([#2221](https://github.com/jaegertracing/jaeger-operator/pull/2221), [@iblancasa](https://github.com/iblancasa))
* Add PriorityClass for AllInOne strategy ([#2218](https://github.com/jaegertracing/jaeger-operator/pull/2218), [@sonofgibs](https://github.com/sonofgibs))
## v1.45.0 (2023-05-16)
## v1.44.0 (2023-04-13)
* Feat: add `NodeSelector` to jaeger collector, query, and ingestor ([#2200](https://github.com/jaegertracing/jaeger-operator/pull/2200), [@AhmedGrati](https://github.com/AhmedGrati))
## v1.43.0 (2023-02-07)
* update operator-sdk to 1.27.0 ([#2178](https://github.com/jaegertracing/jaeger-operator/pull/2178), [@iblancasa](https://github.com/iblancasa))
* Support JaegerCommonSpec in JaegerCassandraCreateSchemaSpec ([#2176](https://github.com/jaegertracing/jaeger-operator/pull/2176), [@haanhvu](https://github.com/haanhvu))
## v1.42.0 (2023-02-07)
* Upgrade Kafka Operator default version to 0.32.0 ([#2150](https://github.com/jaegertracing/jaeger-operator/pull/2150), [@iblancasa](https://github.com/iblancasa))
* Upgrade Kind, Kind images and add Kubernetes 1.26 ([#2161](https://github.com/jaegertracing/jaeger-operator/pull/2161), [@iblancasa](https://github.com/iblancasa))
1.41.1 (2023-01-23)
-------------------
* Fix the Jaeger version for the Jaeger Operator 1.41.x ([#2157](https://github.com/jaegertracing/jaeger-operator/pull/2157), [@iblancasa](https://github.com/iblancasa))
1.40.0 (2022-12-23)
-------------------
* Support e2e tests on multi architecture environment ([#2139](https://github.com/jaegertracing/jaeger-operator/pull/2139), [@jkandasa](https://github.com/jkandasa))
* limit the get of deployments to WATCH_NAMESPACE on sync ([#2126](https://github.com/jaegertracing/jaeger-operator/pull/2126), [@rubenvp8510](https://github.com/rubenvp8510))
* choose first server address ([#2087](https://github.com/jaegertracing/jaeger-operator/pull/2087), [@Efrat19](https://github.com/Efrat19))
* Fix query ingress when using streaming strategy ([#2120](https://github.com/jaegertracing/jaeger-operator/pull/2120), [@kevinearls](https://github.com/kevinearls))
* Fix Liveness Probe for Ingester and Query ([#2122](https://github.com/jaegertracing/jaeger-operator/pull/2122), [@ricoberger](https://github.com/ricoberger))
* Fix for min tls version to v1.2 ([#2119](https://github.com/jaegertracing/jaeger-operator/pull/2119), [@kangsheng89](https://github.com/kangsheng89))
1.39.0 (2022-11-03)
-------------------
* Fix: svc port doesnt match istio convention ([#2101](https://github.com/jaegertracing/jaeger-operator/pull/2101), [@frzifus](https://github.com/frzifus))
1.38.1 (2022-10-11)
-------------------
* Add ability to specify es proxy resources ([#2079](https://github.com/jaegertracing/jaeger-operator/pull/2079), [@rubenvp8510](https://github.com/rubenvp8510))
* Fix: CVE-2022-27664 ([#2081](https://github.com/jaegertracing/jaeger-operator/pull/2081), [@albertlockett](https://github.com/albertlockett))
* Add liveness and readiness probes to injected sidecar ([#2077](https://github.com/jaegertracing/jaeger-operator/pull/2077), [@MacroPower](https://github.com/MacroPower))
* Add http- port prefix to follow istio naming conventions ([#2075](https://github.com/jaegertracing/jaeger-operator/pull/2075), [@cnvergence](https://github.com/cnvergence))
1.38.0 (2022-09-19)
-------------------
* added pathType to ingress ([#2066](https://github.com/jaegertracing/jaeger-operator/pull/2066), [@giautm](https://github.com/giautm))
* set alias enable variable for spark cronjob ([#2061](https://github.com/jaegertracing/jaeger-operator/pull/2061), [@miyunari](https://github.com/miyunari))
* migrate autoscaling v2beta2 to v2 for Kubernetes 1.26 ([#2055](https://github.com/jaegertracing/jaeger-operator/pull/2055), [@iblancasa](https://github.com/iblancasa))
* add container security context support ([#2033](https://github.com/jaegertracing/jaeger-operator/pull/2033), [@mjnagel](https://github.com/mjnagel))
* change verbosity level and message of the log for autoprovisioned kafka ([#2026](https://github.com/jaegertracing/jaeger-operator/pull/2026), [@iblancasa](https://github.com/iblancasa))
1.37.0 (2022-08-11)
-------------------
* Upgrade operator-sdk to 1.22.2 ([#2021](https://github.com/jaegertracing/jaeger-operator/pull/2021), [@iblancasa](https://github.com/iblancasa))
* es-dependencies: support image pull secret ([#2012](https://github.com/jaegertracing/jaeger-operator/pull/2012), [@frzifus](https://github.com/frzifus))
1.36.0 (2022-07-18)
-------------------
* added flag to change webhook port ([#1991](https://github.com/jaegertracing/jaeger-operator/pull/1991), [@klubi](https://github.com/klubi))
* Upgrade operator-sdk to 1.22.0 ([#1951](https://github.com/jaegertracing/jaeger-operator/pull/1951), [@iblancasa](https://github.com/iblancasa))
* Add elasticsearch storage date format config. ([#1325](https://github.com/jaegertracing/jaeger-operator/pull/1325), [@sniperking1234](https://github.com/sniperking1234))
* Add support for custom liveness probe ([#1605](https://github.com/jaegertracing/jaeger-operator/pull/1605), [@ricoberger](https://github.com/ricoberger))
* Add service annotations ([#1526](https://github.com/jaegertracing/jaeger-operator/pull/1526), [@herbguo](https://github.com/herbguo))
1.35.0 (2022-06-16)
-------------------
* fix: point to a newer openshift oauth image 4.12 ([#1955](https://github.com/jaegertracing/jaeger-operator/pull/1955), [@frzifus](https://github.com/frzifus))
* Expose OTLP collector and allInOne ports ([#1948](https://github.com/jaegertracing/jaeger-operator/pull/1948), [@rubenvp8510](https://github.com/rubenvp8510))
* Add support for ImagePullSecrets in cronjobs ([#1935](https://github.com/jaegertracing/jaeger-operator/pull/1935), [@alexandrevilain](https://github.com/alexandrevilain))
* fix: ocp es rollover #1932 ([#1937](https://github.com/jaegertracing/jaeger-operator/pull/1937), [@frzifus](https://github.com/frzifus))
* add kafkaSecretName for collector and ingester ([#1910](https://github.com/jaegertracing/jaeger-operator/pull/1910), [@luohua13](https://github.com/luohua13))
* Add autoscalability E2E test for OpenShift ([#1936](https://github.com/jaegertracing/jaeger-operator/pull/1936), [@iblancasa](https://github.com/iblancasa))
* Fix version in Docker container. ([#1924](https://github.com/jaegertracing/jaeger-operator/pull/1924), [@iblancasa](https://github.com/iblancasa))
* Verify namespace permissions before adding ns controller ([#1914](https://github.com/jaegertracing/jaeger-operator/pull/1914), [@rubenvp8510](https://github.com/rubenvp8510))
* fix: skip dependencies on openshift platform ([#1921](https://github.com/jaegertracing/jaeger-operator/pull/1921), [@frzifus](https://github.com/frzifus))
* fix: remove common name label ([#1920](https://github.com/jaegertracing/jaeger-operator/pull/1920), [@frzifus](https://github.com/frzifus))
* Ignore not found error on 1.31.0 upgrade routine ([#1913](https://github.com/jaegertracing/jaeger-operator/pull/1913), [@rubenvp8510](https://github.com/rubenvp8510))
1.34.1 (2022-05-24)
-------------------
* Fix: storage.es.tls.enabled flag not passed to es-index-cleaner ([#1896](https://github.com/jaegertracing/jaeger-operator/pull/1896), [@indigostar-kr](https://github.com/indigostar-kr))
1.34.0 (2022-05-18)
-------------------
* Fix: jaeger operator fails to parse Jaeger instance version ([#1885](https://github.com/jaegertracing/jaeger-operator/pull/1885), [@rubenvp8510](https://github.com/rubenvp8510))
* Support Kubernetes 1.24 ([#1882](https://github.com/jaegertracing/jaeger-operator/pull/1882), [@iblancasa](https://github.com/iblancasa))
* Cronjob migration ([#1856](https://github.com/jaegertracing/jaeger-operator/pull/1856), [@kevinearls](https://github.com/kevinearls))
* Fix: setting default Istio annotation in Pod instead of Deployment ([#1860](https://github.com/jaegertracing/jaeger-operator/pull/1860), [@cnvergence](https://github.com/cnvergence))
* Add http- prefix to port names in collector and agent services ([#1862](https://github.com/jaegertracing/jaeger-operator/pull/1862), [@cnvergence](https://github.com/cnvergence))
1.33.0 (2022-04-14)
-------------------
* Adding priority-class for esIndexCleaner ([#1732](https://github.com/jaegertracing/jaeger-operator/pull/1732), [@swapnilpotnis](https://github.com/swapnilpotnis))
* Fix: webhook deadlock ([#1850](https://github.com/jaegertracing/jaeger-operator/pull/1850), [@frzifus](https://github.com/frzifus))
* Fix: take namespace modifications into account ([#1839](https://github.com/jaegertracing/jaeger-operator/pull/1839), [@frzifus](https://github.com/frzifus))
* Replace deployment reconciler with webhook ([#1828](https://github.com/jaegertracing/jaeger-operator/pull/1828), [@frzifus](https://github.com/frzifus))
* Add managed by metric ([#1831](https://github.com/jaegertracing/jaeger-operator/pull/1831), [@rubenvp8510](https://github.com/rubenvp8510))
* Fix admissionReviews version for operator-sdk upgrade ([#1827](https://github.com/jaegertracing/jaeger-operator/pull/1827), [@kevinearls](https://github.com/kevinearls))
* Make RHOL Elasticsearch cert-management feature optional ([#1824](https://github.com/jaegertracing/jaeger-operator/pull/1824), [@pavolloffay](https://github.com/pavolloffay))
* Update the operator-sdk to v1.17.0 ([#1825](https://github.com/jaegertracing/jaeger-operator/pull/1825), [@kevinearls](https://github.com/kevinearls))
* Fix metrics selectors ([#1742](https://github.com/jaegertracing/jaeger-operator/pull/1742), [@rubenvp8510](https://github.com/rubenvp8510))
1.32.0 (2022-03-09)
-------------------
* Custom Image Pull Policy ([#1798](https://github.com/jaegertracing/jaeger-operator/pull/1798), [@edenkoveshi](https://github.com/edenkoveshi))
* add METRICS_STORAGE_TYPE for metrics query ([#1755](https://github.com/jaegertracing/jaeger-operator/pull/1755), [@JaredTan95](https://github.com/JaredTan95))
* Make operator more resiliant to etcd defrag activity ([#1795](https://github.com/jaegertracing/jaeger-operator/pull/1795), [@pavolloffay](https://github.com/pavolloffay))
* Automatically set num shards and replicas from referenced OCP ES ([#1737](https://github.com/jaegertracing/jaeger-operator/pull/1737), [@pavolloffay](https://github.com/pavolloffay))
* support image pull secrets ([#1740](https://github.com/jaegertracing/jaeger-operator/pull/1740), [@frzifus](https://github.com/frzifus))
* Fix webhook secret cert name ([#1772](https://github.com/jaegertracing/jaeger-operator/pull/1772), [@rubenvp8510](https://github.com/rubenvp8510))
1.31.0 (2022-02-09)
-------------------
* Fix panic caused by an invalid type assertion ([#1738](https://github.com/jaegertracing/jaeger-operator/pull/1738), [@frzifus](https://github.com/frzifus))
* Add ES autoprovisioning CR metric ([#1728](https://github.com/jaegertracing/jaeger-operator/pull/1728), [@rubenvp8510](https://github.com/rubenvp8510))
* Use Elasticsearch provisioning from OpenShift Elasticsearch operator ([#1708](https://github.com/jaegertracing/jaeger-operator/pull/1708), [@pavolloffay](https://github.com/pavolloffay))
1.30.0 (2022-01-18)
-------------------
* Only expose the query-http[s] port in the OpenShift route ([#1719](https://github.com/jaegertracing/jaeger-operator/pull/1719), [@rkukura](https://github.com/rkukura))
* Add CR Metrics for Jaeger Kind. ([#1706](https://github.com/jaegertracing/jaeger-operator/pull/1706), [@rubenvp8510](https://github.com/rubenvp8510))
* Avoid calling k8s api for each resource kind on the cluster ([#1712](https://github.com/jaegertracing/jaeger-operator/pull/1712), [@rubenvp8510](https://github.com/rubenvp8510))
* First call of autodetect should be synchronous ([#1713](https://github.com/jaegertracing/jaeger-operator/pull/1713), [@rubenvp8510](https://github.com/rubenvp8510))
* Add permissions for imagestreams ([#1714](https://github.com/jaegertracing/jaeger-operator/pull/1714), [@rubenvp8510](https://github.com/rubenvp8510))
* Restore default metrics port to avoid breaking helm ([#1703](https://github.com/jaegertracing/jaeger-operator/pull/1703), [@rubenvp8510](https://github.com/rubenvp8510))
* Add leases permissions to manifest. ([#1704](https://github.com/jaegertracing/jaeger-operator/pull/1704), [@rubenvp8510](https://github.com/rubenvp8510))
* Change spark-dependencies image to GHCR ([#1701](https://github.com/jaegertracing/jaeger-operator/pull/1701), [@pavolloffay](https://github.com/pavolloffay))
* Register ES types ([#1688](https://github.com/jaegertracing/jaeger-operator/pull/1688), [@rubenvp8510](https://github.com/rubenvp8510))
* Add support for IBM Power (ppc64le) arch ([#1672](https://github.com/jaegertracing/jaeger-operator/pull/1672), [@Abhijit-Mane](https://github.com/Abhijit-Mane))
* util.Truncate add the values to the truncated after the excess is 0 ([#1678](https://github.com/jaegertracing/jaeger-operator/pull/1678), [@mmatache](https://github.com/mmatache))
1.29.1 (2021-12-15)
-------------------
* Register oschema for openshift resources ([#1673](https://github.com/jaegertracing/jaeger-operator/pull/1673), [@rubenvp8510](https://github.com/rubenvp8510))
1.29.0 (2021-12-10)
-------------------
* Fix default namespace ([#1651](https://github.com/jaegertracing/jaeger-operator/pull/1651), [@rubenvp8510](https://github.com/rubenvp8510))
* Fix finding the correct instance when there are multiple jaeger instances during injecting the sidecar ([#1639](https://github.com/jaegertracing/jaeger-operator/pull/1639), [@alibo](https://github.com/alibo))
* Migrate to operator-sdk 1.13 ([#1623](https://github.com/jaegertracing/jaeger-operator/pull/1623), [@rubenvp8510](https://github.com/rubenvp8510))
1.28.0 (2021-11-08)
-------------------
* Use CRDs to detect features in the cluster ([#1608](https://github.com/jaegertracing/jaeger-operator/pull/1608), [@pavolloffay](https://github.com/pavolloffay))
* Make ServiceMonitor creation optional ([#1323](https://github.com/jaegertracing/jaeger-operator/pull/1323), [@igorwwwwwwwwwwwwwwwwwwww](https://github.com/igorwwwwwwwwwwwwwwwwwwww))
* Change default OpenShift query ingress SAR to pods in the jaeger namespace ([#1583](https://github.com/jaegertracing/jaeger-operator/pull/1583), [@pavolloffay](https://github.com/pavolloffay))
* Fix gRPC flags for OpenShift when 'reporter.grpc.host-port' is defined ([#1584](https://github.com/jaegertracing/jaeger-operator/pull/1584), [@Git-Jiro](https://github.com/Git-Jiro))
1.27.0 (2021-10-07)
-------------------
* Allow sidecar injection for query pod from other Jaeger instances ([#1569](https://github.com/jaegertracing/jaeger-operator/pull/1569), [@pavolloffay](https://github.com/pavolloffay))
* Avoid touching jaeger deps on deployment/ns controller ([#1529](https://github.com/jaegertracing/jaeger-operator/pull/1529), [@rubenvp8510](https://github.com/rubenvp8510))
1.26.0 (2021-09-30)
-------------------
* Add ingressClassName field to query ingress ([#1557](https://github.com/jaegertracing/jaeger-operator/pull/1557), [@rubenvp8510](https://github.com/rubenvp8510))
* Add disconnected annotation to csv ([#1536](https://github.com/jaegertracing/jaeger-operator/pull/1536), [@rubenvp8510](https://github.com/rubenvp8510))
1.25.0 (2021-08-08)
-------------------

@@ -1,34 +0,0 @@
The following table shows the compatibility of Jaeger Operator with three different components: Kubernetes, Strimzi Operator, and Cert-Manager.
| Jaeger Operator | Kubernetes | Strimzi Operator | Cert-Manager |
|-----------------|----------------|--------------------|--------------|
| v1.62.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.61.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.60.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
| v1.59.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.58.x | skipped | skipped | skipped |
| v1.57.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.56.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.55.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.54.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.53.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.52.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.51.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.50.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.49.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
| v1.48.x | v1.19 to v1.27 | v0.32 | v1.6.1 |
| v1.47.x | v1.19 to v1.27 | v0.32 | v1.6.1 |
| v1.46.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.45.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.44.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.43.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.42.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
| v1.41.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.40.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.39.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.38.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
| v1.37.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.36.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.35.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.34.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
| v1.33.x | v1.19 to v1.23 | v0.23 | v1.6.1 |

@@ -6,79 +6,96 @@ This project is [Apache 2.0 licensed](LICENSE) and accepts contributions via Git
We gratefully welcome improvements to documentation as well as to code.
## Getting Started
This project is a regular [Kubernetes Operator](https://coreos.com/operators/) built using the Operator SDK. Refer to the Operator SDK documentation to understand the basic architecture of this operator.
## Installing the Operator SDK command line tool
### Installing the Operator SDK command line tool
Follow the installation guidelines from [Operator SDK GitHub page](https://github.com/operator-framework/operator-sdk)
Follow the installation guidelines from [Operator SDK GitHub page](https://github.com/operator-framework/operator-sdk) or run `make install-sdk`.
## Developing
### Developing
As usual for operators following the Operator SDK in recent versions, the dependencies are managed using [`go modules`](https://golang.org/doc/go1.11#modules). Refer to that project's documentation for instructions on how to add or update dependencies.
The first step is to get a local Kubernetes instance up and running. The recommended approach for development is using `minikube` with *ingress* enabled. Refer to the Kubernetes' [documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) for instructions on how to install it.
The first step is to get a local Kubernetes instance up and running. The recommended approach is using `minikube`. Refer to the Kubernetes' [documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) for instructions on how to install it.
Once `minikube` is installed, it can be started with:
```sh
minikube start --addons=ingress
```
minikube start
```
NOTE: Make sure to read the documentation to learn the performance switches that can be applied to your platform.
Log into docker (or another image registry):
```sh
docker login --username <dockerusername>
```
Once minikube has finished starting, get the Operator running:
```sh
make cert-manager
IMG=docker.io/$USER/jaeger-operator:latest make generate bundle docker push deploy
```
NOTE: If your registry username is not the same as $USER, modify the previous command before executing it. Also change *docker.io* if you are using a different image registry.
```
make run
```
At this point, a Jaeger instance can be installed:
```sh
```
kubectl apply -f examples/simplest.yaml
kubectl get jaegers
kubectl get pods
```
To verify the Jaeger instance is running, execute *minikube ip* and open that address in a browser, or follow the steps below
```sh
export MINIKUBE_IP=$(minikube ip)
curl http://${MINIKUBE_IP}/api/services
```
NOTE: you may have to execute the *curl* command twice to get a non-empty result
Tests should be simple unit tests and/or end-to-end tests. For small changes, unit tests should be sufficient, but every new feature should be accompanied with end-to-end tests as well. Tests can be executed with:
```sh
make test
```
#### Cleaning up
To remove the instance:
```sh
```
kubectl delete -f examples/simplest.yaml
```
Tests should be simple unit tests and/or end-to-end tests. For small changes, unit tests should be sufficient, but every new feature should be accompanied with end-to-end tests as well. Tests can be executed with:
```
make test
```
NOTE: you can adjust the Docker image namespace by overriding the variable `NAMESPACE`, like: `make test NAMESPACE=quay.io/my-username`. The full Docker image name can be customized by overriding `BUILD_IMAGE` instead, like: `make test BUILD_IMAGE=quay.io/my-username/jaeger-operator:0.0.1`
#### Model changes
The Operator SDK generates the `pkg/apis/jaegertracing/v1/zz_generated.*.go` files via the command `make generate`. This should be executed whenever there's a model change (`pkg/apis/jaegertracing/v1/jaeger_types.go`).
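A typical workflow sketch, assuming you just edited the model file:
```sh
# after editing pkg/apis/jaegertracing/v1/jaeger_types.go:
make generate
# inspect the regenerated code before committing:
git diff pkg/apis/jaegertracing/v1/
```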
### Storage configuration
#### Ingress configuration
Kubernetes comes with no ingress provider by default. For development purposes, when running `minikube`, the following command can be executed to install an ingress provider:
```
make ingress
```
This will install the `NGINX` ingress provider. It's recommended to wait for the ingress pods to be in the `READY` and `RUNNING` state before starting the operator. You can check it by running:
```
kubectl get pods -n ingress-nginx
```
To verify that it's working, deploy the `simplest.yaml` and check the ingress routes:
```
$ kubectl apply -f examples/simplest.yaml
jaeger.jaegertracing.io/simplest created
$ kubectl get ingress
NAME HOSTS ADDRESS PORTS AGE
simplest-query * 192.168.122.69 80 12s
```
Accessing the provided "address" in your web browser should display the Jaeger UI.
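For example, using the address from the output above:
```sh
curl -s http://192.168.122.69/api/services
```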
#### Storage configuration
There is a set of templates under the `test` directory that can be used to set up an Elasticsearch and/or Cassandra cluster. Alternatively, the following commands can be executed to install them:
```sh
```
make es
make cassandra
```
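To confirm the storage pods came up, a quick check like the following can help (replace `${STORAGE_NAMESPACE}` with the namespace you used; see `STORAGE_NAMESPACE` in the `Makefile`):
```sh
kubectl get pods -n "${STORAGE_NAMESPACE}"
```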
### Operator-Lifecycle-Manager Integration
#### Operator-Lifecycle-Manager Integration
The [Operator-Lifecycle-Manager (OLM)](https://github.com/operator-framework/operator-lifecycle-manager/) can install, manage, and upgrade operators and their dependencies in a cluster.
@ -90,22 +107,23 @@ With OLM, users can:
OLM also enforces some constraints on the components it manages in order to ensure a good user experience.
The Jaeger community provides and maintains a [ClusterServiceVersion (CSV) YAML](https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md) to integrate with OLM.
The Jaeger community provides and maintains a [ClusterServiceVersion (CSV) YAML](https://github.com/operator-framework/operator-lifecycle-manager/blob/master/Documentation/design/building-your-csv.md/) to integrate with OLM.
Starting from operator-sdk v0.5.0, one can generate and update CSVs based on the yaml files in the deploy folder.
The Jaeger CSV can be updated to version 1.9.0 with the following command:
```sh
```
$ operator-sdk generate csv --csv-version 1.9.0
INFO[0000] Generating CSV manifest version 1.9.0
INFO[0000] Create deploy/olm-catalog/jaeger-operator.csv.yaml
INFO[0000] Create deploy/olm-catalog/_generated.concat_crd.yaml
INFO[0000] Create deploy/olm-catalog/jaeger-operator.csv.yaml
INFO[0000] Create deploy/olm-catalog/_generated.concat_crd.yaml
```
The generated CSV yaml should then be compared and used to update the `deploy/olm-catalog/jaeger.clusterserviceversion.yaml` file which represents the stable version copied to the operatorhub following each jaeger operator release. Once merged, the `jaeger-operator.csv.yaml` file should be removed.
The generated CSV yaml should then be compared and used to update the deploy/olm-catalog/jaeger.clusterserviceversion.yaml file which represents the stable version copied to the operatorhub following each jaeger operator release. Once merged, the jaeger-operator.csv.yaml file should be removed.
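One way to do that comparison is a plain diff of the generated file against the stable one, e.g.:
```sh
diff -u deploy/olm-catalog/jaeger-operator.csv.yaml deploy/olm-catalog/jaeger.clusterserviceversion.yaml
```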
The `jaeger.clusterserviceversion.yaml` file can then be tested with this command:
```sh
The jaeger.clusterserviceversion.yaml file can then be tested with this command:
```
$ operator-sdk scorecard --cr-manifest examples/simplest.yaml --csv-path deploy/olm-catalog/jaeger.clusterserviceversion.yaml --init-timeout 30
Checking for existence of spec and status blocks in CR
Checking that operator actions are reflected in status
@ -128,113 +146,50 @@ OLM Integration:
Total Score: 4/18 points
```
## E2E tests
#### E2E tests
### Prerequisites
Before running the E2E tests you need to install:
* [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation): a tool for running local Kubernetes clusters
* [KUTTL](https://kuttl.dev/docs/cli.html#setup-the-kuttl-kubectl-plugin): a tool to run the Kubernetes tests
### Running the E2E tests
#### Using KIND cluster
The whole set of end-to-end tests can be executed via:
```sh
$ make run-e2e-tests
```
$ make e2e-tests
```
The end-to-end tests are split into tags and can be executed in separate groups, such as:
```sh
$ make run-e2e-tests-examples
```
$ make e2e-tests-smoke
```
Other targets include `run-e2e-tests-cassandra` and `run-e2e-tests-elasticsearch`. You can list them by running:
```sh
$ make e2e-test-suites
Other targets include `e2e-tests-cassandra` and `e2e-tests-elasticsearch`. Refer to the `Makefile` for an up-to-date list of targets.
If you face issues like the one below, make sure you don't have any Jaeger instances (`kubectl get jaegers`) or Ingresses (`kubectl get ingresses`) left running:
```
--- FAIL: TestSmoke (316.59s)
--- FAIL: TestSmoke/smoke (316.55s)
--- FAIL: TestSmoke/smoke/daemonset (115.54s)
...
...
daemonset.go:30: timed out waiting for the condition
...
...
```
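A quick way to check for (and clean up) such leftovers is, for example:
```sh
kubectl get jaegers --all-namespaces
kubectl get ingresses --all-namespaces
# remove leftover Jaeger instances from a previous run, if any:
kubectl delete jaegers --all --all-namespaces
```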
**Note**: there are some variables you can set to tune how the E2E tests run; a combined example follows the tables below.
##### Kuttl E2E tests
| Variable name | Description | Example usage |
|-------------------|-----------------------------------------------------|------------------------------------|
| KUTTL_OPTIONS | Options to pass directly to the KUTTL call | KUTTL_OPTIONS="--test es-rollover" |
| E2E_TESTS_TIMEOUT | Timeout, in seconds, for each step of the E2E tests | E2E_TESTS_TIMEOUT=500 |
| USE_KIND_CLUSTER | Start a KIND cluster to run the E2E tests | USE_KIND_CLUSTER=true |
| KIND_KEEP_CLUSTER | Do not remove the KIND cluster after running the tests | KIND_KEEP_CLUSTER=true |
There are some tests that use [Kuttl](https://kuttl.dev/); those tests can be executed via:
Also, you can enable/disable the installation of the different operators needed
to run the tests:
| Variable name | Description | Example usage |
|----------------|---------------------------------------------|---------------------|
| JAEGER_OLM | Jaeger Operator was installed using OLM | JAEGER_OLM=true |
| KAFKA_OLM | Kafka Operator was installed using OLM | KAFKA_OLM=true |
| PROMETHEUS_OLM | Prometheus Operator was installed using OLM | PROMETHEUS_OLM=true |
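For instance, a run that raises the per-step timeout, keeps the KIND cluster afterwards, and assumes the Jaeger Operator was installed via OLM could look like:
```sh
make run-e2e-tests-examples E2E_TESTS_TIMEOUT=500 KIND_KEEP_CLUSTER=true JAEGER_OLM=true
```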
#### An external cluster (like OpenShift)
The commands from the previous section are valid when running the E2E tests in an
external cluster like OpenShift, minikube or other Kubernetes environment. The only
differences are:
* You need to log in your Kubernetes cluster before running the E2E tests
* You need to provide the `USE_KIND_CLUSTER=false` parameter when calling `make`
```sh
$ make run-e2e-tests USE_KIND_CLUSTER=false
```
$ make kuttl-e2e
```
### Developing new E2E tests
E2E tests are located under `tests/e2e`. Each folder is associated with an E2E test suite. The
tests are developed using KUTTL. Before developing a new test, [learn how KUTTL tests work](https://kuttl.dev/docs/what-is-kuttl.html).
To add a new suite, create a new folder named after the suite under `tests/e2e`.
Each suite folder contains:
* `Makefile`: describes the rules for rendering the files needed by your tests and for running the tests
* `render.sh`: renders all the files needed for your tests (or skips them)
* A folder per test to run
When the tests are rendered, each test folder is copied to `_build`. The files generated
by `render.sh` are created under `_build/<test name>`.
##### Makefile
The `Makefile` file must contain two rules:
```Makefile
render-e2e-tests-<suite name>: set-assert-e2e-img-name
./tests/e2e/<suite name>/render.sh
run-e2e-tests-<suite name>: TEST_SUITE_NAME=<suite name>
run-e2e-tests-<suite name>: run-suite-tests
```
Where `<suite name>` is the name of your E2E test suite. Your E2E test suite
will be automatically indexed in the `run-e2e-tests` Makefile target.
##### render.sh
This file renders all the YAML files that are part of the E2E test. The `render.sh`
file must start with:
```bash
#!/bin/bash
source $(dirname "$0")/../render-utils.sh
```
The `render-utils.sh` file contains multiple helper functions that make it easier to develop E2E tests and reuse logic. Review the documentation of each function in that file to understand its parameters and effects.
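As an illustration only (the helper names below are hypothetical; check `render-utils.sh` for the functions that actually exist), a minimal `render.sh` could look like:
```bash
#!/bin/bash
source $(dirname "$0")/../render-utils.sh

# hypothetical helpers, for illustration; see render-utils.sh for the real API
start_test "my-new-test"
render_smoke_test "my-jaeger" "01"
```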
You first need to install [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) in order to run the KUTTL-based e2e tests
#### Building [OCI Images](https://github.com/opencontainers/image-spec/blob/master/spec.md) for multiple arch (linux/arm64, linux/amd64)
OCI images can be built and published with [buildx](https://github.com/docker/buildx); for a local test it can be executed via:
```sh
```
$ OPERATOR_VERSION=devel ./.ci/publish-images.sh
```
@ -244,7 +199,7 @@ if we want to execute this in local env, need to setup buildx:
1. install docker cli plugin
```sh
```
$ export DOCKER_BUILDKIT=1
$ docker build --platform=local -o . git://github.com/docker/buildx
$ mkdir -p ~/.docker/cli-plugins
@ -254,13 +209,13 @@ $ mv buildx ~/.docker/cli-plugins/docker-buildx
2. install qemu for multi arch
```sh
```
$ docker run --privileged --rm tonistiigi/binfmt --install all
```
(via https://github.com/docker/buildx#building-multi-platform-images)
3. create a builder
```sh
```
$ docker buildx create --use --name builder
```
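With the builder in place, a multi-arch image can be built and pushed in one step; a sketch, with the image name and platform list as placeholders:
```sh
$ docker buildx build --platform=linux/amd64,linux/arm64 -t docker.io/$USER/jaeger-operator:devel --push .
```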

View File

@ -1,56 +0,0 @@
# Build the manager binary
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22@sha256:f43c6f049f04cbbaeb28f0aad3eea15274a7d0a7899a617d0037aec48d7ab010 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
COPY hack/install/install-dependencies.sh hack/install/
COPY hack/install/install-utils.sh hack/install/
COPY go.mod .
COPY go.sum .
RUN ./hack/install/install-dependencies.sh
# Copy the go source
COPY main.go main.go
COPY apis/ apis/
COPY cmd/ cmd/
COPY controllers/ controllers/
COPY pkg/ pkg/
COPY versions.txt versions.txt
ARG JAEGER_VERSION
ARG JAEGER_AGENT_VERSION
ARG VERSION_PKG
ARG VERSION
ARG VERSION_DATE
# `FROM --platform=${BUILDPLATFORM}` prepares the image build for the matching BUILDPLATFORM, e.g. linux/amd64.
# This way we avoid using QEMU, which would slow down the compilation process,
# and it is useful for languages with native multi-arch build support, like Go.
# See the last part of https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
ARG TARGETARCH
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -ldflags="-X ${VERSION_PKG}.version=${VERSION} -X ${VERSION_PKG}.buildDate=${VERSION_DATE} -X ${VERSION_PKG}.defaultJaeger=${JAEGER_VERSION} -X ${VERSION_PKG}.defaultAgent=${JAEGER_AGENT_VERSION}" -a -o jaeger-operator main.go
FROM quay.io/centos/centos:stream9
ENV USER_UID=1001 \
USER_NAME=jaeger-operator
RUN INSTALL_PKGS="openssl" && \
dnf install -y $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
dnf clean all && \
mkdir /tmp/_working_dir && \
chmod og+w /tmp/_working_dir
WORKDIR /
COPY --from=builder /workspace/jaeger-operator .
COPY scripts/cert_generation.sh scripts/cert_generation.sh
USER ${USER_UID}:${USER_UID}
ENTRYPOINT ["/jaeger-operator"]

View File

@ -1,35 +1,20 @@
# Build the manager binary
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22@sha256:f43c6f049f04cbbaeb28f0aad3eea15274a7d0a7899a617d0037aec48d7ab010 as builder
FROM golang:1.16 as builder
WORKDIR /workspace
COPY . /go/src/github.com/jaegertracing/jaeger-operator/
WORKDIR /go/src/github.com/jaegertracing/jaeger-operator
# Download the dependencies first. This way, if the source code changes but the
# dependencies do not, the image build can reuse the cached dependency layer.
COPY hack/install/install-dependencies.sh hack/install/
COPY hack/install/install-utils.sh hack/install/
COPY go.mod .
COPY go.sum .
RUN ./hack/install/install-dependencies.sh
COPY tests tests
ENV CGO_ENABLED=0
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Build
ARG TARGETOS
ARG TARGETARCH
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -o ./uiconfig -a ./tests/assert-jobs/uiconfig/main.go
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -o ./reporter -a ./tests/assert-jobs/reporter/main.go
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -o ./query -a ./tests/assert-jobs/query/main.go
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./reporter -a ./tests/assert-jobs/reporter/main.go
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./reporter-otlp -a ./tests/assert-jobs/reporter-otlp/main.go
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./query -a ./tests/assert-jobs/query/main.go
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./index -a ./tests/assert-jobs/index/main.go
# Use the curl container image to ensure we have curl installed. Also, it is a
# minimal container image
FROM curlimages/curl@sha256:94e9e444bcba979c2ea12e27ae39bee4cd10bc7041a472c4727a558e213744e6
FROM scratch
WORKDIR /
COPY --from=builder /workspace/reporter .
COPY --from=builder /workspace/reporter-otlp .
COPY --from=builder /workspace/query .
COPY --from=builder /workspace/index .
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/uiconfig .
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/reporter .
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/query .

750
Makefile
View File

@ -1,518 +1,508 @@
include tests/e2e/Makefile
# When the VERBOSE variable is set to "true", all the commands are shown
ifeq ("$(VERBOSE)","true")
echo_prefix=">>>>"
else
VECHO = @
endif
VERSION_DATE ?= $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x
GOARCH ?= $(go env GOARCH)
GOOS ?= $(go env GOOS)
GO_FLAGS ?= GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 GO111MODULE=on
GOPATH ?= "$(HOME)/go"
GOROOT ?= "$(shell go env GOROOT)"
KUBERNETES_CONFIG ?= "$(HOME)/.kube/config"
WATCH_NAMESPACE ?= ""
BIN_DIR ?= bin
BIN_DIR ?= "build/_output/bin"
IMPORT_LOG=import.log
FMT_LOG=fmt.log
ECHO ?= @echo $(echo_prefix)
SED ?= "sed"
# Jaeger Operator build variables
OPERATOR_NAME ?= jaeger-operator
IMG_PREFIX ?= quay.io/${USER}
OPERATOR_VERSION ?= "$(shell grep -v '\#' versions.txt | grep operator | awk -F= '{print $$2}')"
VERSION ?= "$(shell grep operator= versions.txt | awk -F= '{print $$2}')"
IMG ?= ${IMG_PREFIX}/${OPERATOR_NAME}:${VERSION}
BUNDLE_IMG ?= ${IMG_PREFIX}/${OPERATOR_NAME}-bundle:$(addprefix v,${VERSION})
OUTPUT_BINARY ?= "$(BIN_DIR)/jaeger-operator"
VERSION_PKG ?= "github.com/jaegertracing/jaeger-operator/pkg/version"
export JAEGER_VERSION ?= "$(shell grep jaeger= versions.txt | awk -F= '{print $$2}')"
# The agent was removed after Jaeger 1.62.0, and newer versions of Jaeger no longer distribute
# the agent images. For that reason the last agent version, 1.62.0, is pinned here so we can
# update Jaeger while keeping the latest available agent image.
export JAEGER_AGENT_VERSION ?= "1.62.0"
# Kafka and Kafka Operator variables
OPERATOR_NAME ?= jaeger-operator
NAMESPACE ?= "$(USER)"
BUILD_IMAGE ?= "$(NAMESPACE)/$(OPERATOR_NAME):latest"
IMAGE_TAGS ?= "--tag $(BUILD_IMAGE)"
OUTPUT_BINARY ?= "$(BIN_DIR)/$(OPERATOR_NAME)"
VERSION_PKG ?= "github.com/jaegertracing/jaeger-operator/pkg/version"
JAEGER_VERSION ?= "$(shell grep jaeger= versions.txt | awk -F= '{print $$2}')"
OPERATOR_VERSION ?= "$(shell git describe --tags)"
STORAGE_NAMESPACE ?= "${shell kubectl get sa default -o jsonpath='{.metadata.namespace}' || oc project -q}"
KAFKA_NAMESPACE ?= "kafka"
KAFKA_VERSION ?= 0.32.0
KAFKA_EXAMPLE ?= "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/${KAFKA_VERSION}/examples/kafka/kafka-persistent-single.yaml"
KAFKA_YAML ?= "https://github.com/strimzi/strimzi-kafka-operator/releases/download/${KAFKA_VERSION}/strimzi-cluster-operator-${KAFKA_VERSION}.yaml"
# Prometheus Operator variables
KAFKA_EXAMPLE ?= "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/0.23.0/examples/kafka/kafka-persistent-single.yaml"
KAFKA_YAML ?= "https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.23.0/strimzi-cluster-operator-0.23.0.yaml"
ES_OPERATOR_NAMESPACE ?= openshift-logging
ES_OPERATOR_BRANCH ?= release-4.4
ES_OPERATOR_IMAGE ?= quay.io/openshift/origin-elasticsearch-operator:4.4
SDK_VERSION=v0.18.2
ISTIO_VERSION ?= 1.8.2
ISTIOCTL="./deploy/test/istio/bin/istioctl"
GOPATH ?= "$(HOME)/go"
GOROOT ?= "$(shell go env GOROOT)"
SED ?= "sed"
PROMETHEUS_OPERATOR_TAG ?= v0.39.0
PROMETHEUS_BUNDLE ?= https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/${PROMETHEUS_OPERATOR_TAG}/bundle.yaml
# Metrics server variables
METRICS_SERVER_TAG ?= v0.6.1
METRICS_SERVER_YAML ?= https://github.com/kubernetes-sigs/metrics-server/releases/download/${METRICS_SERVER_TAG}/components.yaml
# Ingress controller variables
INGRESS_CONTROLLER_TAG ?= v1.0.1
INGRESS_CONTROLLER_YAML ?= https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-${INGRESS_CONTROLLER_TAG}/deploy/static/provider/kind/deploy.yaml
## Location to install tool dependencies
LOCALBIN ?= $(shell pwd)/bin
# Cert manager version to use
CERTMANAGER_VERSION ?= 1.6.1
CMCTL ?= $(LOCALBIN)/cmctl
# Operator SDK
OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk
OPERATOR_SDK_VERSION ?= 1.32.0
# Minimum Kubernetes and OpenShift versions
MIN_KUBERNETES_VERSION ?= 1.19.0
MIN_OPENSHIFT_VERSION ?= 4.12
# Use a KIND cluster for the E2E tests
USE_KIND_CLUSTER ?= true
# Is Jaeger Operator installed via OLM?
JAEGER_OLM ?= false
# Is Kafka Operator installed via OLM?
KAFKA_OLM ?= false
# Is Prometheus Operator installed via OLM?
PROMETHEUS_OLM ?= false
# Istio binary path and version
ISTIOCTL ?= $(LOCALBIN)/istioctl
# Tools
CRDOC ?= $(LOCALBIN)/crdoc
KIND ?= $(LOCALBIN)/kind
KUSTOMIZE ?= $(LOCALBIN)/kustomize
LD_FLAGS ?= "-X $(VERSION_PKG).version=$(OPERATOR_VERSION) -X $(VERSION_PKG).buildDate=$(VERSION_DATE) -X $(VERSION_PKG).defaultJaeger=$(JAEGER_VERSION)"
$(LOCALBIN):
mkdir -p $(LOCALBIN)
UNIT_TEST_PACKAGES := $(shell go list ./cmd/... ./pkg/... | grep -v elasticsearch/v1 | grep -v kafka/v1beta2 | grep -v client/versioned)
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
TEST_OPTIONS = $(VERBOSE) -kubeconfig $(KUBERNETES_CONFIG) -namespacedMan ../../deploy/test/namespace-manifests.yaml -globalMan ../../deploy/test/global-manifests.yaml -root .
LD_FLAGS ?= "-X $(VERSION_PKG).version=$(VERSION) -X $(VERSION_PKG).buildDate=$(VERSION_DATE) -X $(VERSION_PKG).defaultJaeger=$(JAEGER_VERSION) -X $(VERSION_PKG).defaultAgent=$(JAEGER_AGENT_VERSION)"
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST ?= $(LOCALBIN)/setup-envtest
ENVTEST_K8S_VERSION = 1.30
# Options for KIND version to use
export KUBE_VERSION ?= 1.30
KUBE_VERSION ?= 1.21
KIND_CONFIG ?= kind-$(KUBE_VERSION).yaml
SCORECARD_TEST_IMG ?= quay.io/operator-framework/scorecard-test:v$(OPERATOR_SDK_VERSION)
.DEFAULT_GOAL := build
# Options for 'bundle-build'
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
endif
ifneq ($(origin DEFAULT_CHANNEL), undefined)
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:maxDescLen=0,generateEmbeddedObjectMeta=true"
# If we are running in CI, run go test in verbose mode
ifeq (,$(CI))
GOTEST_OPTS=
else
GOTEST_OPTS=-v
endif
all: manager
.PHONY: check
check: install-tools
$(ECHO) Checking...
$(VECHO)./.ci/format.sh > $(FMT_LOG)
$(VECHO)[ ! -s "$(FMT_LOG)" ] || (echo "Go fmt, license check, or import ordering failures, run 'make format'" | cat - $(FMT_LOG) && false)
check:
@echo Checking...
@GOPATH=${GOPATH} .ci/format.sh > $(FMT_LOG)
@[ ! -s "$(FMT_LOG)" ] || (echo "Go fmt, license check, or import ordering failures, run 'make format'" | cat - $(FMT_LOG) && false)
ensure-generate-is-noop: VERSION=$(OPERATOR_VERSION)
ensure-generate-is-noop: set-image-controller generate bundle
$(VECHO)# on make bundle config/manager/kustomization.yaml includes changes, which should be ignored for the below check
$(VECHO)git restore config/manager/kustomization.yaml
$(VECHO)git diff -s --exit-code api/v1/zz_generated.*.go || (echo "Build failed: a model has been changed but the generated resources aren't up to date. Run 'make generate' and update your PR." && exit 1)
$(VECHO)git diff -s --exit-code bundle config || (echo "Build failed: the bundle, config files has been changed but the generated bundle, config files aren't up to date. Run 'make bundle' and update your PR." && git diff && exit 1)
$(VECHO)git diff -s --exit-code docs/api.md || (echo "Build failed: the api.md file has been changed but the generated api.md file isn't up to date. Run 'make api-docs' and update your PR." && git diff && exit 1)
.PHONY: ensure-generate-is-noop
ensure-generate-is-noop: generate format
@git diff -s --exit-code pkg/apis/jaegertracing/v1/zz_generated.*.go || (echo "Build failed: a model has been changed but the generated resources aren't up to date. Run 'make generate' and update your PR." && exit 1)
@git diff -s --exit-code pkg/client/versioned || (echo "Build failed: the versioned clients aren't up to date. Run 'make generate'." && exit 1)
.PHONY: format
format: install-tools
$(ECHO) Formatting code...
$(VECHO)./.ci/format.sh
format:
@echo Formatting code...
@GOPATH=${GOPATH} .ci/format.sh
.PHONY: lint
lint: install-tools
$(ECHO) Linting...
$(VECHO)$(LOCALBIN)/golangci-lint -v run
.PHONY: lint
lint:
@echo Linting...
@GOPATH=${GOPATH} ./.ci/lint.sh
.PHONY: vet
vet: ## Run go vet against code.
go vet ./...
.PHONY: security
security:
@echo Security...
@${GOPATH}/bin/gosec -quiet -exclude=G104 ./... 2>/dev/null
.PHONY: build
build: format
$(ECHO) Building...
$(VECHO)./hack/install/install-dependencies.sh
$(VECHO)${GO_FLAGS} go build -ldflags $(LD_FLAGS) -o $(OUTPUT_BINARY) main.go
$(MAKE) gobuild
.PHONY: gobuild
gobuild:
@echo Building...
@${GO_FLAGS} go build -o $(OUTPUT_BINARY) -ldflags $(LD_FLAGS)
# compile the tests without running them
@${GO_FLAGS} go test -c ./test/e2e/...
.PHONY: docker
docker:
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=JAEGER_AGENT_VERSION=${JAEGER_AGENT_VERSION} --build-arg=TARGETARCH=$(GOARCH) --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} -t "$(IMG)" . ${DOCKER_BUILD_OPTIONS}
@[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=TARGETARCH=$(GOARCH) --file build/Dockerfile -t "$(BUILD_IMAGE)" .
.PHONY: dockerx
dockerx:
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker buildx build --push --progress=plain --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=JAEGER_AGENT_VERSION=${JAEGER_AGENT_VERSION} --build-arg=GOPROXY=${GOPROXY} --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} --platform=$(PLATFORMS) $(IMAGE_TAGS) .
@[ ! -z "$(PIPELINE)" ] || docker buildx build --push --progress=plain --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=GOPROXY=${GOPROXY} --platform=$(PLATFORMS) --file build/Dockerfile $(IMAGE_TAGS) .
.PHONY: push
push:
ifeq ($(CI),true)
$(ECHO) Skipping push, as the build is running within a CI environment
@echo Skipping push, as the build is running within a CI environment
else
$(ECHO) "Pushing image $(IMG)..."
$(VECHO)docker push $(IMG) > /dev/null
@echo "Pushing image $(BUILD_IMAGE)..."
@docker push $(BUILD_IMAGE) > /dev/null
endif
.PHONY: unit-tests
unit-tests: envtest
unit-tests:
@echo Running unit tests...
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -p 1 ${GOTEST_OPTS} ./... -cover -coverprofile=cover.out -ldflags $(LD_FLAGS)
@go test $(VERBOSE) $(UNIT_TEST_PACKAGES) -cover -coverprofile=cover.out -ldflags $(LD_FLAGS)
.PHONY: e2e-tests
e2e-tests: prepare-e2e-tests e2e-tests-smoke e2e-tests-cassandra e2e-tests-es e2e-tests-self-provisioned-es e2e-tests-streaming e2e-tests-examples1 e2e-tests-examples2 e2e-tests-examples-openshift e2e-tests-generate
.PHONY: prepare-e2e-tests
prepare-e2e-tests: build docker push
@mkdir -p deploy/test
@cp deploy/service_account.yaml deploy/test/namespace-manifests.yaml
@echo "---" >> deploy/test/namespace-manifests.yaml
@cat deploy/role.yaml >> deploy/test/namespace-manifests.yaml
@echo "---" >> deploy/test/namespace-manifests.yaml
@# ClusterRoleBinding is created in test codebase because we don't know service account namespace
@cat deploy/role_binding.yaml >> deploy/test/namespace-manifests.yaml
@echo "---" >> deploy/test/namespace-manifests.yaml
@${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" test/operator.yaml >> deploy/test/namespace-manifests.yaml
@cp deploy/crds/jaegertracing.io_jaegers_crd.yaml deploy/test/global-manifests.yaml
@echo "---" >> deploy/test/global-manifests.yaml
@cat deploy/cluster_role.yaml >> deploy/test/global-manifests.yaml
.PHONY: e2e-tests-smoke
e2e-tests-smoke: prepare-e2e-tests
@echo Running Smoke end-to-end tests...
@BUILD_IMAGE=$(BUILD_IMAGE) go test -tags=smoke ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-generate
e2e-tests-generate: prepare-e2e-tests
@echo Running generate end-to-end tests...
@BUILD_IMAGE=$(BUILD_IMAGE) go test -tags=generate ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-cassandra
e2e-tests-cassandra: prepare-e2e-tests cassandra
@echo Running Cassandra end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) go test -tags=cassandra ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-es
e2e-tests-es: prepare-e2e-tests es
@echo Running Elasticsearch end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) go test -tags=elasticsearch ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-self-provisioned-es
e2e-tests-self-provisioned-es: prepare-e2e-tests deploy-es-operator
@echo Running Self provisioned Elasticsearch end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) ES_OPERATOR_NAMESPACE=$(ES_OPERATOR_NAMESPACE) ES_OPERATOR_IMAGE=$(ES_OPERATOR_IMAGE) go test -tags=self_provisioned_elasticsearch ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-self-provisioned-es-kafka
e2e-tests-self-provisioned-es-kafka: prepare-e2e-tests deploy-kafka-operator deploy-es-operator
@echo Running Self provisioned Elasticsearch and Kafka end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) ES_OPERATOR_NAMESPACE=$(ES_OPERATOR_NAMESPACE) ES_OPERATOR_IMAGE=$(ES_OPERATOR_IMAGE) go test -tags=self_provisioned_elasticsearch_kafka ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-token-propagation-es
e2e-tests-token-propagation-es: prepare-e2e-tests deploy-es-operator
@echo Running Token Propagation Elasticsearch end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) ES_OPERATOR_NAMESPACE=$(ES_OPERATOR_NAMESPACE) TEST_TIMEOUT=5 ES_OPERATOR_IMAGE=$(ES_OPERATOR_IMAGE) go test -tags=token_propagation_elasticsearch ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-streaming
e2e-tests-streaming: prepare-e2e-tests es kafka
@echo Running Streaming end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=streaming ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-examples1
e2e-tests-examples1: prepare-e2e-tests cassandra
@echo Running Example end-to-end tests part 1...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=examples1 ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-examples2
e2e-tests-examples2: prepare-e2e-tests es kafka
@echo Running Example end-to-end tests part 2...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=examples2 ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-examples-openshift
e2e-tests-examples-openshift: prepare-e2e-tests deploy-es-operator
@echo Running OpenShift Example end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=examples_openshift ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-autoscale
e2e-tests-autoscale: prepare-e2e-tests es kafka
@echo Running Autoscale end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=autoscale ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-multi-instance
e2e-tests-multi-instance: prepare-e2e-tests es kafka
@echo Running Multiple Instance end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=multiple ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-upgrade
e2e-tests-upgrade: prepare-e2e-tests
@echo Prepare next version image...
@[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=JAEGER_VERSION=$(shell .ci/get_test_upgrade_version.sh ${JAEGER_VERSION}) --file build/Dockerfile -t "$(NAMESPACE)/$(OPERATOR_NAME):next" .
BUILD_IMAGE="$(NAMESPACE)/$(OPERATOR_NAME):next" $(MAKE) push
@echo Running Upgrade end-to-end tests...
UPGRADE_TEST_VERSION=$(shell .ci/get_test_upgrade_version.sh ${JAEGER_VERSION}) go test -tags=upgrade ./test/e2e/... $(TEST_OPTIONS)
.PHONY: e2e-tests-istio
e2e-tests-istio: prepare-e2e-tests istio
@echo Running Istio end-to-end tests...
@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=istio ./test/e2e/... $(TEST_OPTIONS)
.PHONY: run
run: crd
@rm -rf /tmp/_cert*
@POD_NAMESPACE=default OPERATOR_NAME=${OPERATOR_NAME} operator-sdk run local --watch-namespace="${WATCH_NAMESPACE}" --operator-flags "start ${CLI_FLAGS}" --go-ldflags ${LD_FLAGS}
.PHONY: run-debug
run-debug: run
run-debug: CLI_FLAGS = --log-level=debug --tracing-enabled=true
.PHONY: set-max-map-count
set-max-map-count:
# This is not required in OCP 4.1. The node tuning operator configures the property automatically
# when label tuned.openshift.io/elasticsearch=true label is present on the ES pod. The label
# is configured by ES operator.
@minishift ssh -- 'sudo sysctl -w vm.max_map_count=262144' > /dev/null 2>&1 || true
.PHONY: set-node-os-linux
set-node-os-linux:
# Elasticsearch requires labeled nodes. These labels are by default present in OCP 4.2
$(VECHO)kubectl label nodes --all kubernetes.io/os=linux --overwrite
@kubectl label nodes --all kubernetes.io/os=linux --overwrite
cert-manager: cmctl
# Consider using cmctl to install cert-manager once the install command is no longer experimental
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v${CERTMANAGER_VERSION}/cert-manager.yaml
$(CMCTL) check api --wait=5m
.PHONY: deploy-es-operator
deploy-es-operator: set-node-os-linux set-max-map-count deploy-prometheus-operator
ifeq ($(OLM),true)
@echo Skipping es-operator deployment, assuming it has been installed via OperatorHub
else
@kubectl create namespace ${ES_OPERATOR_NAMESPACE} 2>&1 | grep -v "already exists" || true
@kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/01-service-account.yaml -n ${ES_OPERATOR_NAMESPACE}
@kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/02-role.yaml
@kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/03-role-bindings.yaml
@kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/04-crd.yaml -n ${ES_OPERATOR_NAMESPACE}
@kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/05-deployment.yaml -n ${ES_OPERATOR_NAMESPACE}
@kubectl set image deployment/elasticsearch-operator elasticsearch-operator=${ES_OPERATOR_IMAGE} -n ${ES_OPERATOR_NAMESPACE}
endif
undeploy-cert-manager:
kubectl delete --ignore-not-found=true -f https://github.com/jetstack/cert-manager/releases/download/v${CERTMANAGER_VERSION}/cert-manager.yaml
cmctl: $(CMCTL)
$(CMCTL): $(LOCALBIN)
./hack/install/install-cmctl.sh $(CERTMANAGER_VERSION)
.PHONY: undeploy-es-operator
undeploy-es-operator:
ifeq ($(OLM),true)
@echo Skipping es-operator undeployment, as it should have been installed via OperatorHub
else
@kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/05-deployment.yaml -n ${ES_OPERATOR_NAMESPACE} --ignore-not-found=true || true
@kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/04-crd.yaml -n ${ES_OPERATOR_NAMESPACE} --ignore-not-found=true || true
@kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/03-role-bindings.yaml --ignore-not-found=true || true
@kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/02-role.yaml --ignore-not-found=true || true
@kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/01-service-account.yaml -n ${ES_OPERATOR_NAMESPACE} --ignore-not-found=true || true
@kubectl delete namespace ${ES_OPERATOR_NAMESPACE} --ignore-not-found=true 2>&1 || true
endif
.PHONY: es
es: storage
ifeq ($(SKIP_ES_EXTERNAL),true)
$(ECHO) Skipping creation of external Elasticsearch instance
@echo Skipping creation of external Elasticsearch instance
else
$(VECHO)kubectl create -f ./tests/elasticsearch.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
@kubectl create -f ./test/elasticsearch.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
endif
.PHONY: istio
istio:
$(ECHO) Install istio with minimal profile
$(VECHO)./hack/install/install-istio.sh
$(VECHO)${ISTIOCTL} install --set profile=minimal -y
@echo Install istio with minimal profile
@mkdir -p deploy/test
@[ -f "${ISTIOCTL}" ] || (curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VERSION} TARGET_ARCH=x86_64 sh - && mv ./istio-${ISTIO_VERSION} ./deploy/test/istio)
@${ISTIOCTL} install --set profile=minimal -y
.PHONY: undeploy-istio
undeploy-istio:
$(VECHO)${ISTIOCTL} manifest generate --set profile=demo | kubectl delete --ignore-not-found=true -f - || true
$(VECHO)kubectl delete namespace istio-system --ignore-not-found=true || true
@[ -f "${ISTIOCTL}" ] && (${ISTIOCTL} manifest generate --set profile=demo | kubectl delete --ignore-not-found=true -f -) || true
@kubectl delete namespace istio-system --ignore-not-found=true || true
@rm -rf deploy/test/istio
.PHONY: cassandra
cassandra: storage
$(VECHO)kubectl create -f ./tests/cassandra.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
@kubectl create -f ./test/cassandra.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
.PHONY: storage
storage:
$(ECHO) Creating namespace $(STORAGE_NAMESPACE)
$(VECHO)kubectl create namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
@echo Creating namespace $(STORAGE_NAMESPACE)
@kubectl create namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
.PHONY: deploy-kafka-operator
deploy-kafka-operator:
$(ECHO) Creating namespace $(KAFKA_NAMESPACE)
$(VECHO)kubectl create namespace $(KAFKA_NAMESPACE) 2>&1 | grep -v "already exists" || true
ifeq ($(KAFKA_OLM),true)
$(ECHO) Skipping kafka-operator deployment, assuming it has been installed via OperatorHub
@echo Creating namespace $(KAFKA_NAMESPACE)
@kubectl create namespace $(KAFKA_NAMESPACE) 2>&1 | grep -v "already exists" || true
ifeq ($(OLM),true)
@echo Skipping kafka-operator deployment, assuming it has been installed via OperatorHub
else
$(VECHO)curl --fail --location https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.32.0/strimzi-0.32.0.tar.gz --output tests/_build/kafka-operator.tar.gz --create-dirs
$(VECHO)tar xf tests/_build/kafka-operator.tar.gz
$(VECHO)${SED} -i 's/namespace: .*/namespace: ${KAFKA_NAMESPACE}/' strimzi-${KAFKA_VERSION}/install/cluster-operator/*RoleBinding*.yaml
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/020-RoleBinding-strimzi-cluster-operator.yaml -n ${KAFKA_NAMESPACE}
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/023-RoleBinding-strimzi-cluster-operator.yaml -n ${KAFKA_NAMESPACE}
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml -n ${KAFKA_NAMESPACE}
$(VECHO)kubectl apply -f strimzi-${KAFKA_VERSION}/install/cluster-operator/ -n ${KAFKA_NAMESPACE}
@kubectl create clusterrolebinding strimzi-cluster-operator-namespaced --clusterrole=strimzi-cluster-operator-namespaced --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
@kubectl create clusterrolebinding strimzi-cluster-operator-entity-operator-delegation --clusterrole=strimzi-entity-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
@kubectl create clusterrolebinding strimzi-cluster-operator-topic-operator-delegation --clusterrole=strimzi-topic-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
@curl --fail --location $(KAFKA_YAML) --output deploy/test/kafka-operator.yaml --create-dirs
@${SED} 's/namespace: .*/namespace: $(KAFKA_NAMESPACE)/' deploy/test/kafka-operator.yaml | kubectl -n $(KAFKA_NAMESPACE) apply -f - 2>&1 | grep -v "already exists" || true
@kubectl set env deployment strimzi-cluster-operator -n ${KAFKA_NAMESPACE} STRIMZI_NAMESPACE="*"
endif
.PHONY: undeploy-kafka-operator
undeploy-kafka-operator:
ifeq ($(KAFKA_OLM),true)
$(ECHO) Skipping kafka-operator undeploy
ifeq ($(OLM),true)
@echo Skipping kafka-operator undeploy
else
$(VECHO)kubectl delete --namespace $(KAFKA_NAMESPACE) -f tests/_build/kafka-operator.yaml --ignore-not-found=true 2>&1 || true
$(VECHO)kubectl delete clusterrolebinding strimzi-cluster-operator-namespaced --ignore-not-found=true || true
$(VECHO)kubectl delete clusterrolebinding strimzi-cluster-operator-entity-operator-delegation --ignore-not-found=true || true
$(VECHO)kubectl delete clusterrolebinding strimzi-cluster-operator-topic-operator-delegation --ignore-not-found=true || true
@kubectl delete --namespace $(KAFKA_NAMESPACE) -f deploy/test/kafka-operator.yaml --ignore-not-found=true 2>&1 || true
@kubectl delete clusterrolebinding strimzi-cluster-operator-namespaced --ignore-not-found=true || true
@kubectl delete clusterrolebinding strimzi-cluster-operator-entity-operator-delegation --ignore-not-found=true || true
@kubectl delete clusterrolebinding strimzi-cluster-operator-topic-operator-delegation --ignore-not-found=true || true
endif
@kubectl delete namespace $(KAFKA_NAMESPACE) --ignore-not-found=true 2>&1 || true
.PHONY: kafka
kafka: deploy-kafka-operator
ifeq ($(SKIP_KAFKA),true)
$(ECHO) Skipping Kafka/external ES related tests
@echo Skipping Kafka/external ES related tests
else
$(ECHO) Creating namespace $(KAFKA_NAMESPACE)
$(VECHO)mkdir -p tests/_build/
$(VECHO)kubectl create namespace $(KAFKA_NAMESPACE) 2>&1 | grep -v "already exists" || true
$(VECHO)curl --fail --location $(KAFKA_EXAMPLE) --output tests/_build/kafka-example.yaml --create-dirs
$(VECHO)${SED} -i 's/size: 100Gi/size: 10Gi/g' tests/_build/kafka-example.yaml
$(VECHO)kubectl -n $(KAFKA_NAMESPACE) apply --dry-run=client -f tests/_build/kafka-example.yaml
$(VECHO)kubectl -n $(KAFKA_NAMESPACE) apply -f tests/_build/kafka-example.yaml 2>&1 | grep -v "already exists" || true
@echo Creating namespace $(KAFKA_NAMESPACE)
@kubectl create namespace $(KAFKA_NAMESPACE) 2>&1 | grep -v "already exists" || true
@curl --fail --location $(KAFKA_EXAMPLE) --output deploy/test/kafka-example.yaml --create-dirs
@${SED} -i 's/size: 100Gi/size: 10Gi/g' deploy/test/kafka-example.yaml
@kubectl -n $(KAFKA_NAMESPACE) apply --dry-run=true -f deploy/test/kafka-example.yaml
@kubectl -n $(KAFKA_NAMESPACE) apply -f deploy/test/kafka-example.yaml 2>&1 | grep -v "already exists" || true
endif
.PHONY: undeploy-kafka
undeploy-kafka: undeploy-kafka-operator
$(VECHO)kubectl delete --namespace $(KAFKA_NAMESPACE) -f tests/_build/kafka-example.yaml 2>&1 || true
@kubectl delete --namespace $(KAFKA_NAMESPACE) -f deploy/test/kafka-example.yaml 2>&1 || true
.PHONY: deploy-prometheus-operator
deploy-prometheus-operator:
ifeq ($(PROMETHEUS_OLM),true)
$(ECHO) Skipping prometheus-operator deployment, assuming it has been installed via OperatorHub
ifeq ($(OLM),true)
@echo Skipping prometheus-operator deployment, assuming it has been installed via OperatorHub
else
$(VECHO)kubectl apply -f ${PROMETHEUS_BUNDLE}
@kubectl apply -f ${PROMETHEUS_BUNDLE}
endif
.PHONY: undeploy-prometheus-operator
undeploy-prometheus-operator:
ifeq ($(PROMETHEUS_OLM),true)
$(ECHO) Skipping prometheus-operator undeployment, as it should have been installed via OperatorHub
ifeq ($(OLM),true)
@echo Skipping prometheus-operator undeployment, as it should have been installed via OperatorHub
else
$(VECHO)kubectl delete -f ${PROMETHEUS_BUNDLE} --ignore-not-found=true || true
@kubectl delete -f ${PROMETHEUS_BUNDLE} --ignore-not-found=true || true
endif
.PHONY: clean
clean: undeploy-kafka undeploy-prometheus-operator undeploy-istio undeploy-cert-manager
$(VECHO)kubectl delete namespace $(KAFKA_NAMESPACE) --ignore-not-found=true 2>&1 || true
$(VECHO)if [ -d tests/_build ]; then rm -rf tests/_build ; fi
$(VECHO)kubectl delete -f ./tests/cassandra.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
$(VECHO)kubectl delete -f ./tests/elasticsearch.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
clean: undeploy-kafka undeploy-es-operator undeploy-prometheus-operator undeploy-istio
@rm -f deploy/test/*.yaml
@if [ -d deploy/test ]; then rmdir deploy/test ; fi
@kubectl delete -f ./test/cassandra.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
@kubectl delete -f ./test/elasticsearch.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
@kubectl delete -f deploy/crds/jaegertracing.io_jaegers_crd.yaml --ignore-not-found=true || true
@kubectl delete -f deploy/operator.yaml --ignore-not-found=true || true
@kubectl delete -f deploy/role_binding.yaml --ignore-not-found=true || true
@kubectl delete -f deploy/role.yaml --ignore-not-found=true || true
@kubectl delete -f deploy/service_account.yaml --ignore-not-found=true || true
.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
.PHONY: crd
crd:
@kubectl create -f deploy/crds/jaegertracing.io_jaegers_crd.yaml 2>&1 | grep -v "already exists" || true
.PHONY: ingress
ingress:
@minikube addons enable ingress
.PHONY: generate
generate: controller-gen api-docs ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
generate: internal-generate format
.PHONY: internal-generate
internal-generate:
@GOPATH=${GOPATH} GOROOT=${GOROOT} ./.ci/generate.sh
.PHONY: test
test: unit-tests run-e2e-tests
test: unit-tests e2e-tests
.PHONY: all
all: check format lint build test
all: check format lint security build test
.PHONY: ci
ci: install-tools ensure-generate-is-noop check format lint build unit-tests
ci: ensure-generate-is-noop check format lint security build unit-tests
##@ Deployment
.PHONY: scorecard
scorecard:
@operator-sdk scorecard --cr-manifest deploy/examples/simplest.yaml --csv-path deploy/olm-catalog/jaeger.clusterserviceversion.yaml --init-timeout 30
ignore-not-found ?= false
.PHONY: install-sdk
install-sdk:
@echo Installing SDK ${SDK_VERSION}
@SDK_VERSION=$(SDK_VERSION) GOPATH=$(GOPATH) ./.ci/install-sdk.sh
.PHONY: install-tools
install-tools:
@${GO_FLAGS} ./.ci/vgot.sh \
golang.org/x/lint/golint \
golang.org/x/tools/cmd/goimports \
github.com/securego/gosec/cmd/gosec@v0.0.0-20191008095658-28c1128b7336 \
sigs.k8s.io/controller-tools/cmd/controller-gen@v0.5.0 \
k8s.io/code-generator/cmd/client-gen@v0.18.6 \
k8s.io/kube-openapi/cmd/openapi-gen@v0.0.0-20200410145947-61e04a5be9a6
.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl apply -f -
.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
install: install-sdk install-tools
.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
kubectl create namespace observability 2>&1 | grep -v "already exists" || true
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
./hack/enable-operator-features.sh
$(KUSTOMIZE) build config/default | kubectl apply -f -
.PHONY: undeploy
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
deploy: ingress crd
@kubectl apply -f deploy/service_account.yaml
@kubectl apply -f deploy/cluster_role.yaml
@kubectl apply -f deploy/cluster_role_binding.yaml
@${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" deploy/operator.yaml | kubectl apply -f -
.PHONY: operatorhub
operatorhub: check-operatorhub-pr-template
$(VECHO)./.ci/operatorhub.sh
@./.ci/operatorhub.sh
.PHONY: check-operatorhub-pr-template
check-operatorhub-pr-template:
$(VECHO)curl https://raw.githubusercontent.com/operator-framework/community-operators/master/docs/pull_request_template.md -o .ci/.operatorhub-pr-template.md -s > /dev/null 2>&1
$(VECHO)git diff -s --exit-code .ci/.operatorhub-pr-template.md || (echo "Build failed: the PR template for OperatorHub has changed. Sync it and try again." && exit 1)
@curl https://raw.githubusercontent.com/operator-framework/community-operators/master/docs/pull_request_template.md -o .ci/.operatorhub-pr-template.md -s > /dev/null 2>&1
@git diff -s --exit-code .ci/.operatorhub-pr-template.md || (echo "Build failed: the PR template for OperatorHub has changed. Sync it and try again." && exit 1)
.PHONY: local-jaeger-container
local-jaeger-container:
@echo "Starting local container with Jaeger. Check http://localhost:16686"
@docker run -d --rm -p 16686:16686 -p 6831:6831/udp --name jaeger jaegertracing/all-in-one:1.22 > /dev/null
.PHONY: changelog
changelog:
$(ECHO) "Set env variable OAUTH_TOKEN before invoking, https://github.com/settings/tokens/new?description=GitHub%20Changelog%20Generator%20token"
$(VECHO)docker run --rm -v "${PWD}:/app" pavolloffay/gch:latest --oauth-token ${OAUTH_TOKEN} --branch main --owner jaegertracing --repo jaeger-operator
@echo "Set env variable OAUTH_TOKEN before invoking, https://github.com/settings/tokens/new?description=GitHub%20Changelog%20Generator%20token"
@docker run --rm -v "${PWD}:/app" pavolloffay/gch:latest --oauth-token ${OAUTH_TOKEN} --owner jaegertracing --repo jaeger-operator
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(VECHO)./hack/install/install-controller-gen.sh
# e2e tests using kuttl
.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
test -s $(ENVTEST) || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
kuttl:
ifeq (, $(shell which kubectl-kuttl))
echo ${PATH}
ls -l /usr/local/bin
which kubectl-kuttl
.PHONY: bundle
bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files.
$(SED) -i "s#containerImage: quay.io/jaegertracing/jaeger-operator:$(OPERATOR_VERSION)#containerImage: quay.io/jaegertracing/jaeger-operator:$(VERSION)#g" config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
$(SED) -i 's/minKubeVersion: .*/minKubeVersion: $(MIN_KUBERNETES_VERSION)/' config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
$(SED) -i 's/com.redhat.openshift.versions=.*/com.redhat.openshift.versions=v$(MIN_OPENSHIFT_VERSION)/' bundle.Dockerfile
$(SED) -i 's/com.redhat.openshift.versions: .*/com.redhat.openshift.versions: v$(MIN_OPENSHIFT_VERSION)/' bundle/metadata/annotations.yaml
$(OPERATOR_SDK) generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --manifests --version $(VERSION) $(BUNDLE_METADATA_OPTS)
$(OPERATOR_SDK) bundle validate ./bundle
./hack/ignore-createdAt-bundle.sh
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
.PHONY: bundle-push
bundle-push: ## Push the bundle image.
docker push $(BUNDLE_IMG)
.PHONY: opm
OPM = ./bin/opm
opm: ## Download opm locally if necessary.
ifeq (,$(wildcard $(OPM)))
ifeq (,$(shell which opm 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(OPM)) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\
chmod +x $(OPM) ;\
echo "" ;\
echo "ERROR: kuttl not found." ;\
echo "Please check https://kuttl.dev/docs/cli.html for installation instructions and try again." ;\
echo "" ;\
exit 1 ;\
}
else
OPM = $(shell which opm)
endif
KUTTL=$(shell which kubectl-kuttl)
endif
# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
# These images MUST exist in a registry and be pull-able.
BUNDLE_IMGS ?= $(BUNDLE_IMG)
# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
ifneq ($(origin CATALOG_BASE_IMG), undefined)
FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
endif
# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
.PHONY: catalog-build
catalog-build: opm ## Build a catalog image.
$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
# Push the catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
$(MAKE) docker-push IMG=$(CATALOG_IMG)
.PHONY: start-kind
start-kind: kind
ifeq ($(USE_KIND_CLUSTER),true)
$(ECHO) Starting KIND cluster...
# Instead of letting KUTTL create the Kind cluster (using the CLI or in the kuttl-tests.yaml
# file), the cluster is created here. There are multiple reasons to do this:
# * The kubectl command will not work outside KUTTL
# * Some KUTTL versions are not able to properly start a Kind cluster
# * The cluster will be removed after running KUTTL (this can be disabled). Sometimes,
# the cluster teardown is not done properly and KUTTL cannot be run with the --start-kind flag
# When the Kind cluster is not created by KUTTL, the kindContainers parameter
# from kuttl-tests.yaml has no effect, so the container
# images need to be loaded here.
$(VECHO)$(KIND) create cluster --config $(KIND_CONFIG) 2>&1 | grep -v "already exists" || true
# Install metrics-server for HPA
$(ECHO)"Installing the metrics-server in the kind cluster"
$(VECHO)kubectl apply -f $(METRICS_SERVER_YAML)
$(VECHO)kubectl patch deployment -n kube-system metrics-server --type "json" -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": --kubelet-insecure-tls}]'
# Install the ingress-controller
$(ECHO)"Installing the Ingress controller in the kind cluster"
$(VECHO)kubectl apply -f $(INGRESS_CONTROLLER_YAML)
# Check the deployments were done properly
$(ECHO)"Checking the metrics-server was deployed properly"
$(VECHO)kubectl wait --for=condition=available deployment/metrics-server -n kube-system --timeout=5m
$(ECHO)"Checking the Ingress controller deployment was done successfully"
$(VECHO)kubectl wait --for=condition=available deployment ingress-nginx-controller -n ingress-nginx --timeout=5m
else
$(ECHO)"KIND cluster creation disabled. Skipping..."
endif
stop-kind:
$(ECHO)"Stopping the kind cluster"
$(VECHO)kind delete cluster
.PHONY: install-git-hooks
install-git-hooks:
$(VECHO)cp scripts/git-hooks/pre-commit .git/hooks
# Generates the released manifests
release-artifacts: set-image-controller
mkdir -p dist
$(KUSTOMIZE) build config/default -o dist/jaeger-operator.yaml
# Set the controller image parameters
set-image-controller: manifests kustomize
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
.PHONY: tools
tools: kustomize controller-gen operator-sdk
.PHONY: install-tools
install-tools: operator-sdk
$(VECHO)./hack/install/install-golangci-lint.sh
$(VECHO)./hack/install/install-goimports.sh
.PHONY: kustomize
kustomize: $(KUSTOMIZE)
$(KUSTOMIZE): $(LOCALBIN)
./hack/install/install-kustomize.sh
.PHONY: kind
kind: $(KIND)
$(KIND): $(LOCALBIN)
./hack/install/install-kind.sh
.PHONY: prepare-release
prepare-release:
$(VECHO)./.ci/prepare-release.sh
scorecard-tests: operator-sdk
echo "Operator sdk is $(OPERATOR_SDK)"
$(OPERATOR_SDK) scorecard bundle -w 10m || (echo "scorecard test failed" && exit 1)
scorecard-tests-local: kind
$(VECHO)$(KIND) create cluster --config $(KIND_CONFIG) 2>&1 | grep -v "already exists" || true
$(VECHO)docker pull $(SCORECARD_TEST_IMG)
$(VECHO)$(KIND) load docker-image $(SCORECARD_TEST_IMG)
$(VECHO)kubectl wait --timeout=5m --for=condition=available deployment/coredns -n kube-system
$(VECHO)$(MAKE) scorecard-tests
.PHONY: operator-sdk
operator-sdk: $(OPERATOR_SDK)
$(OPERATOR_SDK): $(LOCALBIN)
test -s $(OPERATOR_SDK) || curl -sLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/v${OPERATOR_SDK_VERSION}/operator-sdk_`go env GOOS`_`go env GOARCH`
@chmod +x $(OPERATOR_SDK)
api-docs: crdoc kustomize
kind:
ifeq (, $(shell which kind))
@{ \
set -e ;\
TMP_DIR=$$(mktemp -d) ; \
$(KUSTOMIZE) build config/crd -o $$TMP_DIR/crd-output.yaml ;\
$(CRDOC) --resources $$TMP_DIR/crd-output.yaml --output docs/api.md ;\
echo "" ;\
echo "ERROR: kind not found." ;\
echo "Please check https://kind.sigs.k8s.io/docs/user/quick-start/#installation for installation instructions and try again." ;\
echo "" ;\
exit 1 ;\
}
else
KIND=$(shell which kind)
endif
.PHONY: crdoc
crdoc: $(CRDOC)
$(CRDOC): $(LOCALBIN)
test -s $(CRDOC) || GOBIN=$(LOCALBIN) go install fybrik.io/crdoc@v0.5.2
@chmod +x $(CRDOC)
.PHONY: prepare-e2e-kuttl-tests
prepare-e2e-kuttl-tests: BUILD_IMAGE="local/jaeger-operator:e2e"
prepare-e2e-kuttl-tests: build docker build-assert-job
@mkdir -p tests/_build/manifests
@mkdir -p tests/_build/crds
@cp deploy/service_account.yaml tests/_build/manifests/01-jaeger-operator.yaml
@echo "---" >> tests/_build/manifests/01-jaeger-operator.yaml
@cat deploy/role.yaml >> tests/_build/manifests/01-jaeger-operator.yaml
@echo "---" >> tests/_build/manifests/01-jaeger-operator.yaml
@cat deploy/cluster_role.yaml >> tests/_build/manifests/01-jaeger-operator.yaml
@echo "---" >> tests/_build/manifests/01-jaeger-operator.yaml
@${SED} "s~namespace: .*~namespace: jaeger-operator-system~gi" deploy/cluster_role_binding.yaml >> tests/_build/manifests/01-jaeger-operator.yaml
@echo "---" >> tests/_build/manifests/01-jaeger-operator.yaml
@${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" deploy/operator.yaml >> tests/_build/manifests/01-jaeger-operator.yaml
@${SED} "s~imagePullPolicy: Always~imagePullPolicy: Never~gi" tests/_build/manifests/01-jaeger-operator.yaml -i
@${SED} "0,/fieldPath: metadata.namespace/s/fieldPath: metadata.namespace/fieldPath: metadata.annotations['olm.targetNamespaces']/gi" tests/_build/manifests/01-jaeger-operator.yaml -i
@cp deploy/crds/jaegertracing.io_jaegers_crd.yaml tests/_build/crds/jaegertracing.io_jaegers_crd.yaml
docker pull jaegertracing/vertx-create-span:operator-e2e-tests
# end-to-end tests
.PHONY: kuttl-e2e
kuttl-e2e: prepare-e2e-kuttl-tests start-kind run-kuttl-e2e
.PHONY: run-kuttl-e2e
run-kuttl-e2e:
$(KUTTL) test
start-kind:
kind create cluster --config $(KIND_CONFIG)
kind load docker-image local/jaeger-operator:e2e
kind load docker-image local/asserts:e2e
kind load docker-image jaegertracing/vertx-create-span:operator-e2e-tests
.PHONY: build-assert-job
build-assert-job:
@docker build -t local/asserts:e2e -f Dockerfile.asserts .
PROJECT
@ -1,23 +0,0 @@
domain: jaegertracing.io
layout:
- go.kubebuilder.io/v3
multigroup: true
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: jaeger-operator
repo: github.com/jaegertracing/jaeger-operator
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: jaegertracing.io
kind: Jaeger
path: github.com/jaegertracing/jaeger-operator/apis/v1
version: v1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
version: "3"
README.md
@ -1,4 +1,5 @@
[![Build Status][ci-img]][ci] [![Go Report Card][goreport-img]][goreport] [![Code Coverage][cov-img]][cov] [![GoDoc][godoc-img]][godoc] [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/jaegertracing/jaeger-operator/badge)](https://securityscorecards.dev/viewer/?uri=github.com/jaegertracing/jaeger-operator)
[![Build Status][ci-img]][ci] [![Go Report Card][goreport-img]][goreport] [![Code Coverage][cov-img]][cov] [![GoDoc][godoc-img]][godoc]
# Jaeger Operator for Kubernetes
@ -8,7 +9,23 @@ The Jaeger Operator is an implementation of a [Kubernetes Operator](https://kube
Firstly, ensure an [ingress-controller is deployed](https://kubernetes.github.io/ingress-nginx/deploy/). When using `minikube`, you can use the `ingress` add-on: `minikube start --addons=ingress`
Then follow the Jaeger Operator [installation instructions](https://www.jaegertracing.io/docs/latest/operator/).
To install the operator, run:
```
kubectl create namespace observability
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/crds/jaegertracing.io_jaegers_crd.yaml
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/service_account.yaml
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/role.yaml
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/role_binding.yaml
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/operator.yaml
```
The operator will activate extra features if given cluster-wide permissions. To enable that, run:
```
kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/cluster_role.yaml
kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/cluster_role_binding.yaml
```
Note that you'll need to download and customize the `cluster_role_binding.yaml` if you are using a namespace other than `observability`. You probably also want to download and customize the `operator.yaml`, setting the env var `WATCH_NAMESPACE` to have an empty value, so that it can watch for instances across all namespaces.
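For illustration, a sketch of the relevant `env` entry in a customized `operator.yaml` for cluster-wide mode:
```yaml
env:
- name: WATCH_NAMESPACE
  value: ""  # empty value: watch for Jaeger instances in all namespaces
```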
Once the `jaeger-operator` deployment in the namespace `observability` is ready, create a Jaeger instance, like:
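A minimal instance (equivalent to `examples/simplest.yaml`) looks like:
```yaml
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
  name: simplest
```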
@ -33,11 +50,16 @@ In this example, the Jaeger UI is available at http://192.168.122.34.
The official documentation for the Jaeger Operator, including all its customization options, is available under the main [Jaeger Documentation](https://www.jaegertracing.io/docs/latest/operator/).
CRD-API documentation can be found [here](./docs/api.md).
## Compatibility matrix
See the compatibility matrix [here](./COMPATIBILITY.md).
The following table shows the compatibility of the Jaeger Operator with different components, in this case Kubernetes and the Strimzi Operator:

| Jaeger Operator | Kubernetes          | Strimzi Operator |
|-----------------|---------------------|------------------|
| v1.24           | v1.19, v1.20, v1.21 | v0.23            |
| v1.23           | v1.19, v1.20, v1.21 | v0.19, v0.20     |
| v1.22           | v1.18 to v1.20      | v0.19            |
### Jaeger Operator vs. Jaeger
@ -64,159 +86,27 @@ The jaeger Operator *might* work on other untested versions of Strimzi Operator,
## (experimental) Generate Kubernetes manifest file
Sometimes it is preferable to generate plain manifests files instead of running an operator in a cluster. `jaeger-operator generate` generates kubernetes manifests from a given CR. In this example we apply the manifest generated by [examples/simplest.yaml](https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml) to the namespace `jaeger-test`:
Sometimes it is preferable to generate plain manifests files instead of running an operator in a cluster. `jaeger-operator generate` generates kubernetes manifests from a given CR. In this example we apply the manifest generated by [examples/simplest.yaml](https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/examples/simplest.yaml) to the namespace `jaeger-test`:
```bash
curl https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml | docker run -i --rm jaegertracing/jaeger-operator:main generate | kubectl apply -n jaeger-test -f -
curl https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/examples/simplest.yaml | docker run -i --rm jaegertracing/jaeger-operator:master generate | kubectl apply -n jaeger-test -f -
```
It is recommended to deploy the operator instead of generating a static manifest.
## Jaeger V2 Operator
As Jaeger V2 is released, it has been decided that Jaeger V2 will be deployed on Kubernetes using the [OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-operator). This will benefit the users of both Jaeger and OpenTelemetry. To use Jaeger V2 with the OpenTelemetry Operator, the steps are as follows:
* Install the cert-manager in the existing cluster with the command:
```bash
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.1/cert-manager.yaml
```
Please verify all the resources (e.g., Pods and Deployments) are in a ready state in the `cert-manager` namespace.
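A quick sanity check (assuming the default installation namespace):
```bash
kubectl get pods -n cert-manager
```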
* Install the OpenTelemetry Operator by running:
```bash
kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
```
Please verify all the resources (e.g., Pods and Deployments) are in a ready state in the `opentelemetry-operator-system` namespace.
### Using Jaeger with in-memory storage
Once all the resources are ready, create a Jaeger instance as follows:
```yaml
kubectl apply -f - <<EOF
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
name: jaeger-inmemory-instance
spec:
image: jaegertracing/jaeger:latest
ports:
- name: jaeger
port: 16686
config:
service:
extensions: [jaeger_storage, jaeger_query]
pipelines:
traces:
receivers: [otlp]
exporters: [jaeger_storage_exporter]
extensions:
jaeger_query:
storage:
traces: memstore
jaeger_storage:
backends:
memstore:
memory:
max_traces: 100000
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
exporters:
jaeger_storage_exporter:
trace_storage: memstore
EOF
```
To access the UI when using in-memory storage with Jaeger V2, expose the pod, deployment or service as follows:
```bash
kubectl port-forward deployment/jaeger-inmemory-instance-collector 8080:16686
```
Or
```bash
kubectl port-forward service/jaeger-inmemory-instance-collector 8080:16686
```
Once done, open `localhost:8080` in the browser to interact with the UI.
[Note] There is ongoing development in the OpenTelemetry Operator that will allow users to interact directly with the UI.
### Using Jaeger with database to store traces
To use Jaeger V2 with a supported database, the database deployments must be created first and be in a `ready` state [(ref)](https://www.jaegertracing.io/docs/2.0/storage/).
Create a Kubernetes Service that exposes the database pods, enabling communication between the database and the Jaeger pods.
The service can be created in two ways: [manually](https://kubernetes.io/docs/concepts/services-networking/service/), or with an imperative command:
```bash
kubectl expose pods <pod-name> --port=<port-number> --name=<name-of-the-service>
```
Or
```bash
kubectl expose deployment <deployment-name> --port=<port-number> --name=<name-of-the-service>
```
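For example, assuming a hypothetical Cassandra deployment named `cassandra` listening on port 9042:
```bash
kubectl expose deployment cassandra --port=9042 --name=cassandra-service
```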
After the service is created, add its name as an endpoint in the respective storage config as follows:
* [Cassandra DB](https://github.com/jaegertracing/jaeger/blob/main/cmd/jaeger/config-cassandra.yaml):
```yaml
jaeger_storage:
backends:
some_storage:
cassandra:
connection:
servers: [<name-of-the-service>]
```
* [ElasticSearch](https://github.com/jaegertracing/jaeger/blob/main/cmd/jaeger/config-elasticsearch.yaml):
```yaml
jaeger_storage:
backends:
some_storage:
elasticsearch:
servers: [<name-of-the-service>]
```
Use the modified config to create Jaeger instance with the help of OpenTelemetry Operator.
```yaml
kubectl apply -f - <<EOF
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
name: jaeger-storage-instance # name of your choice
spec:
image: jaegertracing/jaeger:latest
ports:
- name: jaeger
port: 16686
config:
# modified config
EOF
```
## Contributing and Developing
Please see [CONTRIBUTING.md](CONTRIBUTING.md).
## License
[Apache 2.0 License](./LICENSE).
[ci-img]: https://github.com/jaegertracing/jaeger-operator/workflows/CI%20Workflow/badge.svg
[ci]: https://github.com/jaegertracing/jaeger-operator/actions
[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-operator/branch/main/graph/badge.svg
[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-operator/branch/master/graph/badge.svg
[cov]: https://codecov.io/github/jaegertracing/jaeger-operator/
[goreport-img]: https://goreportcard.com/badge/github.com/jaegertracing/jaeger-operator
[goreport]: https://goreportcard.com/report/github.com/jaegertracing/jaeger-operator
[godoc-img]: https://godoc.org/github.com/jaegertracing/jaeger-operator?status.svg
[godoc]: https://godoc.org/github.com/jaegertracing/jaeger-operator/apis/v1#JaegerSpec
[godoc]: https://godoc.org/github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1#JaegerSpec
@ -1,72 +1,39 @@
# Releasing the Jaeger Operator for Kubernetes
## Generating the changelog
1. Update Jaeger version in `versions.txt`
- Get the `OAUTH_TOKEN` from [Github](https://github.com/settings/tokens/new?description=GitHub%20Changelog%20Generator%20token), select `repo:status` scope.
- Run `OAUTH_TOKEN=... make changelog`
- Remove the commits that are not relevant to users, like:
* CI or testing-specific commits (e2e, unit test, ...)
* bug fixes for problems that are not part of a release yet
* version bumps for internal dependencies
1. Make sure the new version is present at `pkg/upgrade/versions.go`
## Releasing
1. Prepare a changelog since the last release. Get the `OAUTH_TOKEN` from [Github](https://github.com/settings/tokens/new?description=GitHub%20Changelog%20Generator%20token) and select the `repo:status` scope.
Steps to release a new version of the Jaeger Operator:
1. Change the `versions.txt` so that it lists the target version of Jaeger (if required). **Don't touch the operator version**: it will be changed automatically in the next step.
2. Confirm that `MIN_KUBERNETES_VERSION` and `MIN_OPENSHIFT_VERSION` in the `Makefile` are still up-to-date, and update them if required.
2. Run `OPERATOR_VERSION=1.30.0 make prepare-release`, using the operator version that will be released.
3. Run the E2E tests in OpenShift as described in [the CONTRIBUTING.md](CONTRIBUTING.md#an-external-cluster-like-openshift) file. The tests will be executed automatically in Kubernetes by the GitHub Actions CI later.
4. Prepare a changelog since last release.
4. Update the release manager schedule.
5. Commit the changes and create a pull request:
```sh
git commit -sm "Preparing release v1.30.0"
```
5. Once the changes above are merged and available in `main` tag it with the desired version, prefixed with `v`, eg. `v1.30.0`
```sh
git checkout main
git tag v1.30.0
git push git@github.com:jaegertracing/jaeger-operator.git v1.30.0
```
```
OAUTH_TOKEN=... make changelog
```
6. The GitHub Workflow will take it from here, creating a GitHub release and publishing the images
1. Commit version change and changelog and create a pull request:
7. After the release, PRs need to be created against the Operator Hub Community Operators repositories:
```
git commit -sm "Preparing release v1.25.0"
```
* One for the [upstream-community-operators](https://github.com/k8s-operatorhub/community-operators), used by OLM on Kubernetes.
* One for the [community-operators](https://github.com/redhat-openshift-ecosystem/community-operators-prod) used by OpenShift.
1. Tag and push
This can be done with the following steps:
- Update main `git pull git@github.com:jaegertracing/jaeger-operator.git main`
- Clone both repositories `upstream-community-operators` and `community-operators`
- Run `make operatorhub`
* If you have [`gh`](https://cli.github.com/) installed and configured, it will open the necessary PRs for you automatically.
* If you don't have it, the branches will be pushed to `origin` and you should be able to open the PR from there
```
git checkout master ## it's only possible to release from master for now!
git tag release/v1.25.0
git push git@github.com:jaegertracing/jaeger-operator.git release/v1.25.0
```
## Note
After the PRs have been made, ensure that:
- Images listed in the ClusterServiceVersion (CSV) have a versions tag [#1682](https://github.com/jaegertracing/jaeger-operator/issues/1682)
- No `bundle` folder is included in the release
- No foreign CRs like prometheus are in the manifests
1. Wait until release CI job finishes and then pull the changes:
## Release managers
```
git pull git@github.com:jaegertracing/jaeger-operator.git master
```
The operator should be released within a week after the [Jaeger release](https://github.com/jaegertracing/jaeger/blob/main/RELEASE.md#release-managers).
1. Apply generated OLM catalog files to [operatorhub.io](https://operatorhub.io)
* Clone the [operatorhub](https://github.com/operator-framework/community-operators) repo
* Run `make operatorhub`
- If you have [`hub`](https://hub.github.com/) installed and configured, it will open the necessary PRs for you automatically. Hint: `dnf install hub` works fine on Fedora.
- If you don't have it, the branches will be pushed to `origin` and you should be able to open the PR from there
| Version | Release Manager |
|---------| -------------------------------------------------------- |
| 1.63.0 | [Benedikt Bongartz](https://github.com/frzifus) |
| 1.64.0 | [Pavol Loffay](https://github.com/pavolloffay) |
| 1.65.0 | [Israel Blancas](https://github.com/iblancasa) |
| 1.66.0 | [Ruben Vargas](https://github.com/rubenvp8510) |
@ -1,20 +0,0 @@
// Package v1 contains API Schema definitions for the jaegertracing.io v1 API group
// +kubebuilder:object:generate=true
// +groupName=jaegertracing.io
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "jaegertracing.io", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
@ -1,164 +0,0 @@
package v1
import (
"context"
"fmt"
"regexp"
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
const (
defaultElasticsearchName = "elasticsearch"
)
// log is for logging in this package.
var (
jaegerlog = logf.Log.WithName("jaeger-resource")
cl client.Client
)
// SetupWebhookWithManager adds the Jaeger webhook to the manager.
func (j *Jaeger) SetupWebhookWithManager(mgr ctrl.Manager) error {
cl = mgr.GetClient()
return ctrl.NewWebhookManagedBy(mgr).
For(j).
Complete()
}
//+kubebuilder:webhook:path=/mutate-jaegertracing-io-v1-jaeger,mutating=true,failurePolicy=fail,sideEffects=None,groups=jaegertracing.io,resources=jaegers,verbs=create;update,versions=v1,name=mjaeger.kb.io,admissionReviewVersions={v1}
func (j *Jaeger) objsWithOptions() []*Options {
return []*Options{
&j.Spec.AllInOne.Options, &j.Spec.Query.Options, &j.Spec.Collector.Options,
&j.Spec.Ingester.Options, &j.Spec.Agent.Options, &j.Spec.Storage.Options,
}
}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (j *Jaeger) Default() {
jaegerlog.Info("default", "name", j.Name)
jaegerlog.Info("WARNING jaeger-agent is deprecated and will removed in v1.55.0. See https://github.com/jaegertracing/jaeger/issues/4739", "component", "agent")
if j.Spec.Storage.Elasticsearch.Name == "" {
j.Spec.Storage.Elasticsearch.Name = defaultElasticsearchName
}
if ShouldInjectOpenShiftElasticsearchConfiguration(j.Spec.Storage) && j.Spec.Storage.Elasticsearch.DoNotProvision {
// check if ES instance exists
es := &esv1.Elasticsearch{}
err := cl.Get(context.Background(), types.NamespacedName{
Namespace: j.Namespace,
Name: j.Spec.Storage.Elasticsearch.Name,
}, es)
if errors.IsNotFound(err) {
return
}
j.Spec.Storage.Elasticsearch.NodeCount = OpenShiftElasticsearchNodeCount(es.Spec)
}
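// Normalize TLS options: if any *.tls.* flag is set without a matching *.tls.enabled flag, enable it explicitly.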
for _, opt := range j.objsWithOptions() {
optCopy := opt.DeepCopy()
if f := getAdditionalTLSFlags(optCopy.ToArgs()); f != nil {
newOpts := optCopy.GenericMap()
for k, v := range f {
newOpts[k] = v
}
if err := opt.parse(newOpts); err != nil {
jaegerlog.Error(err, "name", j.Name, "method", "Option.Parse")
}
}
}
}
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//+kubebuilder:webhook:path=/validate-jaegertracing-io-v1-jaeger,mutating=false,failurePolicy=fail,sideEffects=None,groups=jaegertracing.io,resources=jaegers,verbs=create;update,versions=v1,name=vjaeger.kb.io,admissionReviewVersions={v1}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateCreate() (admission.Warnings, error) {
jaegerlog.Info("validate create", "name", j.Name)
return j.ValidateUpdate(nil)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
jaegerlog.Info("validate update", "name", j.Name)
if ShouldInjectOpenShiftElasticsearchConfiguration(j.Spec.Storage) && j.Spec.Storage.Elasticsearch.DoNotProvision {
// check if ES instance exists
es := &esv1.Elasticsearch{}
err := cl.Get(context.Background(), types.NamespacedName{
Namespace: j.Namespace,
Name: j.Spec.Storage.Elasticsearch.Name,
}, es)
if errors.IsNotFound(err) {
return nil, fmt.Errorf("elasticsearch instance not found: %w", err)
}
}
for _, opt := range j.objsWithOptions() {
got := opt.DeepCopy().ToArgs()
if f := getAdditionalTLSFlags(got); f != nil {
return nil, fmt.Errorf("tls flags incomplete, got: %v", got)
}
}
return nil, nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (j *Jaeger) ValidateDelete() (admission.Warnings, error) {
jaegerlog.Info("validate delete", "name", j.Name)
return nil, nil
}
// OpenShiftElasticsearchNodeCount returns total node count of Elasticsearch nodes.
func OpenShiftElasticsearchNodeCount(spec esv1.ElasticsearchSpec) int32 {
nodes := int32(0)
for i := 0; i < len(spec.Nodes); i++ {
nodes += spec.Nodes[i].NodeCount
}
return nodes
}
// ShouldInjectOpenShiftElasticsearchConfiguration returns true if OpenShift Elasticsearch is used and its configuration should be used.
func ShouldInjectOpenShiftElasticsearchConfiguration(s JaegerStorageSpec) bool {
if s.Type != JaegerESStorage {
return false
}
_, ok := s.Options.Map()["es.server-urls"]
return !ok
}
var (
tlsFlag = regexp.MustCompile("--.*tls.*=")
tlsFlagIdx = regexp.MustCompile("--.*tls")
tlsEnabledExists = regexp.MustCompile("--.*tls.enabled")
)
// getAdditionalTLSFlags returns additional tls arguments based on the argument
// list. If no additional argument is needed, nil is returned.
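// For example, ["--foo.tls.cert=/certs/tls.crt"] yields {"foo.tls.enabled": "true"}.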
func getAdditionalTLSFlags(args []string) map[string]interface{} {
var res map[string]interface{}
for _, arg := range args {
a := []byte(arg)
if tlsEnabledExists.Match(a) {
// NOTE: if flag exists, we are done.
return nil
}
if tlsFlag.Match(a) && res == nil {
idx := tlsFlagIdx.FindIndex(a)
res = make(map[string]interface{})
res[arg[idx[0]+2:idx[1]]+".enabled"] = "true"
}
}
return res
}
@ -1,369 +0,0 @@
package v1
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
var (
_ webhook.Defaulter = &Jaeger{}
_ webhook.Validator = &Jaeger{}
)
func TestDefault(t *testing.T) {
tests := []struct {
name string
objs []runtime.Object
j *Jaeger
expected *Jaeger
}{
{
name: "set missing ES name",
j: &Jaeger{
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Elasticsearch: ElasticsearchSpec{
Name: "",
},
},
},
},
expected: &Jaeger{
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Elasticsearch: ElasticsearchSpec{
Name: "elasticsearch",
},
},
},
},
},
{
name: "set ES node count",
objs: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "project1",
},
},
&esv1.Elasticsearch{
ObjectMeta: metav1.ObjectMeta{
Name: "my-es",
Namespace: "project1",
},
Spec: esv1.ElasticsearchSpec{
Nodes: []esv1.ElasticsearchNode{
{
NodeCount: 3,
},
},
},
},
},
j: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Type: "elasticsearch",
Elasticsearch: ElasticsearchSpec{
Name: "my-es",
DoNotProvision: true,
},
},
},
},
expected: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Type: "elasticsearch",
Elasticsearch: ElasticsearchSpec{
Name: "my-es",
NodeCount: 3,
DoNotProvision: true,
},
},
},
},
},
{
name: "do not set ES node count",
j: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Type: "elasticsearch",
Elasticsearch: ElasticsearchSpec{
Name: "my-es",
DoNotProvision: false,
NodeCount: 1,
},
},
},
},
expected: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Type: "elasticsearch",
Elasticsearch: ElasticsearchSpec{
Name: "my-es",
NodeCount: 1,
DoNotProvision: false,
},
},
},
},
},
{
name: "missing tls enable flag",
j: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Type: JaegerMemoryStorage,
Options: NewOptions(map[string]interface{}{"stuff.tls.test": "something"}),
},
},
},
expected: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Type: JaegerMemoryStorage,
Options: NewOptions(
map[string]interface{}{
"stuff.tls.test": "something",
"stuff.tls.enabled": "true",
},
),
Elasticsearch: ElasticsearchSpec{
Name: defaultElasticsearchName,
},
},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
require.NoError(t, esv1.AddToScheme(scheme.Scheme))
require.NoError(t, AddToScheme(scheme.Scheme))
fakeCl := fake.NewClientBuilder().WithRuntimeObjects(test.objs...).Build()
cl = fakeCl
test.j.Default()
assert.Equal(t, test.expected, test.j)
})
}
}
func TestValidateDelete(t *testing.T) {
warnings, err := new(Jaeger).ValidateDelete()
assert.Nil(t, warnings)
require.NoError(t, err)
}
func TestValidate(t *testing.T) {
tests := []struct {
name string
objsToCreate []runtime.Object
current *Jaeger
err string
}{
{
name: "ES instance exists",
objsToCreate: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "project1",
},
},
&esv1.Elasticsearch{
ObjectMeta: metav1.ObjectMeta{
Name: "my-es",
Namespace: "project1",
},
Spec: esv1.ElasticsearchSpec{
Nodes: []esv1.ElasticsearchNode{
{
NodeCount: 3,
},
},
},
},
},
current: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Type: "elasticsearch",
Elasticsearch: ElasticsearchSpec{
Name: "my-es",
DoNotProvision: true,
},
},
},
},
},
{
name: "ES instance does not exist",
objsToCreate: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "project1",
},
},
},
current: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Type: "elasticsearch",
Elasticsearch: ElasticsearchSpec{
Name: "my-es",
DoNotProvision: true,
},
},
},
},
err: `elasticsearch instance not found: elasticsearchs.logging.openshift.io "my-es" not found`,
},
{
name: "missing tls options",
current: &Jaeger{
ObjectMeta: metav1.ObjectMeta{
Namespace: "project1",
},
Spec: JaegerSpec{
Storage: JaegerStorageSpec{
Options: NewOptions(map[string]interface{}{
"something.tls.else": "fails",
}),
Type: JaegerMemoryStorage,
},
},
},
err: `tls flags incomplete, got: [--something.tls.else=fails]`,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
require.NoError(t, esv1.AddToScheme(scheme.Scheme))
require.NoError(t, AddToScheme(scheme.Scheme))
fakeCl := fake.NewClientBuilder().WithRuntimeObjects(test.objsToCreate...).Build()
cl = fakeCl
warnings, err := test.current.ValidateCreate()
if test.err != "" {
require.Error(t, err)
assert.Equal(t, test.err, err.Error())
} else {
require.NoError(t, err)
}
assert.Nil(t, warnings)
})
}
}
func TestShouldDeployElasticsearch(t *testing.T) {
tests := []struct {
j JaegerStorageSpec
expected bool
}{
{j: JaegerStorageSpec{}},
{j: JaegerStorageSpec{Type: JaegerCassandraStorage}},
{j: JaegerStorageSpec{Type: JaegerESStorage, Options: NewOptions(map[string]interface{}{"es.server-urls": "foo"})}},
{j: JaegerStorageSpec{Type: JaegerESStorage}, expected: true},
}
for i, test := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
assert.Equal(t, test.expected, ShouldInjectOpenShiftElasticsearchConfiguration(test.j))
})
}
}
func TestGetAdditionalTLSFlags(t *testing.T) {
tt := []struct {
name string
args []string
expect map[string]interface{}
}{
{
name: "no tls flag",
args: []string{"--something.else"},
expect: nil,
},
{
name: "already enabled",
args: []string{"--something.tls.enabled=true", "--something.tls.else=abc"},
expect: nil,
},
{
name: "is disabled",
args: []string{"--tls.enabled=false", "--something.else", "--something.tls.else=abc"},
expect: nil,
},
{
name: "must be enabled",
args: []string{"--something.tls.else=abc"},
expect: map[string]interface{}{
"something.tls.enabled": "true",
},
},
{
// NOTE: we want to avoid something like:
// --kafka.consumer.authentication=tls.enabled=true
name: "enable consumer tls",
args: []string{
"--es.server-urls=http://elasticsearch:9200",
"--kafka.consumer.authentication=tls",
"--kafka.consumer.brokers=my-cluster-kafka-bootstrap:9093",
"--kafka.consumer.tls.ca=/var/run/secrets/cluster-ca/ca.crt",
"--kafka.consumer.tls.cert=/var/run/secrets/kafkauser/user.crt",
"--kafka.consumer.tls.key=/var/run/secrets/kafkauser/user.key",
},
expect: map[string]interface{}{
"kafka.consumer.tls.enabled": "true",
},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
got := getAdditionalTLSFlags(tc.args)
if !cmp.Equal(tc.expect, got) {
t.Error("err:", cmp.Diff(tc.expect, got))
}
})
}
}
@ -1,14 +0,0 @@
package v1
import (
"github.com/go-logr/logr"
logf "sigs.k8s.io/controller-runtime/pkg/log"
)
// Logger returns a logger filled with context-related fields, such as Name and Namespace
func (j *Jaeger) Logger() logr.Logger {
return logf.Log.WithValues(
"instance", j.Name,
"namespace", j.Namespace,
)
}
build/Dockerfile
@ -0,0 +1,48 @@
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.16 as builder
ARG JAEGER_VERSION
ENV JAEGER_VERSION=${JAEGER_VERSION}
COPY . /go/src/github.com/jaegertracing/jaeger-operator/
WORKDIR /go/src/github.com/jaegertracing/jaeger-operator
ARG GOPROXY
# download deps before gobuild
RUN go mod download -x
# `FROM --platform=${BUILDPLATFORM}` prepares the build image for the matched
# BUILDPLATFORM, e.g. linux/amd64. This way we avoid using qemu, which slows
# down the compile process, and it is useful for languages that support
# multi-arch builds, like Go.
# See the last part of https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
ARG TARGETARCH
# when --platform=linux/amd64,linux/arm64
#
# for $TARGETARCH in "amd64 arm64" do
RUN make gobuild OUTPUT_BINARY=/go/bin/jaeger-operator-${TARGETARCH} GOARCH=${TARGETARCH}
# done
FROM registry.access.redhat.com/ubi8/ubi
ENV OPERATOR=/usr/local/bin/jaeger-operator \
USER_UID=1001 \
USER_NAME=jaeger-operator
RUN INSTALL_PKGS=" \
openssl \
" && \
yum install -y $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
yum clean all && \
mkdir /tmp/_working_dir && \
chmod og+w /tmp/_working_dir
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/scripts/* /scripts/
# install operator binary
ARG TARGETARCH
COPY --from=builder /go/bin/jaeger-operator-${TARGETARCH} ${OPERATOR}
ENTRYPOINT ["/usr/local/bin/jaeger-operator"]
USER ${USER_UID}:${USER_UID}
@ -1,19 +0,0 @@
FROM scratch
# Core bundle labels.
LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1
LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/
LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/
LABEL operators.operatorframework.io.bundle.package.v1=jaeger
LABEL operators.operatorframework.io.bundle.channels.v1=stable
LABEL operators.operatorframework.io.bundle.channel.default.v1=stable
LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.13.0+git
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
# OpenShift specific labels.
LABEL com.redhat.openshift.versions=v4.12
# Copy files to locations specified by labels.
COPY bundle/manifests /manifests/
COPY bundle/metadata /metadata/
@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
name: jaeger-operator
name: jaeger-operator-metrics-reader
rules:
- nonResourceURLs:
- /metrics
verbs:
- get
@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: metrics
name: jaeger-operator
name: jaeger-operator-metrics
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector:
name: jaeger-operator
status:

@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
name: jaeger-operator
name: jaeger-operator-webhook-service
spec:
ports:
- port: 443
protocol: TCP
targetPort: 9443
selector:
name: jaeger-operator
status:

@ -1,21 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
creationTimestamp: null
labels:
name: jaeger-operator
name: prometheus
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
@ -1,18 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
creationTimestamp: null
labels:
name: jaeger-operator
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: openshift-monitoring
@ -1,14 +0,0 @@
annotations:
# Core bundle annotations.
operators.operatorframework.io.bundle.mediatype.v1: registry+v1
operators.operatorframework.io.bundle.manifests.v1: manifests/
operators.operatorframework.io.bundle.metadata.v1: metadata/
operators.operatorframework.io.bundle.package.v1: jaeger
operators.operatorframework.io.bundle.channels.v1: stable
operators.operatorframework.io.bundle.channel.default.v1: stable
operators.operatorframework.io.metrics.builder: operator-sdk-v1.13.0+git
operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
# OpenShift annotations
com.redhat.openshift.versions: v4.12
@ -1,70 +0,0 @@
apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration
metadata:
name: config
stages:
- parallel: false
tests:
- entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.32.0
labels:
suite: basic
test: basic-check-spec-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.32.0
labels:
suite: olm
test: olm-bundle-validation-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.32.0
labels:
suite: olm
test: olm-crds-have-validation-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.32.0
labels:
suite: olm
test: olm-crds-have-resources-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.32.0
labels:
suite: olm
test: olm-spec-descriptors-test
storage:
spec:
mountPath: {}
- entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.32.0
labels:
suite: olm
test: olm-status-descriptors-test
storage:
spec:
mountPath: {}
storage:
spec:
mountPath: {}
cmd/manager/main.go
@ -0,0 +1,13 @@
package main
import "github.com/jaegertracing/jaeger-operator/cmd"
func main() {
// Note that this file should be identical to the main.go at the root of the project
// It would really be nice if this one here wouldn't be required, but the Operator SDK
// requires it...
// https://github.com/operator-framework/operator-sdk/blob/master/doc/migration/v0.1.0-migration-guide.md#copy-changes-from-maingo
// > operator-sdk now expects cmd/manager/main.go to be present in Go operator projects.
// > Go project-specific commands, ex. add [api, controller], will error if main.go is not found in its expected path.
cmd.Execute()
}
@ -1,28 +0,0 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More document can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes.
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
namespace: system
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
namespace: system
spec:
# $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
dnsNames:
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
issuerRef:
kind: Issuer
name: selfsigned-issuer
secretName: jaeger-operator-service-cert # this secret will not be prefixed, since it's not managed by kustomize
subject:
organizationalUnits:
- "jaeger-operator"
@ -1,7 +0,0 @@
resources:
- certificate.yaml
namePrefix: jaeger-operator-
configurations:
- kustomizeconfig.yaml
@ -1,16 +0,0 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
nameReference:
- kind: Issuer
group: cert-manager.io
fieldSpecs:
- kind: Certificate
group: cert-manager.io
path: spec/issuerRef/name
varReference:
- kind: Certificate
group: cert-manager.io
path: spec/commonName
- kind: Certificate
group: cert-manager.io
path: spec/dnsNames
@ -1,23 +0,0 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/jaegertracing.io_jaegers.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_jaegers.yaml
#- patches/webhook_in_kafkas.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
- patches/cainjection_in_jaegers.yaml
#- patches/cainjection_in_kafkas.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml
@ -1,19 +0,0 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations
@ -1,7 +0,0 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: jaegers.jaegertracing.io
@ -1,16 +0,0 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: jaegers.jaegertracing.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: system
name: jaeger-operator-webhook-service
path: /convert
conversionReviewVersions:
- v1
@ -1,69 +0,0 @@
# Adds namespace to all resources.
namespace: observability
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
# The prefix is not used here because the manager's deployment name is jaeger-operator
# which means that the manifest would have to contain an empty name which is not allowed.
#namePrefix: jaeger-operator-
# Labels to add to all resources and selectors.
# https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/#labels
commonLabels:
name: jaeger-operator
bases:
- ../crd
- ../rbac
- ../manager
- ../webhook
- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
- manager_webhook_patch.yaml
- webhookcainjection_patch.yaml
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
objref:
kind: Certificate
group: cert-manager.io
version: v1
name: serving-cert # this name should match the one in certificate.yaml
fieldref:
fieldpath: metadata.namespace
- name: CERTIFICATE_NAME
objref:
kind: Certificate
group: cert-manager.io
version: v1
name: serving-cert # this name should match the one in certificate.yaml
- name: SERVICE_NAMESPACE # namespace of the service
objref:
kind: Service
version: v1
name: webhook-service
fieldref:
fieldpath: metadata.namespace
- name: SERVICE_NAME
objref:
kind: Service
version: v1
name: webhook-service
@ -1,33 +0,0 @@
# This patch inject a sidecar container which is a HTTP proxy for the
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: jaeger-operator
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8383/"
- "--logtostderr=true"
- "--v=0"
ports:
- containerPort: 8443
protocol: TCP
name: https
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- name: jaeger-operator
args:
- "start"
- "--health-probe-bind-address=:8081"
- "--leader-elect"
@ -1,19 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jaeger-operator
spec:
template:
spec:
containers:
- name: manager
args:
- "--config=controller_manager_config.yaml"
volumeMounts:
- name: manager-config
mountPath: /controller_manager_config.yaml
subPath: controller_manager_config.yaml
volumes:
- name: manager-config
configMap:
name: manager-config
@ -1,22 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jaeger-operator
spec:
template:
spec:
containers:
- name: jaeger-operator
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
volumes:
- name: cert
secret:
defaultMode: 420
secretName: jaeger-operator-service-cert
@ -1,15 +0,0 @@
# This patch add annotation to admission webhook config and
# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: mutating-webhook-configuration
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: validating-webhook-configuration
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
@ -1,11 +0,0 @@
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
leaderElection:
leaderElect: true
resourceName: 31e04290.jaegertracing.io
@ -1,8 +0,0 @@
resources:
- manager.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: quay.io/jaegertracing/jaeger-operator
newTag: 1.65.0
@ -1,83 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jaeger-operator
labels:
spec:
selector:
matchLabels:
strategy: {}
replicas: 1
template:
metadata:
labels:
spec:
securityContext:
runAsNonRoot: true
containers:
- command:
- /jaeger-operator
args:
- start
- --leader-elect
image: controller:latest
name: jaeger-operator
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 128Mi
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.annotations['olm.targetNamespaces']
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: OPERATOR_NAME
value: "jaeger-operator"
- name: LOG-LEVEL
value: DEBUG
- name: KAFKA-PROVISIONING-MINIMAL
value: "true"
serviceAccountName: jaeger-operator
terminationGracePeriodSeconds: 10
@ -1,27 +0,0 @@
# These resources constitute the fully configured set of manifests
# used to generate the 'manifests/' directory in a bundle.
resources:
- bases/jaeger-operator.clusterserviceversion.yaml
- ../default
- ../samples
#- ../scorecard
# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
# These patches remove the unnecessary "cert" volume and its manager container volumeMount.
#patchesJson6902:
#- target:
# group: apps
# version: v1
# kind: Deployment
# name: controller-manager
# namespace: system
# patch: |-
# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
# - op: remove
# path: /spec/template/spec/containers/1/volumeMounts/0
# # Remove the "cert" volume, since OLM will create and mount a set of certs.
# # Update the indices in this path if adding or removing volumes in the manager's Deployment.
# - op: remove
# path: /spec/template/spec/volumes/0
@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../default
components:
- ./patch
@ -1,40 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
$patch: delete
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: jaeger-operator-metrics-reader
- patch: |
- op: replace
path: /kind
value: Role
target:
group: rbac.authorization.k8s.io
kind: ClusterRole
- patch: |
- op: replace
path: /roleRef/kind
value: Role
target:
group: rbac.authorization.k8s.io
kind: ClusterRoleBinding
- patch: |
- op: replace
path: /kind
value: RoleBinding
target:
group: rbac.authorization.k8s.io
kind: ClusterRoleBinding
- target:
group: apps
version: v1
name: jaeger-operator
kind: Deployment
patch: |-
- op: replace
path: /spec/template/spec/containers/0/env/0/valueFrom/fieldRef/fieldPath
value: metadata.namespace
@ -1,2 +0,0 @@
resources:
- monitor.yaml
@ -1,22 +0,0 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
name: jaeger-operator
name: jaeger-operator-metrics-monitor
spec:
endpoints:
- path: /metrics
targetPort: 8443
scheme: https
interval: 30s
scrapeTimeout: 10s
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
selector:
matchLabels:
name: jaeger-operator
app.kubernetes.io/component: metrics
@ -1,9 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: jaeger-operator-metrics-reader
rules:
- nonResourceURLs:
- "/metrics"
verbs:
- get
@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
@ -1,11 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: jaeger-operator-proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: jaeger-operator
@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
name: jaeger-operator
app.kubernetes.io/component: metrics
name: jaeger-operator-metrics
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector:
name: jaeger-operator

# permissions for end users to edit jaegers.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: jaeger-operator-editor-role
rules:
- apiGroups:
- jaegertracing.io
resources:
- jaegers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- jaegertracing.io
resources:
- jaegers/status
verbs:
- get
@ -1,20 +0,0 @@
# permissions for end users to view jaegers.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: jaeger-operator-viewer-role
rules:
- apiGroups:
- jaegertracing.io
resources:
- jaegers
verbs:
- get
- list
- watch
- apiGroups:
- jaegertracing.io
resources:
- jaegers/status
verbs:
- get
@ -1,20 +0,0 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml
- prometheus_role.yaml
- prometheus_role_binding.yaml
@ -1,37 +0,0 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
@ -1,18 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
name: prometheus
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
@ -1,16 +0,0 @@
# Grant cluster-monitoring access to openshift-operators-redhat metrics
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: prometheus
annotations:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: openshift-monitoring
@ -1,6 +0,0 @@
apiVersion: jaegertracing.io/v1
kind: "Jaeger"
metadata:
name: "my-jaeger"
spec:
strategy: allInOne
@ -1,4 +0,0 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- jaegertracing.io_v1_jaeger.yaml
#+kubebuilder:scaffold:manifestskustomizesamples
@ -1,12 +0,0 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: mutating-webhook-configuration
webhooks:
- name: deployment.sidecar-injector.jaegertracing.io
objectSelector: # Skip resources with the name jaeger-operator
matchExpressions:
- key: name
operator: NotIn
values:
- "jaeger-operator"
@ -1,11 +0,0 @@
resources:
- manifests.yaml
- service.yaml
namePrefix: jaeger-operator-
configurations:
- kustomizeconfig.yaml
patchesStrategicMerge:
- deployment_inject_patch.yaml
@ -1,26 +0,0 @@
# the following config is for teaching kustomize where to look at when substituting vars.
# It requires kustomize v2.1.0 or newer to work properly.
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: MutatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/name
- kind: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/name
namespace:
- kind: MutatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
- kind: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
varReference:
- path: metadata/annotations
@ -1,72 +0,0 @@
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: webhook-service
namespace: system
path: /mutate-jaegertracing-io-v1-jaeger
failurePolicy: Fail
name: mjaeger.kb.io
rules:
- apiGroups:
- jaegertracing.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- jaegers
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: webhook-service
namespace: system
path: /mutate-v1-deployment
failurePolicy: Ignore
name: deployment.sidecar-injector.jaegertracing.io
rules:
- apiGroups:
- apps
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- deployments
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: webhook-service
namespace: system
path: /validate-jaegertracing-io-v1-jaeger
failurePolicy: Fail
name: vjaeger.kb.io
rules:
- apiGroups:
- jaegertracing.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- jaegers
sideEffects: None
@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: webhook-service
namespace: system
spec:
ports:
- port: 443
protocol: TCP
targetPort: 9443
@ -1,222 +0,0 @@
package appsv1
import (
"context"
"encoding/json"
"net/http"
"strings"
"github.com/spf13/viper"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/config/ca"
"github.com/jaegertracing/jaeger-operator/pkg/inject"
"github.com/jaegertracing/jaeger-operator/pkg/tracing"
)
var _ webhook.AdmissionHandler = (*deploymentInterceptor)(nil)
// NewDeploymentInterceptorWebhook creates a new deployment mutating webhook to be registered
func NewDeploymentInterceptorWebhook(c client.Client, decoder *admission.Decoder) webhook.AdmissionHandler {
return &deploymentInterceptor{
client: c,
decoder: decoder,
}
}
// You need to ensure that the path here matches the path in the marker.
// +kubebuilder:webhook:path=/mutate-v1-deployment,mutating=true,failurePolicy=ignore,groups="apps",resources=deployments,sideEffects=None,verbs=create;update,versions=v1,name=deployment.sidecar-injector.jaegertracing.io,admissionReviewVersions=v1
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=namespaces/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch
// deploymentInterceptor labels pods if a sidecar is specified in the deployment
type deploymentInterceptor struct {
client client.Client
decoder *admission.Decoder
}
func (d *deploymentInterceptor) shouldHandleDeployment(req admission.Request) bool {
if namespaces := viper.GetString(v1.ConfigWatchNamespace); namespaces != v1.WatchAllNamespaces {
for _, ns := range strings.Split(namespaces, ",") {
if strings.EqualFold(ns, req.Namespace) {
return true
}
}
return false
}
return true
}
// Handle adds a label to a generated pod if the deployment or namespace provides the annotation
func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Request) admission.Response {
tracer := otel.GetTracerProvider().Tracer(v1.ReconciliationTracer)
ctx, span := tracer.Start(ctx, "reconcileDeployment")
span.SetAttributes(
attribute.String("kind", req.Kind.String()),
attribute.String("name", req.Name),
attribute.String("namespace", req.Namespace),
)
if !d.shouldHandleDeployment(req) {
return admission.Allowed("not watching in namespace, we do not touch the deployment")
}
defer span.End()
logger := log.Log.WithValues("namespace", req.Namespace)
logger.V(-1).Info("verify deployment")
dep := &appsv1.Deployment{}
err := d.decoder.Decode(req, dep)
if err != nil {
logger.Error(err, "failed to decode deployment")
return admission.Errored(http.StatusBadRequest, err)
}
if dep.Labels["app"] == "jaeger" && dep.Labels["app.kubernetes.io/component"] != "query" {
// Don't touch jaeger deployments
return admission.Allowed("is jaeger deployment, we do not touch it")
}
ns := &corev1.Namespace{}
err = d.client.Get(ctx, types.NamespacedName{Name: req.Namespace}, ns)
// we shouldn't fail if the namespace object can't be obtained
if err != nil {
msg := "failed to get the namespace for the deployment, skipping injection based on namespace annotation"
logger.Error(err, msg)
span.AddEvent(msg, trace.WithAttributes(attribute.String("error", err.Error())))
}
jaegers := &v1.JaegerList{}
opts := []client.ListOption{}
if viper.GetString(v1.ConfigOperatorScope) == v1.OperatorScopeNamespace {
opts = append(opts, client.InNamespace(viper.GetString(v1.ConfigWatchNamespace)))
}
if err := d.client.List(ctx, jaegers, opts...); err != nil {
logger.Error(err, "failed to get the available Jaeger pods")
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))
}
if inject.Needed(dep, ns) {
jaeger := inject.Select(dep, ns, jaegers)
if jaeger != nil && jaeger.GetDeletionTimestamp() == nil {
logger := logger.WithValues(
"jaeger", jaeger.Name,
"jaeger-namespace", jaeger.Namespace,
)
if jaeger.Namespace != dep.Namespace {
if err := reconcileConfigMaps(ctx, d.client, jaeger, dep); err != nil {
const msg = "failed to reconcile config maps for the namespace"
logger.Error(err, msg)
span.AddEvent(msg)
}
}
// a suitable Jaeger instance was found and is not marked for deletion: inject a sidecar pointing to it
{
msg := "injecting Jaeger Agent sidecar"
logger.Info(msg)
span.AddEvent(msg)
}
envConfigMaps := corev1.ConfigMapList{}
d.client.List(ctx, &envConfigMaps, client.InNamespace(dep.Namespace))
dep = inject.Sidecar(jaeger, dep, inject.WithEnvFromConfigMaps(inject.GetConfigMapsMatchedEnvFromInDeployment(*dep, envConfigMaps.Items)))
marshaledDeploy, err := json.Marshal(dep)
if err != nil {
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))
}
return admission.PatchResponseFromRaw(req.Object.Raw, marshaledDeploy)
}
const msg = "no suitable Jaeger instances found to inject a sidecar"
span.AddEvent(msg)
logger.V(-1).Info(msg)
return admission.Allowed(msg)
}
if hasAgent, _ := inject.HasJaegerAgent(dep); hasAgent {
if _, hasLabel := dep.Labels[inject.Label]; hasLabel {
const msg = "remove sidecar"
logger.Info(msg)
span.AddEvent(msg)
inject.CleanSidecar(dep.Labels[inject.Label], dep)
marshaledDeploy, err := json.Marshal(dep)
if err != nil {
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))
}
return admission.PatchResponseFromRaw(req.Object.Raw, marshaledDeploy)
}
}
return admission.Allowed("no action needed")
}
// deploymentInterceptor implements admission.DecoderInjector.
// A decoder will be automatically injected.
// InjectDecoder injects the decoder.
func (d *deploymentInterceptor) InjectDecoder(decoder *admission.Decoder) error {
d.decoder = decoder
return nil
}
func reconcileConfigMaps(ctx context.Context, cl client.Client, jaeger *v1.Jaeger, dep *appsv1.Deployment) error {
tracer := otel.GetTracerProvider().Tracer(v1.ReconciliationTracer)
ctx, span := tracer.Start(ctx, "reconcileConfigMaps")
defer span.End()
cms := []*corev1.ConfigMap{}
if cm := ca.GetTrustedCABundle(jaeger); cm != nil {
cms = append(cms, cm)
}
if cm := ca.GetServiceCABundle(jaeger); cm != nil {
cms = append(cms, cm)
}
for _, cm := range cms {
if err := reconcileConfigMap(ctx, cl, cm, dep); err != nil {
return tracing.HandleError(err, span)
}
}
return nil
}
func reconcileConfigMap(ctx context.Context, cl client.Client, cm *corev1.ConfigMap, dep *appsv1.Deployment) error {
tracer := otel.GetTracerProvider().Tracer(v1.ReconciliationTracer)
ctx, span := tracer.Start(ctx, "reconcileConfigMap")
defer span.End()
// Update the namespace to be the same as the Deployment being injected
cm.Namespace = dep.Namespace
span.SetAttributes(attribute.String("name", cm.Name), attribute.String("namespace", cm.Namespace))
if err := cl.Create(ctx, cm); err != nil {
if errors.IsAlreadyExists(err) {
span.AddEvent("config map exists already")
} else {
return tracing.HandleError(err, span)
}
}
return nil
}
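Every mutating branch in Handle follows the same skeleton: decode the Deployment from the request, mutate the decoded copy, marshal it back, and let admission.PatchResponseFromRaw compute the JSON patch between the raw request payload and the mutated object. A stripped-down sketch of that skeleton (the label mutation is a placeholder, not the real sidecar injection):

package main

import (
	"encoding/json"
	"net/http"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

func mutate(req admission.Request, decoder *admission.Decoder) admission.Response {
	dep := &appsv1.Deployment{}
	if err := decoder.Decode(req, dep); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	if dep.Labels == nil {
		dep.Labels = map[string]string{}
	}
	dep.Labels["example"] = "mutated" // placeholder for the actual injection logic
	raw, err := json.Marshal(dep)
	if err != nil {
		return admission.Errored(http.StatusInternalServerError, err)
	}
	// PatchResponseFromRaw diffs the original and mutated JSON into patch operations.
	return admission.PatchResponseFromRaw(req.Object.Raw, raw)
}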

View File

@ -1,436 +0,0 @@
package appsv1
import (
"bytes"
"context"
"encoding/json"
"fmt"
"sort"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
jsonpatch "gomodules.xyz/jsonpatch/v2"
admissionv1 "k8s.io/api/admission/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
"github.com/jaegertracing/jaeger-operator/pkg/inject"
)
func TestReconcileConfigMaps(t *testing.T) {
testCases := []struct {
desc string
existing []runtime.Object
errors errorGroup
expect error
}{
{
desc: "all config maps missing",
},
{
desc: "none missing",
existing: []runtime.Object{
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "my-instance-trusted-ca",
},
},
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "my-instance-service-ca",
},
},
},
},
{
desc: "can not create",
errors: errorGroup{createErr: fmt.Errorf("ups, cant create things")},
expect: fmt.Errorf("ups, cant create things"),
existing: []runtime.Object{
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "my-instance-trusted-ca",
},
},
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "my-instance-service-ca",
},
},
},
},
}
for _, tC := range testCases {
t.Run(tC.desc, func(t *testing.T) {
// prepare
jaeger := v1.NewJaeger(types.NamespacedName{
Namespace: "observability",
Name: "my-instance",
})
dep := appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns1",
Name: "my-dep",
},
}
cl := &failingClient{
WithWatch: fake.NewClientBuilder().WithRuntimeObjects(tC.existing...).Build(),
errors: tC.errors,
}
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
// test
err := reconcileConfigMaps(context.Background(), cl, jaeger, &dep)
// verify
assert.Equal(t, tC.expect, err)
cms := corev1.ConfigMapList{}
err = cl.List(context.Background(), &cms)
require.NoError(t, err)
assert.Len(t, cms.Items, 2)
})
}
}
type failingClient struct {
client.WithWatch
errors errorGroup
}
type errorGroup struct {
listErr error
getErr error
createErr error
}
func (u *failingClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
if u.errors.listErr != nil {
return u.errors.listErr
}
return u.WithWatch.List(ctx, list, opts...)
}
func (u *failingClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
if u.errors.getErr != nil {
return u.errors.getErr
}
return u.WithWatch.Get(ctx, key, obj, opts...)
}
func (u *failingClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
if u.errors.createErr != nil {
return u.errors.createErr
}
return u.WithWatch.Create(ctx, obj, opts...)
}
func TestReconcileDeployment(t *testing.T) {
namespacedName := types.NamespacedName{
Name: "jaeger-query",
Namespace: "my-ns",
}
jaeger := v1.NewJaeger(types.NamespacedName{
Namespace: "observability",
Name: "my-instance",
})
s := scheme.Scheme
s.AddKnownTypes(v1.GroupVersion, jaeger)
s.AddKnownTypes(v1.GroupVersion, &v1.JaegerList{})
testCases := []struct {
desc string
dep *appsv1.Deployment
jaeger *v1.Jaeger
resp admission.Response
errors errorGroup
emptyRequest bool
watch_ns string
}{
{
desc: "no content to decode",
dep: &appsv1.Deployment{},
resp: admission.Response{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Message: "there is no content to decode",
Code: 400,
},
},
},
emptyRequest: true,
},
{
desc: "can not get namespaces and list jaegers",
errors: errorGroup{
listErr: fmt.Errorf("ups cant list"),
getErr: fmt.Errorf("ups cant get"),
},
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
Annotations: map[string]string{},
Labels: map[string]string{
"app": "not jaeger",
},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "only_container",
}},
},
},
},
}),
resp: admission.Response{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Message: "ups cant list",
Code: 500,
},
},
},
},
{
desc: "Should not remove the instance from a jaeger component",
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
Annotations: map[string]string{},
Labels: map[string]string{
"app": "jaeger",
},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "only_container",
}},
},
},
},
}),
resp: admission.Response{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "is jaeger deployment, we do not touch it",
Code: 200,
},
},
},
},
{
desc: "Should remove the instance",
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
Annotations: map[string]string{},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "only_container",
}},
},
},
},
}),
resp: admission.Response{
Patches: []jsonpatch.JsonPatchOperation{
{
Operation: "remove",
Path: "/metadata/labels",
},
{
Operation: "remove",
Path: "/spec/template/spec/containers/1",
},
},
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
PatchType: func() *admissionv1.PatchType { str := admissionv1.PatchTypeJSONPatch; return &str }(),
},
},
},
{
desc: "Should inject but no jaeger instance found",
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
Annotations: map[string]string{
inject.Annotation: "true",
},
Labels: map[string]string{
"app": "something",
},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "only_container",
}},
},
},
},
}),
resp: admission.Response{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "no suitable Jaeger instances found to inject a sidecar",
Code: 200,
},
},
},
},
{
desc: "Should inject but empty instance - no patch",
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
Annotations: map[string]string{
inject.Annotation: "true",
},
Labels: map[string]string{
"app": "something",
},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "only_container",
}},
},
},
},
}),
resp: admission.Response{
Patches: []jsonpatch.JsonPatchOperation{},
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
},
},
jaeger: &v1.Jaeger{},
},
{
desc: "should not touch deployments in namespaces outside the watched namespaces",
dep: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Name,
Namespace: namespacedName.Namespace,
Annotations: map[string]string{},
Labels: map[string]string{
"app": "not jaeger",
},
},
Spec: appsv1.DeploymentSpec{},
},
resp: admission.Response{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Message: "not watching in namespace, we do not touch the deployment",
Code: 200,
},
},
},
watch_ns: "my-other-ns, other-ns-2",
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
viper.Set(v1.ConfigWatchNamespace, tc.watch_ns)
defer viper.Reset()
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespacedName.Namespace,
},
}
res := []runtime.Object{tc.dep, ns}
if tc.jaeger != nil {
res = append(res, tc.jaeger)
}
cl := &failingClient{
WithWatch: fake.NewClientBuilder().WithRuntimeObjects(res...).Build(),
errors: tc.errors,
}
decoder := admission.NewDecoder(scheme.Scheme)
r := NewDeploymentInterceptorWebhook(cl, decoder)
req := admission.Request{}
if !tc.emptyRequest {
req = admission.Request{
AdmissionRequest: admissionv1.AdmissionRequest{
Name: tc.dep.Name,
Namespace: tc.dep.Namespace,
Object: runtime.RawExtension{
Raw: func() []byte {
var buf bytes.Buffer
if getErr := json.NewEncoder(&buf).Encode(tc.dep); getErr != nil {
t.Fatal(getErr)
}
return buf.Bytes()
}(),
},
},
}
}
resp := r.Handle(context.Background(), req)
assert.Len(t, resp.Patches, len(tc.resp.Patches))
sort.Slice(resp.Patches, func(i, j int) bool {
return resp.Patches[i].Path < resp.Patches[j].Path
})
sort.Slice(tc.resp.Patches, func(i, j int) bool {
return tc.resp.Patches[i].Path < tc.resp.Patches[j].Path
})
assert.Equal(t, tc.resp, resp)
})
}
}

View File

@ -1,43 +0,0 @@
package appsv1
import (
"context"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/jaegertracing/jaeger-operator/pkg/controller/namespace"
)
// NamespaceReconciler reconciles a Namespace object
type NamespaceReconciler struct {
reconcilier *namespace.ReconcileNamespace
}
// NewNamespaceReconciler creates a new namespace reconciler controller
func NewNamespaceReconciler(client client.Client, clientReader client.Reader, scheme *runtime.Scheme) *NamespaceReconciler {
return &NamespaceReconciler{
reconcilier: namespace.New(client, clientReader, scheme),
}
}
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=namespaces/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch
// Reconcile namespace resource
func (r *NamespaceReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
return r.reconcilier.Reconcile(request)
}
// SetupWithManager sets up the controller with the Manager.
func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error {
err := ctrl.NewControllerManagedBy(mgr).
For(&corev1.Namespace{}).
Complete(r)
return err
}
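Controllers like this one are registered from the operator's entry point. A sketch of the call site (the function name and logger usage are assumptions, not the operator's actual main.go):

package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/jaegertracing/jaeger-operator/controllers/appsv1"
)

func setupNamespaceController(mgr ctrl.Manager) {
	r := appsv1.NewNamespaceReconciler(mgr.GetClient(), mgr.GetAPIReader(), mgr.GetScheme())
	if err := r.SetupWithManager(mgr); err != nil {
		ctrl.Log.Error(err, "unable to register the namespace controller")
		os.Exit(1)
	}
}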

View File

@ -1,55 +0,0 @@
package appsv1_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/controllers/appsv1"
)
func TestNamespaceControllerRegisterWithManager(t *testing.T) {
t.Skip("this test requires a real cluster, otherwise the GetConfigOrDie will die")
// prepare
mgr, err := manager.New(k8sconfig.GetConfigOrDie(), manager.Options{})
require.NoError(t, err)
reconciler := appsv1.NewNamespaceReconciler(
k8sClient,
k8sClient,
testScheme,
)
// test
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
}
func TestNewNamespaceInstance(t *testing.T) {
// prepare
nsn := types.NamespacedName{Name: "my-instance", Namespace: "default"}
reconciler := appsv1.NewNamespaceReconciler(
k8sClient,
k8sClient,
testScheme,
)
instance := v1.NewJaeger(nsn)
err := k8sClient.Create(context.Background(), instance)
require.NoError(t, err)
req := k8sreconcile.Request{
NamespacedName: nsn,
}
_, err = reconciler.Reconcile(context.Background(), req)
require.NoError(t, err)
}

View File

@ -1,57 +0,0 @@
package appsv1_test
import (
"fmt"
"os"
"path/filepath"
"testing"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
// +kubebuilder:scaffold:imports
)
var (
k8sClient client.Client
testEnv *envtest.Environment
testScheme *runtime.Scheme = scheme.Scheme
)
func TestMain(m *testing.M) {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
}
cfg, err := testEnv.Start()
if err != nil {
fmt.Printf("failed to start testEnv: %v", err)
os.Exit(1)
}
if err := v1.AddToScheme(scheme.Scheme); err != nil {
fmt.Printf("failed to register scheme: %v", err)
os.Exit(1)
}
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme})
if err != nil {
fmt.Printf("failed to setup a Kubernetes client: %v", err)
os.Exit(1)
}
code := m.Run()
err = testEnv.Stop()
if err != nil {
fmt.Printf("failed to stop testEnv: %v", err)
os.Exit(1)
}
os.Exit(code)
}

View File

@ -1,60 +0,0 @@
package elasticsearch
import (
"context"
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
"k8s.io/client-go/discovery"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
"github.com/jaegertracing/jaeger-operator/pkg/controller/elasticsearch"
)
// Reconciler reconciles an Elasticsearch object
type Reconciler struct {
reconcilier *elasticsearch.ReconcileElasticsearch
}
// NewReconciler creates a new Elasticsearch reconciler controller
func NewReconciler(client client.Client, clientReader client.Reader) *Reconciler {
return &Reconciler{
reconcilier: elasticsearch.New(client, clientReader),
}
}
// +kubebuilder:rbac:groups=logging.openshift.io,resources=elasticsearch,verbs=get;list;watch;create;update;patch;delete
// Reconcile Elasticsearch resource
func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
return r.reconcilier.Reconcile(ctx, request)
}
// SetupWithManager sets up the controller with the Manager.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
esCRDInstalled, err := isOpenShiftESCRDAvailable(mgr)
if err != nil {
return err
}
if esCRDInstalled {
return ctrl.NewControllerManagedBy(mgr).
For(&esv1.Elasticsearch{}).
Complete(r)
}
return nil
}
const elasticsearchGroup = "logging.openshift.io"
func isOpenShiftESCRDAvailable(mgr ctrl.Manager) (bool, error) {
dcl, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return false, err
}
apiLists, err := autodetect.AvailableAPIs(dcl, map[string]bool{elasticsearchGroup: true})
if err != nil {
return false, err
}
return autodetect.IsElasticsearchOperatorAvailable(apiLists), nil
}
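autodetect.AvailableAPIs wraps the discovery call; the gate boils down to asking the API server whether the logging.openshift.io group is served before watching a CRD that may not be installed. A minimal sketch of that check using only the plain discovery client (hasAPIGroup is a hypothetical helper):

package main

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
)

// hasAPIGroup reports whether the cluster serves the given API group,
// e.g. "logging.openshift.io" for the OpenShift Elasticsearch operator.
func hasAPIGroup(cfg *rest.Config, group string) (bool, error) {
	dcl, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return false, err
	}
	groups, err := dcl.ServerGroups()
	if err != nil {
		return false, err
	}
	for _, g := range groups.Groups {
		if g.Name == group {
			return true, nil
		}
	}
	return false, nil
}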

View File

@ -1,77 +0,0 @@
package elasticsearch_test
import (
"context"
"testing"
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/manager"
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/controllers/elasticsearch"
)
func TestElasticSearchSetupWithManager(t *testing.T) {
t.Skip("this test requires a real cluster, otherwise the GetConfigOrDie will die")
// prepare
mgr, err := manager.New(k8sconfig.GetConfigOrDie(), manager.Options{})
require.NoError(t, err)
reconciler := elasticsearch.NewReconciler(
k8sClient,
k8sClient,
)
// test
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
}
func TestNewElasticSearchInstance(t *testing.T) {
// prepare
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ns",
},
}
es := &esv1.Elasticsearch{
ObjectMeta: metav1.ObjectMeta{
Name: "test-es",
Namespace: "test-ns",
},
}
jaeger := v1.NewJaeger(types.NamespacedName{
Name: "test-jaeger",
Namespace: "test-jaeger",
})
esv1.AddToScheme(testScheme)
v1.AddToScheme(testScheme)
client := fake.NewClientBuilder().WithRuntimeObjects(ns, es, jaeger).Build()
reconciler := elasticsearch.NewReconciler(
client,
client,
)
req := k8sreconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-es",
Namespace: "test-ns",
},
}
_, err := reconciler.Reconcile(context.Background(), req)
require.NoError(t, err)
}

View File

@ -1,57 +0,0 @@
package elasticsearch_test
import (
"fmt"
"os"
"path/filepath"
"testing"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
// +kubebuilder:scaffold:imports
)
var (
k8sClient client.Client
testEnv *envtest.Environment
testScheme *runtime.Scheme = scheme.Scheme
)
func TestMain(m *testing.M) {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
}
cfg, err := testEnv.Start()
if err != nil {
fmt.Printf("failed to start testEnv: %v", err)
os.Exit(1)
}
if err := v1.AddToScheme(scheme.Scheme); err != nil {
fmt.Printf("failed to register scheme: %v", err)
os.Exit(1)
}
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme})
if err != nil {
fmt.Printf("failed to setup a Kubernetes client: %v", err)
os.Exit(1)
}
code := m.Run()
err = testEnv.Stop()
if err != nil {
fmt.Printf("failed to stop testEnv: %v", err)
os.Exit(1)
}
os.Exit(code)
}

View File

@ -1,78 +0,0 @@
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jaegertracing
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/operator-framework/operator-lib/handler"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/pkg/controller/jaeger"
)
// JaegerReconciler reconciles a Jaeger object
type JaegerReconciler struct {
reconcilier *jaeger.ReconcileJaeger
}
// NewReconciler creates a new Jaeger reconciler controller
func NewReconciler(client client.Client, clientReader client.Reader, scheme *runtime.Scheme) *JaegerReconciler {
return &JaegerReconciler{
reconcilier: jaeger.New(client, clientReader, scheme),
}
}
// +kubebuilder:rbac:groups=jaegertracing.io,resources=jaegers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=jaegertracing.io,resources=jaegers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=jaegertracing.io,resources=jaegers/finalizers,verbs=update
// +kubebuilder:rbac:groups=core,resources=configmaps;persistentvolumeclaims;pods;secrets;serviceaccounts;services;services/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses;ingressclasses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes/custom-host,verbs=create
// +kubebuilder:rbac:groups=console.openshift.io,resources=consolelinks,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=jobs;cronjobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=logging.openshift.io,resources=elasticsearches,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=kafka.strimzi.io,resources=kafkas;kafkausers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
// +kubebuilder:rbac:groups=image.openshift.io,resources=imagestreams,verbs=get;list;watch
// Reconcile jaeger resource
func (r *JaegerReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
return r.reconcilier.Reconcile(request)
}
// SetupWithManager sets up the controller with the Manager.
func (r *JaegerReconciler) SetupWithManager(mgr ctrl.Manager) error {
err := ctrl.NewControllerManagedBy(mgr).
For(&v1.Jaeger{}).
Watches(
&v1.Jaeger{},
&handler.InstrumentedEnqueueRequestForObject{},
).
Complete(r)
return err
}
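The Watches call pairs the primary resource with operator-lib's InstrumentedEnqueueRequestForObject, which enqueues like the stock handler but additionally records resource timestamps as Prometheus metrics. For contrast, a sketch of the uninstrumented registration (same controller-runtime API, names reused from this diff):

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"

	v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
	"github.com/jaegertracing/jaeger-operator/controllers/jaegertracing"
)

func setupPlain(mgr ctrl.Manager, r *jaegertracing.JaegerReconciler) error {
	// For() alone already watches the Jaeger CRD with the default
	// EnqueueRequestForObject handler; the extra Watches call above only
	// swaps in the metrics-instrumented variant.
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1.Jaeger{}).
		Complete(r)
}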

View File

@ -1,55 +0,0 @@
package jaegertracing_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/types"
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
"github.com/jaegertracing/jaeger-operator/controllers/jaegertracing"
)
func TestNewJaegerInstance(t *testing.T) {
// prepare
nsn := types.NamespacedName{Name: "my-instance", Namespace: "default"}
reconciler := jaegertracing.NewReconciler(
k8sClient,
k8sClient,
testScheme,
)
instance := v1.NewJaeger(nsn)
err := k8sClient.Create(context.Background(), instance)
require.NoError(t, err)
req := k8sreconcile.Request{
NamespacedName: nsn,
}
_, err = reconciler.Reconcile(context.Background(), req)
require.NoError(t, err)
}
func TestRegisterWithManager(t *testing.T) {
t.Skip("this test requires a real cluster, otherwise the GetConfigOrDie will die")
// prepare
mgr, err := manager.New(k8sconfig.GetConfigOrDie(), manager.Options{})
require.NoError(t, err)
reconciler := jaegertracing.NewReconciler(
k8sClient,
k8sClient,
testScheme,
)
// test
err = reconciler.SetupWithManager(mgr)
// verify
require.NoError(t, err)
}

Some files were not shown because too many files have changed in this diff.