Compare commits
No commits in common. "main" and "v1.28.0" have entirely different histories.
|
@ -0,0 +1,4 @@
|
|||
#!/bin/bash
# Uploads code-coverage results to Codecov via its bash uploader.
# Strict mode + `curl --fail` make the step fail loudly instead of
# silently piping an HTTP error page into bash.
set -euo pipefail

echo "Uploading code coverage results"
bash <(curl --fail --silent --show-error --location https://codecov.io/bash)
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
# Installs the NGINX ingress controller and waits until its controller
# pod reports Ready (or fails after the 150s timeout).
# `set -e` ensures a failed `kubectl apply` aborts before the wait,
# instead of the wait masking the real error.
set -e

kubectl apply -f ./.ci/minikube-ingress.yaml
kubectl wait --namespace ingress-nginx \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/component=controller \
  --timeout=150s
|
|
@ -6,5 +6,4 @@ echo "${GITHUB_TOKEN}" | gh auth login --with-token
|
|||
gh config set prompt disabled
|
||||
gh release create \
|
||||
-t "Release ${OPERATOR_VERSION}" \
|
||||
"${OPERATOR_VERSION}" \
|
||||
'dist/jaeger-operator.yaml#Installation manifest for Kubernetes'
|
||||
"${OPERATOR_VERSION}"
|
|
@ -1,3 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
./bin/goimports -local "github.com/jaegertracing/jaeger-operator" -l -w $(git ls-files "*\.go" | grep -v vendor)
|
||||
${GOPATH}/bin/goimports -local "github.com/jaegertracing/jaeger-operator" -l -w $(git ls-files "*\.go" | grep -v vendor)
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
#!/bin/bash
# Installs the gomplate template renderer (slim build) if not present.
# Installs system-wide (with sudo) when GOPATH is unset, otherwise into
# ${GOPATH}/bin without sudo.
VERSION="3.10.0"

if [[ -z "${GOPATH}" ]]; then
  DEST="/usr/local/bin/gomplate"
  export PATH=$PATH:/usr/local/bin
  SUDO="sudo"
else
  DEST="${GOPATH}/bin/gomplate"
  SUDO=
fi

if [ ! -f "${DEST}" ]; then
  # Pick the release asset matching this platform.
  if [[ "$OSTYPE" == "darwin"* ]]; then
    PLATFORM="darwin"
  else
    PLATFORM="linux"
  fi
  # --fail prevents a 404 error page from being saved as the binary.
  $SUDO curl --fail "https://github.com/hairyhenderson/gomplate/releases/download/v${VERSION}/gomplate_${PLATFORM}-amd64-slim" -sLo "${DEST}"
  $SUDO chmod +x "${DEST}"
fi
|
|
@ -0,0 +1,5 @@
|
|||
#!/usr/bin/env bash
# Installs the kuttl kubectl plugin used by the kuttl-based test suites.
set -e

# Single source of truth for the version (it appears twice in the URL).
KUTTL_VERSION="0.11.1"

# --fail prevents a 404 error page from being saved as the binary.
sudo curl --fail -Lo /usr/local/bin/kubectl-kuttl \
  "https://github.com/kudobuilder/kuttl/releases/download/v${KUTTL_VERSION}/kubectl-kuttl_${KUTTL_VERSION}_linux_x86_64"
sudo chmod +x /usr/local/bin/kubectl-kuttl
export PATH=$PATH:/usr/local/bin
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/bash
# Installs the operator-sdk binary at the version given by ${SDK_VERSION}
# into ${GOPATH}/bin, re-installing if the installed version differs.

DEST="${GOPATH}/bin/operator-sdk"

# Downloads the operator-sdk release binary for this platform into ${DEST}.
function install_sdk() {
  echo "Downloading the operator-sdk ${SDK_VERSION} into ${DEST}"
  local platform="x86_64-linux-gnu"
  if [[ "$OSTYPE" == "darwin"* ]]; then
    platform="x86_64-apple-darwin"
  fi
  # --fail prevents a 404 error page from being saved as the binary.
  curl --fail "https://github.com/operator-framework/operator-sdk/releases/download/${SDK_VERSION}/operator-sdk-${SDK_VERSION}-${platform}" -sLo "${DEST}"
  chmod +x "${DEST}"
}

mkdir -p "${GOPATH}/bin"

if [ ! -f "${DEST}" ]; then
  install_sdk
fi

# Re-install when the installed binary doesn't report the wanted version.
if ! "${DEST}" version | grep -q "${SDK_VERSION}"; then
  install_sdk
fi
|
|
@ -0,0 +1,8 @@
|
|||
#!/bin/bash
# Installs the operator dependencies via the Makefile target and
# propagates a failing exit code with a diagnostic message.

make install
exit_code=$?
if [ ${exit_code} -ne 0 ]; then
  echo "Failed to install the operator dependencies."
  exit ${exit_code}
fi
|
|
@ -0,0 +1,16 @@
|
|||
#!/bin/bash
# Runs golint over the repository and fails if any warnings remain.
# Generated files (zz_generated*) and the vendored elasticsearch API
# package are excluded from the check.

GOLINT=golint

# Fall back to ${GOPATH}/bin when golint is not on the PATH.
# NOTE: the original used `[ -n ${GOPATH} ]`, which is always true when
# the variable is unquoted; quoting fixes the check.
if ! command -v ${GOLINT} > /dev/null; then
  if [ -n "${GOPATH}" ]; then
    GOLINT="${GOPATH}/bin/golint"
  fi
fi

out=$(${GOLINT} ./... | grep -v pkg/storage/elasticsearch/v1 | grep -v zz_generated)
if [[ $out ]]; then
  echo "$out"
  exit 1
fi
|
|
@ -0,0 +1,639 @@
|
|||
# Copyright 2021 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
automountServiceAccountToken: true
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
data:
|
||||
# see https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md for all possible options and their description
|
||||
hsts: "false"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: tcp-services
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: udp-services
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
---
|
||||
# Source: ingress-nginx/templates/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
name: ingress-nginx
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
- endpoints
|
||||
- nodes
|
||||
- pods
|
||||
- secrets
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
- networking.k8s.io # k8s 1.14+
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
- networking.k8s.io # k8s 1.14+
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io # k8s 1.14+
|
||||
resources:
|
||||
- ingressclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
# Source: ingress-nginx/templates/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
name: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: ingress-nginx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-role.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
- pods
|
||||
- secrets
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
- networking.k8s.io # k8s 1.14+
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
- networking.k8s.io # k8s 1.14+
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io # k8s 1.14+
|
||||
resources:
|
||||
- ingressclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
resourceNames:
|
||||
- ingress-controller-leader
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- update
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-rolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: ingress-nginx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: IngressClass
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: controller
|
||||
name: nginx
|
||||
annotations:
|
||||
ingressclass.kubernetes.io/is-default-class: "true"
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-service-webhook.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx-controller-admission
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: https-webhook
|
||||
port: 443
|
||||
targetPort: webhook
|
||||
selector:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-service.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
annotations:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
- name: https
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: https
|
||||
selector:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
---
|
||||
# Source: ingress-nginx/templates/controller-deployment.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
revisionHistoryLimit: 10
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
minReadySeconds: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: controller
|
||||
gcp-auth-skip-secret: "true"
|
||||
spec:
|
||||
dnsPolicy: ClusterFirst
|
||||
containers:
|
||||
- name: controller
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.0.0@sha256:0851b34f69f69352bf168e6ccf30e1e20714a264ab1ecd1933e4d8c0fc3215c6
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /wait-shutdown
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --election-id=ingress-controller-leader
|
||||
- --ingress-class=nginx
|
||||
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
|
||||
- --report-node-internal-ip-address
|
||||
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
|
||||
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
|
||||
- --validating-webhook=:8443
|
||||
- --validating-webhook-certificate=/usr/local/certificates/cert
|
||||
- --validating-webhook-key=/usr/local/certificates/key
|
||||
- --watch-ingress-without-class=true
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
runAsUser: 101
|
||||
allowPrivilegeEscalation: true
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: LD_PRELOAD
|
||||
value: /usr/local/lib/libmimalloc.so
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
hostPort: 80
|
||||
- name: https
|
||||
containerPort: 443
|
||||
protocol: TCP
|
||||
hostPort: 443
|
||||
- name: webhook
|
||||
containerPort: 8443
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: webhook-cert
|
||||
mountPath: /usr/local/certificates/
|
||||
readOnly: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 90Mi
|
||||
serviceAccountName: ingress-nginx
|
||||
volumes:
|
||||
- name: webhook-cert
|
||||
secret:
|
||||
secretName: ingress-nginx-admission
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
|
||||
# before changing this value, check the required kubernetes version
|
||||
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
|
||||
# Currently(v0.49.0), ValidatingWebhookConfiguration of this validates v1beta1 request
|
||||
# TODO(govargo): check this after upstream ingress-nginx can validate v1 version
|
||||
# https://github.com/kubernetes/ingress-nginx/blob/controller-v0.49.0/internal/admission/controller/main.go#L46-L52
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
name: ingress-nginx-admission
|
||||
webhooks:
|
||||
- name: validate.nginx.ingress.kubernetes.io
|
||||
matchPolicy: Equivalent
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- ingresses
|
||||
failurePolicy: Fail
|
||||
sideEffects: None
|
||||
admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
namespace: ingress-nginx
|
||||
name: ingress-nginx-controller-admission
|
||||
path: /networking/v1/ingresses
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
annotations:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
rules:
|
||||
- apiGroups:
|
||||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- validatingwebhookconfigurations
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
annotations:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: ingress-nginx-admission
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: ingress-nginx-admission
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: ingress-nginx-admission-create
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: ingress-nginx-admission-create
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
spec:
|
||||
containers:
|
||||
- name: create
|
||||
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- create
|
||||
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
|
||||
- --namespace=$(POD_NAMESPACE)
|
||||
- --secret-name=ingress-nginx-admission
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: ingress-nginx-admission
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 2000
|
||||
---
|
||||
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: ingress-nginx-admission-patch
|
||||
namespace: ingress-nginx
|
||||
annotations:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: ingress-nginx-admission-patch
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
spec:
|
||||
containers:
|
||||
- name: patch
|
||||
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- patch
|
||||
- --webhook-name=ingress-nginx-admission
|
||||
- --namespace=$(POD_NAMESPACE)
|
||||
- --patch-mutating=false
|
||||
- --secret-name=ingress-nginx-admission
|
||||
- --patch-failure-policy=Fail
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: ingress-nginx-admission
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 2000
|
|
@ -20,6 +20,15 @@ fi
|
|||
OLD_PWD=$(pwd)
|
||||
VERSION=$(grep operator= versions.txt | awk -F= '{print $2}')
|
||||
|
||||
PKG_FILE=deploy/olm-catalog/jaeger-operator/jaeger-operator.package.yaml
|
||||
CSV_FILE=deploy/olm-catalog/jaeger-operator/manifests/jaeger-operator.clusterserviceversion.yaml
|
||||
CRD_FILE=deploy/crds/jaegertracing.io_jaegers_crd.yaml
|
||||
|
||||
# once we get a clarification on the following item, we might not need to have different file names
|
||||
# https://github.com/operator-framework/community-operators/issues/701
|
||||
DEST_PKG_FILE=jaeger.package.yaml
|
||||
DEST_CSV_FILE=jaeger.v${VERSION}.clusterserviceversion.yaml
|
||||
|
||||
for dest in ${COMMUNITY_OPERATORS_REPOSITORY} ${UPSTREAM_REPOSITORY}; do
|
||||
cd "${LOCAL_REPOSITORIES_PATH}/${dest}"
|
||||
git remote | grep upstream > /dev/null
|
||||
|
@ -32,7 +41,11 @@ for dest in ${COMMUNITY_OPERATORS_REPOSITORY} ${UPSTREAM_REPOSITORY}; do
|
|||
git checkout -q main
|
||||
git rebase -q upstream/main
|
||||
|
||||
cp -r "${OLD_PWD}/bundle" "operators/jaeger/${VERSION}"
|
||||
mkdir -p "operators/jaeger/${VERSION}"
|
||||
|
||||
cp "${OLD_PWD}/${PKG_FILE}" "operators/jaeger/${DEST_PKG_FILE}"
|
||||
cp "${OLD_PWD}/${CSV_FILE}" "operators/jaeger/${VERSION}/${DEST_CSV_FILE}"
|
||||
cp "${OLD_PWD}/${CRD_FILE}" "operators/jaeger/${VERSION}"
|
||||
|
||||
git checkout -q -b Update-Jaeger-to-${VERSION}
|
||||
if [[ $? != 0 ]]; then
|
||||
|
|
|
@ -1,37 +1,39 @@
|
|||
#!/bin/bash
|
||||
|
||||
if [[ -z $OPERATOR_VERSION ]]; then
|
||||
if [[ "${OPERATOR_VERSION}x" == "x" ]]; then
|
||||
echo "OPERATOR_VERSION isn't set. Skipping process."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
|
||||
JAEGER_VERSION=$(echo $JAEGER_VERSION | tr -d '"')
|
||||
JAEGER_AGENT_VERSION=$(echo $JAEGER_AGENT_VERSION | tr -d '"')
|
||||
|
||||
|
||||
BASE_BUILD_IMAGE=${BASE_BUILD_IMAGE:-"jaegertracing/jaeger-operator"}
|
||||
TAG=${TAG:-"v${OPERATOR_VERSION}"}
|
||||
BUILD_IMAGE=${BUILD_IMAGE:-"${BASE_BUILD_IMAGE}:${OPERATOR_VERSION}"}
|
||||
PREVIOUS_VERSION=$(grep operator= versions.txt | awk -F= '{print $2}')
|
||||
|
||||
# changes to deploy/operator.yaml
|
||||
sed "s~image: jaegertracing/jaeger-operator.*~image: ${BUILD_IMAGE}~gi" -i deploy/operator.yaml
|
||||
|
||||
# change the versions.txt, bump only operator version.
|
||||
sed "s~operator=${PREVIOUS_VERSION}~operator=${OPERATOR_VERSION}~gi" -i versions.txt
|
||||
|
||||
# changes to deploy/operator.yaml
|
||||
sed "s~replaces: jaeger-operator.v.*~replaces: jaeger-operator.v${PREVIOUS_VERSION}~i" -i config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
|
||||
mkdir -p deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}
|
||||
cp deploy/olm-catalog/jaeger-operator/manifests/jaeger-operator.clusterserviceversion.yaml \
|
||||
deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
|
||||
|
||||
# Update the examples according to the release
|
||||
operator-sdk generate csv \
|
||||
--csv-channel=stable \
|
||||
--make-manifests=false \
|
||||
--csv-version=${OPERATOR_VERSION}
|
||||
|
||||
sed -i "s~all-in-one:.*~all-in-one:${JAEGER_VERSION}~gi" examples/all-in-one-with-options.yaml
|
||||
# changes to deploy/olm-catalog/jaeger-operator/manifests
|
||||
sed "s~containerImage: docker.io/jaegertracing/jaeger-operator:${PREVIOUS_VERSION}~containerImage: docker.io/jaegertracing/jaeger-operator:${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
|
||||
sed "s~image: jaegertracing/jaeger-operator:${PREVIOUS_VERSION}~image: jaegertracing/jaeger-operator:${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
|
||||
sed "s~replaces: jaeger-operator.v.*~replaces: jaeger-operator.v${PREVIOUS_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
|
||||
sed "s~version: ${PREVIOUS_VERSION}~version: ${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
|
||||
sed "s~name: jaeger-operator.v${PREVIOUS_VERSION}~name: jaeger-operator.v${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml
|
||||
|
||||
# statefulset-manual-sidecar
|
||||
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_AGENT_VERSION}~gi" examples/statefulset-manual-sidecar.yaml
|
||||
# changes to deploy/olm-catalog/jaeger-operator/jaeger-operator.package.yaml
|
||||
sed "s~currentCSV: jaeger-operator.v${PREVIOUS_VERSION}~currentCSV: jaeger-operator.v${OPERATOR_VERSION}~i" -i deploy/olm-catalog/jaeger-operator/jaeger-operator.package.yaml
|
||||
|
||||
# operator-with-tracing
|
||||
sed -i "s~jaeger-operator:.*~jaeger-operator:${OPERATOR_VERSION}~gi" examples/operator-with-tracing.yaml
|
||||
sed -i "s~jaeger-agent:.*~jaeger-agent:${JAEGER_AGENT_VERSION}~gi" examples/operator-with-tracing.yaml
|
||||
|
||||
# tracegen
|
||||
sed -i "s~jaeger-tracegen:.*~jaeger-tracegen:${JAEGER_VERSION}~gi" examples/tracegen.yaml
|
||||
|
||||
|
||||
VERSION=${OPERATOR_VERSION} USER=jaegertracing make bundle
|
||||
cp deploy/olm-catalog/jaeger-operator/${OPERATOR_VERSION}/jaeger-operator.v${OPERATOR_VERSION}.clusterserviceversion.yaml \
|
||||
deploy/olm-catalog/jaeger-operator/manifests/jaeger-operator.clusterserviceversion.yaml
|
|
@ -4,7 +4,7 @@ BASE_BUILD_IMAGE=${BASE_BUILD_IMAGE:-"jaegertracing/jaeger-operator"}
|
|||
OPERATOR_VERSION=${OPERATOR_VERSION:-$(git describe --tags)}
|
||||
|
||||
## if we are on a release tag, let's extract the version number
|
||||
## the other possible value, currently, is 'main' (or another branch name)
|
||||
## the other possible value, currently, is 'master' (or another branch name)
|
||||
## if we are not running in the CI, it fallsback to the `git describe` above
|
||||
if [[ $OPERATOR_VERSION == v* ]]; then
|
||||
OPERATOR_VERSION=$(echo ${OPERATOR_VERSION} | grep -Po "([\d\.]+)")
|
||||
|
@ -12,7 +12,6 @@ if [[ $OPERATOR_VERSION == v* ]]; then
|
|||
fi
|
||||
|
||||
BUILD_IMAGE=${BUILD_IMAGE:-"${BASE_BUILD_IMAGE}:${OPERATOR_VERSION}"}
|
||||
DOCKER_USERNAME=${DOCKER_USERNAME:-"jaegertracingbot"}
|
||||
|
||||
if [ "${DOCKER_PASSWORD}x" != "x" -a "${DOCKER_USERNAME}x" != "x" ]; then
|
||||
echo "Performing a 'docker login'"
|
||||
|
|
|
@ -0,0 +1,62 @@
|
|||
#!/usr/bin/env bash
# Dispatches to the e2e test suite selected by ${TEST_GROUP}.
set -x

[[ -z "$TEST_GROUP" ]] && { echo "TEST_GROUP is undefined, exiting" ; exit 1; }

## Since we're running MiniKube with --vm-driver none, change imagePullPolicy to get the image locally
sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' test/operator.yaml
## remove this once #947 is fixed
export VERBOSE='-v -timeout 20m'

case "${TEST_GROUP}" in
  es)
    echo "Running elasticsearch tests"
    make es
    make e2e-tests-es
    ;;
  es-self-provisioned)
    echo "Running self provisioned elasticsearch tests"
    make e2e-tests-self-provisioned-es
    res=$?
    if [[ ${res} -ne 0 ]]; then
      # Was `kubectl log`, which is not a valid subcommand; `logs` is.
      kubectl logs deploy/elasticsearch-operator -n openshift-logging
    fi
    exit ${res}
    ;;
  smoke)
    echo "Running Smoke Tests"
    make e2e-tests-smoke
    ;;
  cassandra)
    echo "Running Cassandra Tests"
    make cassandra
    make e2e-tests-cassandra
    ;;
  streaming)
    echo "Running Streaming Tests"
    make e2e-tests-streaming
    ;;
  examples1)
    echo "Running Examples1 Tests"
    make e2e-tests-examples1
    ;;
  examples2)
    echo "Running Examples2 Tests"
    make e2e-tests-examples2
    ;;
  es-token-propagation)
    echo "Running token propagation tests"
    make e2e-tests-token-propagation-es
    ;;
  generate)
    echo "Running CLI manifest generation tests"
    make e2e-tests-generate
    ;;
  upgrade)
    echo "Running upgrade tests"
    make e2e-tests-upgrade
    ;;
  istio)
    echo "Running Smoke Tests with istio"
    make e2e-tests-istio
    ;;
  *)
    echo "Unknown TEST_GROUP [${TEST_GROUP}]"; exit 1
    ;;
esac
|
@ -0,0 +1,8 @@
|
|||
#!/bin/bash
# Builds the operator (install-tools + ci targets) and propagates a
# failing exit code with a diagnostic message.

make install-tools ci
status=$?
if [ ${status} -ne 0 ]; then
  echo "Failed to build the operator."
  exit ${status}
fi
|
@ -0,0 +1,11 @@
|
|||
#!/bin/bash

## this script is meant to be executed in a CI executor based on Ubuntu 18.04 and hasn't been tested anywhere else
# Remove any distro-provided Docker packages that would conflict with Docker CE.
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get update
# Prerequisites for adding the Docker apt repository over HTTPS.
sudo apt-get install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
# Trust Docker's package-signing key, then print its fingerprint for the CI log.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
# NOTE(review): socat is presumably needed for port-forwarding in tests — confirm.
# NOTE(review): apt-get install without -y will prompt unless the CI image
# preconfigures non-interactive apt — confirm this is intended.
sudo apt-get install docker-ce docker-ce-cli containerd.io socat
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
# Uploads code-coverage results to Codecov via its bash uploader.
# `curl --fail` stops an HTTP error page from being piped into bash.
set -euo pipefail

bash <(curl --fail --silent --show-error https://codecov.io/bash)
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/sh
|
||||
if [ $# = 0 ]; then
|
||||
usage: vgot cmdpackage[@version]... >&2
|
||||
exit 2
|
||||
fi
|
||||
d=`mktemp -d`
|
||||
cd "$d"
|
||||
go mod init temp >/dev/null 2>&1
|
||||
for i; do
|
||||
pkg=`echo $i | sed 's/@.*//'`
|
||||
go get "$i" &&
|
||||
go install "$pkg" &&
|
||||
echo installed `go list -f '{{.ImportPath}}@{{.Module.Version}}' "$pkg"`
|
||||
done
|
||||
rm -r "$d"
|
22
.codecov.yml
22
.codecov.yml
|
@ -1,16 +1,8 @@
|
|||
coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
target: auto
|
||||
# this allows a 0.1% drop from the previous base commit coverage
|
||||
threshold: 0.1%
|
||||
ignore:
|
||||
- "apis/v1/zz_generated.deepcopy.go"
|
||||
- "apis/v1/zz_generated.defaults.go"
|
||||
- "apis/v1/zz_generated.openapi.go"
|
||||
- "apis/v1/groupversion_info.go"
|
||||
- "pkg/kafka/v1beta2/zz_generated.deepcopy.go"
|
||||
- "pkg/kafka/v1beta2/zz_generated.openapi.go"
|
||||
- "pkg/kafka/v1beta2/groupversion_info.go"
|
||||
- "pkg/util/k8s_utils.go"
|
||||
- "pkg/apis/io/v1alpha1/zz_generated.deepcopy.go"
|
||||
- "pkg/apis/jaegertracing/v1/zz_generated.deepcopy.go"
|
||||
- "pkg/apis/io/v1alpha1/zz_generated.defaults.go"
|
||||
- "pkg/apis/jaegertracing/v1/zz_generated.defaults.go"
|
||||
- "pkg/apis/jaegertracing/v1/zz_generated.openapi.go"
|
||||
- "pkg/apis/kafka/v1beta2/zz_generated.deepcopy.go"
|
||||
- "pkg/apis/kafka/v1beta2/zz_generated.openapi.go"
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
|
||||
# Ignore build and test binaries.
|
||||
bin/
|
||||
testbin/
|
|
@ -1,62 +1,11 @@
|
|||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: docker
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: daily
|
||||
time: "03:00"
|
||||
timezone: "Europe/Berlin"
|
||||
labels:
|
||||
- dependencies
|
||||
- docker
|
||||
- ok-to-test
|
||||
- package-ecosystem: docker
|
||||
directory: "/tests"
|
||||
schedule:
|
||||
interval: daily
|
||||
time: "03:00"
|
||||
timezone: "Europe/Berlin"
|
||||
labels:
|
||||
- dependencies
|
||||
- docker
|
||||
- ok-to-test
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: daily
|
||||
time: "03:00"
|
||||
timezone: "Europe/Berlin"
|
||||
labels:
|
||||
- dependencies
|
||||
- go
|
||||
- ok-to-test
|
||||
groups:
|
||||
golang-org-x:
|
||||
patterns:
|
||||
- "golang.org/x/*"
|
||||
opentelemetry:
|
||||
patterns:
|
||||
- "go.opentelemetry.io/*"
|
||||
prometheus:
|
||||
patterns:
|
||||
- "github.com/prometheus-operator/prometheus-operator"
|
||||
- "github.com/prometheus-operator/prometheus-operator/*"
|
||||
- "github.com/prometheus/prometheus"
|
||||
- "github.com/prometheus/prometheus/*"
|
||||
- "github.com/prometheus/client_go"
|
||||
- "github.com/prometheus/client_go/*"
|
||||
kubernetes:
|
||||
patterns:
|
||||
- "k8s.io/*"
|
||||
- "sigs.k8s.io/*"
|
||||
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
time: "03:00"
|
||||
timezone: "Europe/Berlin"
|
||||
labels:
|
||||
- dependencies
|
||||
- github_actions
|
||||
- ok-to-test
|
||||
|
|
|
@ -2,41 +2,26 @@ name: "CI Workflow"
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
basic-checks:
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
USER: jaegertracing
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
|
||||
- uses: actions/setup-go@v2.1.4
|
||||
with:
|
||||
go-version: "1.22"
|
||||
|
||||
- name: "install kubebuilder"
|
||||
run: ./hack/install/install-kubebuilder.sh
|
||||
|
||||
- name: "install kustomize"
|
||||
run: ./hack/install/install-kustomize.sh
|
||||
go-version: 1.16
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: jpkrohling/setup-operator-sdk@v1.0.2
|
||||
with:
|
||||
operator-sdk-version: v0.18.2
|
||||
|
||||
- name: "basic checks"
|
||||
run: make install-tools ci
|
||||
run: ./.ci/script.sh
|
||||
|
||||
- name: "upload test coverage report"
|
||||
uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574 # v5.4.0
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
run: ./.ci/upload-test-coverage.sh
|
||||
|
|
|
@ -2,23 +2,12 @@ name: "CodeQL"
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
codeql-analyze:
|
||||
permissions:
|
||||
actions: read # for github/codeql-action/init to get workflow details
|
||||
contents: read # for actions/checkout to fetch code
|
||||
security-events: write # for github/codeql-action/autobuild to send a status report
|
||||
name: CodeQL Analyze
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
|
@ -29,24 +18,15 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: "Set up Go"
|
||||
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
|
||||
# Disable CodeQL for tests
|
||||
# https://github.com/github/codeql/issues/4786
|
||||
- run: rm -rf ./tests
|
||||
uses: actions/checkout@v2.4.0
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
|
||||
uses: github/codeql-action/init@v1
|
||||
with:
|
||||
languages: go
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
|
||||
uses: github/codeql-action/autobuild@v1
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
|
||||
uses: github/codeql-action/analyze@v1
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
name: "Kubernetes end-to-end tests"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
end-to-end:
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
matrix:
|
||||
TEST_GROUP: [smoke, es, cassandra, streaming, examples1, examples2, generate, upgrade, istio]
|
||||
kube-version:
|
||||
- "v1.19.1"
|
||||
- "v1.22.0"
|
||||
steps:
|
||||
- uses: actions/setup-go@v2.1.4
|
||||
with:
|
||||
go-version: 1.16
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- name: "setup docker"
|
||||
run: ./.ci/setup-docker.sh
|
||||
- uses: manusa/actions-setup-minikube@v2.4.2
|
||||
with:
|
||||
minikube version: 'v1.23.1'
|
||||
kubernetes version: ${{ matrix.kube-version }}
|
||||
driver: none
|
||||
github token: ${{ secrets.GITHUB_TOKEN }}
|
||||
start args: ' --wait=all'
|
||||
- uses: jpkrohling/setup-kubectl@v1.0.2
|
||||
- name: "setup ingress"
|
||||
run: ./.ci/apply-ingress.sh
|
||||
- uses: jpkrohling/setup-operator-sdk@v1.0.2
|
||||
with:
|
||||
operator-sdk-version: v0.18.2
|
||||
|
||||
- name: "install go tools"
|
||||
run: make install-tools
|
||||
|
||||
- name: "running end to end test"
|
||||
env:
|
||||
CI: true
|
||||
TEST_GROUP: ${{ matrix.TEST_GROUP }}
|
||||
run: ./.ci/run-e2e-tests.sh
|
|
@ -0,0 +1,36 @@
|
|||
name: "End-to-end tests (kuttl)"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
e2e-tests:
|
||||
name: End-to-end tests
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
matrix:
|
||||
kube-version:
|
||||
- "1.19"
|
||||
- "1.20"
|
||||
- "1.21"
|
||||
- "1.22"
|
||||
steps:
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2.1.4
|
||||
with:
|
||||
go-version: 1.16
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2.4.0
|
||||
|
||||
- name: "install kuttl"
|
||||
run: ./.ci/install-kuttl.sh
|
||||
|
||||
- name: "run tests"
|
||||
env:
|
||||
KUBE_VERSION: ${{ matrix.kube-version }}
|
||||
run: make install kuttl-e2e KUBE_VERSION=$KUBE_VERSION
|
|
@ -1,84 +0,0 @@
|
|||
name: E2E tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
concurrency:
|
||||
group: e2e-tests-${{ github.ref }}-${{ github.workflow }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
e2e:
|
||||
name: "Run ${{ matrix.testsuite.label }} E2E tests (${{ matrix.kube-version }})"
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kube-version:
|
||||
- "1.19"
|
||||
- "1.30"
|
||||
testsuite:
|
||||
- { name: "elasticsearch", label: "Elasticsearch" }
|
||||
- { name: "examples", label: "Examples" }
|
||||
- { name: "generate", label: "Generate" }
|
||||
- { name: "miscellaneous", label: "Miscellaneous" }
|
||||
- { name: "sidecar", label: "Sidecar" }
|
||||
- { name: "streaming", label: "Streaming" }
|
||||
- { name: "ui", label: "UI" }
|
||||
- { name: "upgrade", label: "Upgrade" }
|
||||
steps:
|
||||
- name: "Check out code into the Go module directory"
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: "Set up Go"
|
||||
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
|
||||
with:
|
||||
go-version: "1.22"
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
with:
|
||||
install: true
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: e2e-${{ github.sha }}
|
||||
restore-keys: |
|
||||
e2e-
|
||||
- name: "Install KIND"
|
||||
run: ./hack/install/install-kind.sh
|
||||
shell: bash
|
||||
- name: "Install KUTTL"
|
||||
run: ./hack/install/install-kuttl.sh
|
||||
shell: bash
|
||||
- name: "Install gomplate"
|
||||
run: ./hack/install/install-gomplate.sh
|
||||
shell: bash
|
||||
- name: "Install dependencies"
|
||||
run: make install-tools
|
||||
shell: bash
|
||||
- name: "Run ${{ matrix.testsuite.label }} E2E test suite on Kube ${{ matrix.kube-version }}"
|
||||
env:
|
||||
VERBOSE: "true"
|
||||
KUBE_VERSION: "${{ matrix.kube-version }}"
|
||||
DOCKER_BUILD_OPTIONS: "--cache-from type=local,src=/tmp/.buildx-cache --cache-to type=local,dest=/tmp/.buildx-cache-new,mode=max --load"
|
||||
run: make run-e2e-tests-${{ matrix.testsuite.name }}
|
||||
shell: bash
|
||||
# Temp fix
|
||||
# https://github.com/docker/build-push-action/issues/252
|
||||
# https://github.com/moby/buildkit/issues/1896
|
||||
- name: Move cache
|
||||
run: |
|
||||
rm -rf /tmp/.buildx-cache
|
||||
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
|
||||
shell: bash
|
|
@ -1,54 +0,0 @@
|
|||
name: Scorecard supply-chain security
|
||||
on:
|
||||
# For Branch-Protection check. Only the default branch is supported. See
|
||||
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
|
||||
branch_protection_rule:
|
||||
# To guarantee Maintained check is occasionally updated. See
|
||||
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
|
||||
schedule:
|
||||
- cron: '45 13 * * 1'
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
name: Scorecard analysis
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# Needed to upload the results to code-scanning dashboard.
|
||||
security-events: write
|
||||
# Needed to publish results and get a badge (see publish_results below).
|
||||
id-token: write
|
||||
# Uncomment the permissions below if installing in a private repository.
|
||||
# contents: read
|
||||
# actions: read
|
||||
|
||||
steps:
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
publish_results: true
|
||||
|
||||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
retention-days: 5
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10
|
||||
with:
|
||||
sarif_file: results.sarif
|
|
@ -2,27 +2,20 @@ name: "Publish images"
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
USER: jaegertracing
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: docker/setup-qemu-action@v1.2.0
|
||||
- uses: docker/setup-buildx-action@v1.6.0
|
||||
- name: "publishes the images"
|
||||
env:
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
|
||||
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
|
||||
OPERATOR_VERSION: main
|
||||
OPERATOR_VERSION: master
|
||||
run: ./.ci/publish-images.sh
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
|
||||
name: "Prepare the release"
|
||||
name: "Release"
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
|
@ -7,37 +6,25 @@ on:
|
|||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
USER: jaegertracing
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
|
||||
- uses: actions/setup-go@v2.1.4
|
||||
with:
|
||||
go-version: "1.22"
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: "install kubebuilder"
|
||||
run: ./hack/install/install-kubebuilder.sh
|
||||
|
||||
- name: "install kustomize"
|
||||
run: ./hack/install/install-kustomize.sh
|
||||
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
|
||||
- name: "generate release resources"
|
||||
run: make release-artifacts USER=jaegertracing
|
||||
|
||||
- name: "create the release in GitHub"
|
||||
go-version: 1.16
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: jpkrohling/setup-operator-sdk@v1.0.2
|
||||
with:
|
||||
operator-sdk-version: v0.18.2
|
||||
- uses: docker/setup-qemu-action@v1.2.0
|
||||
- uses: docker/setup-buildx-action@v1.6.0
|
||||
- name: "perform the release"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
run: ./.ci/create-release-github.sh
|
||||
|
||||
- name: "publishes the images"
|
||||
env:
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
|
||||
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
|
||||
run: ./.ci/publish-images.sh
|
||||
|
|
|
@ -1,30 +0,0 @@
|
|||
name: "Operator-SDK Scorecard"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
operator-sdk-scorecard:
|
||||
name: "Operator-SDK Scorecard"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: "Check out code"
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: "Install KIND"
|
||||
run: ./hack/install/install-kind.sh
|
||||
- name: "Install KUTTL"
|
||||
run: ./hack/install/install-kuttl.sh
|
||||
- name: "Run Operator-SDK scorecard test"
|
||||
env:
|
||||
DOCKER_BUILD_OPTIONS: "--cache-from type=local,src=/tmp/.buildx-cache --cache-to type=local,dest=/tmp/.buildx-cache-new,mode=max --load"
|
||||
run: make scorecard-tests-local
|
|
@ -3,10 +3,7 @@ build/_output
|
|||
build/_test
|
||||
deploy/test
|
||||
vendor
|
||||
bin
|
||||
tests/_build
|
||||
_build
|
||||
logs
|
||||
# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
|
||||
### Emacs ###
|
||||
# -*- mode: gitignore; -*-
|
||||
|
@ -89,10 +86,3 @@ fmt.log
|
|||
import.log
|
||||
### Kubernetes ###
|
||||
kubeconfig
|
||||
bin
|
||||
### Timestamp files to avoid rebuilding Docker images if not needed ###
|
||||
build-assert-job
|
||||
docker-e2e-upgrade-image
|
||||
build-e2e-upgrade-image
|
||||
### Reports for E2E tests
|
||||
reports
|
||||
|
|
|
@ -1,33 +0,0 @@
|
|||
issues:
|
||||
# Excluding configuration per-path, per-linter, per-text and per-source
|
||||
exclude-rules:
|
||||
# Exclude some linters from running on tests files.
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- gosec
|
||||
- linters:
|
||||
- staticcheck
|
||||
text: "SA1019:"
|
||||
|
||||
linters-settings:
|
||||
goimports:
|
||||
local-prefixes: github.com/jaegertracing/jaeger-operator
|
||||
gosimple:
|
||||
go: "1.22"
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- bidichk
|
||||
- errorlint
|
||||
- gofumpt
|
||||
- goimports
|
||||
- gosec
|
||||
- govet
|
||||
- misspell
|
||||
- testifylint
|
||||
disable:
|
||||
- errcheck
|
||||
|
||||
run:
|
||||
go: '1.22'
|
||||
timeout: 10m
|
231
CHANGELOG.md
231
CHANGELOG.md
|
@ -1,236 +1,5 @@
|
|||
Changes by Version
|
||||
==================
|
||||
## v1.65.0 (2025-01-22)
|
||||
|
||||
* Pin agent version to 1.62.0 ([#2790](https://github.com/jaegertracing/jaeger-operator/pull/2790), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Added compatibility for Jaeger Operator v1.61.x and v1.62.x ([#2725](https://github.com/jaegertracing/jaeger-operator/pull/2725), [@mooneeb](https://github.com/mooneeb))
|
||||
|
||||
## v1.62.0 (2024-10-10)
|
||||
|
||||
* TRACING-4238 | Fix gatewat 502 timeout ([#2694](https://github.com/jaegertracing/jaeger-operator/pull/2694), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* feat: added missing test for elasticsearch reconciler ([#2662](https://github.com/jaegertracing/jaeger-operator/pull/2662), [@Ankit152](https://github.com/Ankit152))
|
||||
|
||||
## v1.61.0 (2024-09-16)
|
||||
|
||||
* Bump google.golang.org/grpc from 1.66.0 to 1.66.1 ([#2675](https://github.com/jaegertracing/jaeger-operator/pull/2675), [@dependabot[bot]](https://github.com/apps/dependabot))
|
||||
* Bump google.golang.org/grpc from 1.65.0 to 1.66.0 ([#2670](https://github.com/jaegertracing/jaeger-operator/pull/2670), [@dependabot[bot]](https://github.com/apps/dependabot))
|
||||
* Bump the opentelemetry group with 9 updates ([#2668](https://github.com/jaegertracing/jaeger-operator/pull/2668), [@dependabot[bot]](https://github.com/apps/dependabot))
|
||||
|
||||
## v1.60.0 (2024-08-13)
|
||||
* Fix Golang version in go.mod ([#2652](https://github.com/jaegertracing/jaeger-operator/pull/2652), [@iblancasa](https://github.com/iblancasa))
|
||||
|
||||
## v1.60.0 (2024-08-09)
|
||||
* Test on k8s 1.30 ([#2647](https://github.com/jaegertracing/jaeger-operator/pull/2647), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* Bump go to 1.22 and controller-gen to 1.14 ([#2646](https://github.com/jaegertracing/jaeger-operator/pull/2646), [@pavolloffay](https://github.com/pavolloffay))
|
||||
|
||||
## v1.59.0 (2024-08-06)
|
||||
* Update compatibility matrix for v1.57.x ([#2594](https://github.com/jaegertracing/jaeger-operator/pull/2594), [@mooneeb](https://github.com/mooneeb))
|
||||
* imagePullSecrets is not set for agent DaemonSet ([#2563](https://github.com/jaegertracing/jaeger-operator/pull/2563), [@antoniomerlin](https://github.com/antoniomerlin))
|
||||
|
||||
## v1.57.0 (2024-05-06)
|
||||
|
||||
## v1.55.0 (2024-03-22)
|
||||
* Add server URL to JaegerMetricsStorageSpec ([#2481](https://github.com/jaegertracing/jaeger-operator/pull/2481), [@antoniomerlin](https://github.com/antoniomerlin))
|
||||
* Use the host set in the Ingess field for the OpenShift Route ([#2409](https://github.com/jaegertracing/jaeger-operator/pull/2409), [@iblancasa](https://github.com/iblancasa))
|
||||
* Add minimum Kubernetes and OpenShift versions ([#2492](https://github.com/jaegertracing/jaeger-operator/pull/2492), [@andreasgerstmayr](https://github.com/andreasgerstmayr))
|
||||
|
||||
## v1.54.0 (2024-02-14)
|
||||
* apis/v1: add jaeger agent deprecation warning ([#2471](https://github.com/jaegertracing/jaeger-operator/pull/2471), [@frzifus](https://github.com/frzifus))
|
||||
|
||||
## V1.53.0 (2024-01-17)
|
||||
* Choose the newer autoscaling version by default ([#2374](https://github.com/jaegertracing/jaeger-operator/pull/2374), [@iblancasa](https://github.com/iblancasa))
|
||||
* Upgrade operator-sdk to 1.32.0 ([#2388](https://github.com/jaegertracing/jaeger-operator/pull/2388), [@iblancasa](https://github.com/iblancasa))
|
||||
* Fix containerImage field and remove statement about failing CI ([#2386](https://github.com/jaegertracing/jaeger-operator/pull/2386), [@iblancasa](https://github.com/iblancasa))
|
||||
* Fix injection: prefer jaeger in the same namespace ([#2383](https://github.com/jaegertracing/jaeger-operator/pull/2383), [@pavolloffay](https://github.com/pavolloffay))
|
||||
|
||||
## v1.52.0 (2023-12-07)
|
||||
* Add missing container security context settings and tests ([#2354](https://github.com/jaegertracing/jaeger-operator/pull/2354), [@tingeltangelthomas](https://github.com/tingeltangelthomas))
|
||||
|
||||
## v1.51.0 (2023-11-17)
|
||||
* Support configuring images via RELATED_IMAGE_ environment variables ([#2355](https://github.com/jaegertracing/jaeger-operator/pull/2355), [@andreasgerstmayr](https://github.com/andreasgerstmayr))
|
||||
* Regenerate ES certificated when is close to 1 day for expire ([#2356](https://github.com/jaegertracing/jaeger-operator/pull/2356), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Bump actions/checkout from 3 to 4 ([#2316](https://github.com/jaegertracing/jaeger-operator/pull/2316), [@dependabot[bot]](https://github.com/apps/dependabot))
|
||||
* bump grpc to 1.58.3 ([#2346](https://github.com/jaegertracing/jaeger-operator/pull/2346), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Bump golang version to 1.21 ([#2347](https://github.com/jaegertracing/jaeger-operator/pull/2347), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Ensure oauth-proxy ImageStream is detected eventually ([#2340](https://github.com/jaegertracing/jaeger-operator/pull/2340), [@bverschueren](https://github.com/bverschueren))
|
||||
* Check if envFrom has ConfigMapRef set ([#2342](https://github.com/jaegertracing/jaeger-operator/pull/2342), [@edwardecook](https://github.com/edwardecook))
|
||||
* Bump golang.org/x/net from 0.13.0 to 0.17.0 ([#2343](https://github.com/jaegertracing/jaeger-operator/pull/2343), [@dependabot[bot]](https://github.com/apps/dependabot))
|
||||
* Fix issue related to new encoding in oauth-proxy image ([#2345](https://github.com/jaegertracing/jaeger-operator/pull/2345), [@iblancasa](https://github.com/iblancasa))
|
||||
* Always generate new oauth-proxy password ([#2333](https://github.com/jaegertracing/jaeger-operator/pull/2333), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* Add v1.48.x and v1.49.x to the support map ([#2332](https://github.com/jaegertracing/jaeger-operator/pull/2332), [@ishaqkhattana](https://github.com/ishaqkhattana))
|
||||
* Pass proxy env vars to operands ([#2330](https://github.com/jaegertracing/jaeger-operator/pull/2330), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* Protect auth delegator behind a mutex ([#2318](https://github.com/jaegertracing/jaeger-operator/pull/2318), [@iblancasa](https://github.com/iblancasa))
|
||||
|
||||
## v1.49.1 (2023-09-07)
|
||||
* fix: protect the kafka-profision setting behind a mutex ([#2308](https://github.com/jaegertracing/jaeger-operator/pull/2308), [@iblancasa](https://github.com/iblancasa))
|
||||
|
||||
## v1.48.1 (2023-09-04)
|
||||
* Use base image that does not require subscription (centos 9 stream) ([#2313](https://github.com/jaegertracing/jaeger-operator/pull/2313), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* Update go dependencies to Kubernetes 0.28.1 ([#2301](https://github.com/jaegertracing/jaeger-operator/pull/2301), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* Protect the ESProvisioning setting behind a mutex ([#2287](https://github.com/jaegertracing/jaeger-operator/pull/2287), [@iblancasa](https://github.com/iblancasa))
|
||||
|
||||
## v1.48.0 (2023-08-28)
|
||||
|
||||
* Remove the TokenReview after checking we can create it ([#2286](https://github.com/jaegertracing/jaeger-operator/pull/2286), [@iblancasa](https://github.com/iblancasa))
|
||||
* Fix apiVersion and kind are missing in jaeger-operator generate output ([#2281](https://github.com/jaegertracing/jaeger-operator/pull/2281), [@hiteshwani29](https://github.com/hiteshwani29))
|
||||
* Fix custom labels for the deployable components in production strategy ([#2277](https://github.com/jaegertracing/jaeger-operator/pull/2277), [@hiteshwani29](https://github.com/hiteshwani29))
|
||||
* Ensure the OAuth Proxy image detection is run after the platform detection ([#2280](https://github.com/jaegertracing/jaeger-operator/pull/2280), [@iblancasa](https://github.com/iblancasa))
|
||||
* Added changes to respect env variable set from envFrom configMaps ([#2272](https://github.com/jaegertracing/jaeger-operator/pull/2272), [@hiteshwani29](https://github.com/hiteshwani29))
|
||||
* Refactor the autodetect module to reduce the number of writes/reads in viper configuration ([#2274](https://github.com/jaegertracing/jaeger-operator/pull/2274), [@iblancasa](https://github.com/iblancasa))
|
||||
|
||||
## v1.47.0 (2023-07-12)
|
||||
* Expose admin ports for agent, collector, and query Deployments via the equivalent Service ([#2262](https://github.com/jaegertracing/jaeger-operator/pull/2262), [@thomaspaulin](https://github.com/thomaspaulin))
|
||||
* update otel sdk to v1.16.0/v0.39.0 ([#2261](https://github.com/jaegertracing/jaeger-operator/pull/2261), [@frzifus](https://github.com/frzifus))
|
||||
* Extended compatibility matrix ([#2255](https://github.com/jaegertracing/jaeger-operator/pull/2255), [@shazib-summar](https://github.com/shazib-summar))
|
||||
* Add support for Kubernetes 1.27 ([#2235](https://github.com/jaegertracing/jaeger-operator/pull/2235), [@iblancasa](https://github.com/iblancasa))
|
||||
* Jaeger Collector Config: `Lifecycle` and `TerminationGracePeriodSeconds` ([#2242](https://github.com/jaegertracing/jaeger-operator/pull/2242), [@taj-p](https://github.com/taj-p))
|
||||
|
||||
## v1.46.0 (2023-06-16)
|
||||
* Missing exposed port 16685 in query deployments ([#2239](https://github.com/jaegertracing/jaeger-operator/pull/2239), [@iblancasa](https://github.com/iblancasa))
|
||||
* Use Golang 1.20 ([#2205](https://github.com/jaegertracing/jaeger-operator/pull/2205), [@iblancasa](https://github.com/iblancasa))
|
||||
* [BugFix] Properly set imagePullPolicy and containerSecurityContext for EsIndexCleaner cronjob container ([#2224](https://github.com/jaegertracing/jaeger-operator/pull/2224), [@michalschott](https://github.com/michalschott))
|
||||
* Remove resource limitation for the operator pod ([#2221](https://github.com/jaegertracing/jaeger-operator/pull/2221), [@iblancasa](https://github.com/iblancasa))
|
||||
* Add PriorityClass for AllInOne strategy ([#2218](https://github.com/jaegertracing/jaeger-operator/pull/2218), [@sonofgibs](https://github.com/sonofgibs))
|
||||
|
||||
|
||||
## v1.45.0 (2023-05-16)
|
||||
|
||||
## v1.44.0 (2023-04-13)
|
||||
* Feat: add `NodeSelector` to jaeger collector, query, and ingestor ([#2200](https://github.com/jaegertracing/jaeger-operator/pull/2200), [@AhmedGrati](https://github.com/AhmedGrati))
|
||||
|
||||
## v1.43.0 (2023-02-07)
|
||||
* update operator-sdk to 1.27.0 ([#2178](https://github.com/jaegertracing/jaeger-operator/pull/2178), [@iblancasa](https://github.com/iblancasa))
|
||||
* Support JaegerCommonSpec in JaegerCassandraCreateSchemaSpec ([#2176](https://github.com/jaegertracing/jaeger-operator/pull/2176), [@haanhvu](https://github.com/haanhvu))
|
||||
|
||||
## v1.42.0 (2023-02-07)
|
||||
* Upgrade Kafka Operator default version to 0.32.0 ([#2150](https://github.com/jaegertracing/jaeger-operator/pull/2150), [@iblancasa](https://github.com/iblancasa))
|
||||
* Upgrade Kind, Kind images and add Kubernetes 1.26 ([#2161](https://github.com/jaegertracing/jaeger-operator/pull/2161), [@iblancasa](https://github.com/iblancasa))
|
||||
|
||||
1.41.1 (2023-01-23)
|
||||
-------------------
|
||||
* Fix the Jaeger version for the Jaeger Operator 1.41.x ([#2157](https://github.com/jaegertracing/jaeger-operator/pull/2157), [@iblancasa](https://github.com/iblancasa))
|
||||
|
||||
1.40.0 (2022-12-23)
|
||||
-------------------
|
||||
* Support e2e tests on multi architecture environment ([#2139](https://github.com/jaegertracing/jaeger-operator/pull/2139), [@jkandasa](https://github.com/jkandasa))
|
||||
* limit the get of deployments to WATCH_NAMESPACE on sync ([#2126](https://github.com/jaegertracing/jaeger-operator/pull/2126), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* choose first server address ([#2087](https://github.com/jaegertracing/jaeger-operator/pull/2087), [@Efrat19](https://github.com/Efrat19))
|
||||
* Fix query ingress when using streaming strategy ([#2120](https://github.com/jaegertracing/jaeger-operator/pull/2120), [@kevinearls](https://github.com/kevinearls))
|
||||
* Fix Liveness Probe for Ingester and Query ([#2122](https://github.com/jaegertracing/jaeger-operator/pull/2122), [@ricoberger](https://github.com/ricoberger))
|
||||
* Fix for min tls version to v1.2 ([#2119](https://github.com/jaegertracing/jaeger-operator/pull/2119), [@kangsheng89](https://github.com/kangsheng89))
|
||||
|
||||
1.39.0 (2022-11-03)
|
||||
-------------------
|
||||
* Fix: svc port doesnt match istio convention ([#2101](https://github.com/jaegertracing/jaeger-operator/pull/2101), [@frzifus](https://github.com/frzifus))
|
||||
|
||||
1.38.1 (2022-10-11)
|
||||
-------------------
|
||||
* Add ability to specify es proxy resources ([#2079](https://github.com/jaegertracing/jaeger-operator/pull/2079), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Fix: CVE-2022-27664 ([#2081](https://github.com/jaegertracing/jaeger-operator/pull/2081), [@albertlockett](https://github.com/albertlockett))
|
||||
* Add liveness and readiness probes to injected sidecar ([#2077](https://github.com/jaegertracing/jaeger-operator/pull/2077), [@MacroPower](https://github.com/MacroPower))
|
||||
* Add http- port prefix to follow istio naming conventions ([#2075](https://github.com/jaegertracing/jaeger-operator/pull/2075), [@cnvergence](https://github.com/cnvergence))
|
||||
|
||||
1.38.0 (2022-09-19)
|
||||
-------------------
|
||||
* added pathType to ingress ([#2066](https://github.com/jaegertracing/jaeger-operator/pull/2066), [@giautm](https://github.com/giautm))
|
||||
* set alias enable variable for spark cronjob ([#2061](https://github.com/jaegertracing/jaeger-operator/pull/2061), [@miyunari](https://github.com/miyunari))
|
||||
* migrate autoscaling v2beta2 to v2 for Kubernetes 1.26 ([#2055](https://github.com/jaegertracing/jaeger-operator/pull/2055), [@iblancasa](https://github.com/iblancasa))
|
||||
* add container security context support ([#2033](https://github.com/jaegertracing/jaeger-operator/pull/2033), [@mjnagel](https://github.com/mjnagel))
|
||||
* change verbosity level and message of the log for autoprovisioned kafka ([#2026](https://github.com/jaegertracing/jaeger-operator/pull/2026), [@iblancasa](https://github.com/iblancasa))
|
||||
|
||||
1.37.0 (2022-08-11)
|
||||
-------------------
|
||||
|
||||
* Upgrade operator-sdk to 1.22.2 ([#2021](https://github.com/jaegertracing/jaeger-operator/pull/2021), [@iblancasa](https://github.com/iblancasa))
|
||||
* es-dependencies: support image pull secret ([#2012](https://github.com/jaegertracing/jaeger-operator/pull/2012), [@frzifus](https://github.com/frzifus))
|
||||
|
||||
1.36.0 (2022-07-18)
|
||||
-------------------
|
||||
|
||||
* added flag to change webhook port ([#1991](https://github.com/jaegertracing/jaeger-operator/pull/1991), [@klubi](https://github.com/klubi))
|
||||
* Upgrade operator-sdk to 1.22.0 ([#1951](https://github.com/jaegertracing/jaeger-operator/pull/1951), [@iblancasa](https://github.com/iblancasa))
|
||||
* Add elasticsearch storage date format config. ([#1325](https://github.com/jaegertracing/jaeger-operator/pull/1325), [@sniperking1234](https://github.com/sniperking1234))
|
||||
* Add support for custom liveness probe ([#1605](https://github.com/jaegertracing/jaeger-operator/pull/1605), [@ricoberger](https://github.com/ricoberger))
|
||||
* Add service annotations ([#1526](https://github.com/jaegertracing/jaeger-operator/pull/1526), [@herbguo](https://github.com/herbguo))
|
||||
|
||||
1.35.0 (2022-06-16)
|
||||
-------------------
|
||||
|
||||
* fix: point to a newer openshift oauth image 4.12 ([#1955](https://github.com/jaegertracing/jaeger-operator/pull/1955), [@frzifus](https://github.com/frzifus))
|
||||
* Expose OTLP collector and allInOne ports ([#1948](https://github.com/jaegertracing/jaeger-operator/pull/1948), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Add support for ImagePullSecrets in cronjobs ([#1935](https://github.com/jaegertracing/jaeger-operator/pull/1935), [@alexandrevilain](https://github.com/alexandrevilain))
|
||||
* fix: ocp es rollover #1932 ([#1937](https://github.com/jaegertracing/jaeger-operator/pull/1937), [@frzifus](https://github.com/frzifus))
|
||||
* add kafkaSecretName for collector and ingester ([#1910](https://github.com/jaegertracing/jaeger-operator/pull/1910), [@luohua13](https://github.com/luohua13))
|
||||
* Add autoscalability E2E test for OpenShift ([#1936](https://github.com/jaegertracing/jaeger-operator/pull/1936), [@iblancasa](https://github.com/iblancasa))
|
||||
* Fix version in Docker container. ([#1924](https://github.com/jaegertracing/jaeger-operator/pull/1924), [@iblancasa](https://github.com/iblancasa))
|
||||
* Verify namespace permissions before adding ns controller ([#1914](https://github.com/jaegertracing/jaeger-operator/pull/1914), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* fix: skip dependencies on openshift platform ([#1921](https://github.com/jaegertracing/jaeger-operator/pull/1921), [@frzifus](https://github.com/frzifus))
|
||||
* fix: remove common name label ([#1920](https://github.com/jaegertracing/jaeger-operator/pull/1920), [@frzifus](https://github.com/frzifus))
|
||||
* Ignore not found error on 1.31.0 upgrade routine ([#1913](https://github.com/jaegertracing/jaeger-operator/pull/1913), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
|
||||
1.34.1 (2022-05-24)
|
||||
-------------------
|
||||
Fix: storage.es.tls.enabled flag not passed to es-index-cleaner ([#1896](https://github.com/jaegertracing/jaeger-operator/pull/1896), [@indigostar-kr](https://github.com/indigostar-kr))
|
||||
|
||||
1.34.0 (2022-05-18)
|
||||
-------------------
|
||||
* Fix: jaeger operator fails to parse Jaeger instance version ([#1885](https://github.com/jaegertracing/jaeger-operator/pull/1885), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Support Kubernetes 1.24 ([#1882](https://github.com/jaegertracing/jaeger-operator/pull/1882), [@iblancasa](https://github.com/iblancasa))
|
||||
* Cronjob migration ([#1856](https://github.com/jaegertracing/jaeger-operator/pull/1856), [@kevinearls](https://github.com/kevinearls))
|
||||
* Fix: setting default Istio annotation in Pod instead of Deployment ([#1860](https://github.com/jaegertracing/jaeger-operator/pull/1860), [@cnvergence](https://github.com/cnvergence))
|
||||
* Add http- prefix to port names in collector and agent services ([#1862](https://github.com/jaegertracing/jaeger-operator/pull/1862), [@cnvergence](https://github.com/cnvergence))
|
||||
|
||||
1.33.0 (2022-04-14)
|
||||
-------------------
|
||||
* Adding priority-class for esIndexCleaner ([#1732](https://github.com/jaegertracing/jaeger-operator/pull/1732), [@swapnilpotnis](https://github.com/swapnilpotnis))
|
||||
* Fix: webhook deadlock ([#1850](https://github.com/jaegertracing/jaeger-operator/pull/1850), [@frzifus](https://github.com/frzifus))
|
||||
* Fix: take namespace modifications into account ([#1839](https://github.com/jaegertracing/jaeger-operator/pull/1839), [@frzifus](https://github.com/frzifus))
|
||||
* Replace deployment reconciler with webhook ([#1828](https://github.com/jaegertracing/jaeger-operator/pull/1828), [@frzifus](https://github.com/frzifus))
|
||||
* Add managed by metric ([#1831](https://github.com/jaegertracing/jaeger-operator/pull/1831), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Fix admissionReviews version for operator-sdk upgrade ([#1827](https://github.com/jaegertracing/jaeger-operator/pull/1827), [@kevinearls](https://github.com/kevinearls))
|
||||
* Make RHOL Elasticsearch cert-management feature optional ([#1824](https://github.com/jaegertracing/jaeger-operator/pull/1824), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* Update the operator-sdk to v1.17.0 ([#1825](https://github.com/jaegertracing/jaeger-operator/pull/1825), [@kevinearls](https://github.com/kevinearls))
|
||||
* Fix metrics selectors ([#1742](https://github.com/jaegertracing/jaeger-operator/pull/1742), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
|
||||
1.32.0 (2022-03-09)
|
||||
-------------------
|
||||
|
||||
* Custom Image Pull Policy ([#1798](https://github.com/jaegertracing/jaeger-operator/pull/1798), [@edenkoveshi](https://github.com/edenkoveshi))
|
||||
* add METRICS_STORAGE_TYPE for metrics query ([#1755](https://github.com/jaegertracing/jaeger-operator/pull/1755), [@JaredTan95](https://github.com/JaredTan95))
|
||||
* Make operator more resiliant to etcd defrag activity ([#1795](https://github.com/jaegertracing/jaeger-operator/pull/1795), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* Automatically set num shards and replicas from referenced OCP ES ([#1737](https://github.com/jaegertracing/jaeger-operator/pull/1737), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* support image pull secrets ([#1740](https://github.com/jaegertracing/jaeger-operator/pull/1740), [@frzifus](https://github.com/frzifus))
|
||||
* Fix webhook secret cert name ([#1772](https://github.com/jaegertracing/jaeger-operator/pull/1772), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
|
||||
1.31.0 (2022-02-09)
|
||||
-------------------
|
||||
* Fix panic caused by an invalid type assertion ([#1738](https://github.com/jaegertracing/jaeger-operator/pull/1738), [@frzifus](https://github.com/frzifus))
|
||||
* Add ES autoprovisioning CR metric ([#1728](https://github.com/jaegertracing/jaeger-operator/pull/1728), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Use Elasticsearch provisioning from OpenShift Elasticsearch operator ([#1708](https://github.com/jaegertracing/jaeger-operator/pull/1708), [@pavolloffay](https://github.com/pavolloffay))
|
||||
|
||||
1.30.0 (2022-01-18)
|
||||
-------------------
|
||||
* Only expose the query-http[s] port in the OpenShift route ([#1719](https://github.com/jaegertracing/jaeger-operator/pull/1719), [@rkukura](https://github.com/rkukura))
|
||||
* Add CR Metrics for Jaeger Kind. ([#1706](https://github.com/jaegertracing/jaeger-operator/pull/1706), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Avoid calling k8s api for each resource kind on the cluster ([#1712](https://github.com/jaegertracing/jaeger-operator/pull/1712), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* First call of autodetect should be synchronous ([#1713](https://github.com/jaegertracing/jaeger-operator/pull/1713), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Add permissions for imagestreams ([#1714](https://github.com/jaegertracing/jaeger-operator/pull/1714), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Restore default metrics port to avoid breaking helm ([#1703](https://github.com/jaegertracing/jaeger-operator/pull/1703), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Add leases permissions to manifest. ([#1704](https://github.com/jaegertracing/jaeger-operator/pull/1704), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Change spark-dependencies image to GHCR ([#1701](https://github.com/jaegertracing/jaeger-operator/pull/1701), [@pavolloffay](https://github.com/pavolloffay))
|
||||
* Register ES types ([#1688](https://github.com/jaegertracing/jaeger-operator/pull/1688), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Add support for IBM Power (ppc64le) arch ([#1672](https://github.com/jaegertracing/jaeger-operator/pull/1672), [@Abhijit-Mane](https://github.com/Abhijit-Mane))
|
||||
* util.Truncate add the values to the truncated after the excess is 0 ([#1678](https://github.com/jaegertracing/jaeger-operator/pull/1678), [@mmatache](https://github.com/mmatache))
|
||||
|
||||
1.29.1 (2021-12-15)
|
||||
-------------------
|
||||
* Register oschema for openshift resources ([#1673](https://github.com/jaegertracing/jaeger-operator/pull/1673), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
|
||||
1.29.0 (2021-12-10)
|
||||
-------------------
|
||||
* Fix default namespace ([#1651](https://github.com/jaegertracing/jaeger-operator/pull/1651), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
* Fix finding the correct instance when there are multiple jaeger instances during injecting the sidecar ([#1639](https://github.com/jaegertracing/jaeger-operator/pull/1639), [@alibo](https://github.com/alibo))
|
||||
* Migrate to operator-sdk 1.13 ([#1623](https://github.com/jaegertracing/jaeger-operator/pull/1623), [@rubenvp8510](https://github.com/rubenvp8510))
|
||||
|
||||
1.28.0 (2021-11-08)
|
||||
-------------------
|
||||
|
|
|
@ -1,34 +0,0 @@
|
|||
The following table shows the compatibility of Jaeger Operator with three different components: Kubernetes, Strimzi Operator, and Cert-Manager.
|
||||
|
||||
| Jaeger Operator | Kubernetes | Strimzi Operator | Cert-Manager |
|
||||
|-----------------|----------------|--------------------|--------------|
|
||||
| v1.62.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
|
||||
| v1.61.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
|
||||
| v1.60.x | v1.19 to v1.30 | v0.32 | v1.6.1 |
|
||||
| v1.59.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.58.x | skipped | skipped | skipped |
|
||||
| v1.57.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.56.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.55.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.54.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.53.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.52.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.51.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.50.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.49.x | v1.19 to v1.28 | v0.32 | v1.6.1 |
|
||||
| v1.48.x | v1.19 to v1.27 | v0.32 | v1.6.1 |
|
||||
| v1.47.x | v1.19 to v1.27 | v0.32 | v1.6.1 |
|
||||
| v1.46.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
|
||||
| v1.45.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
|
||||
| v1.44.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
|
||||
| v1.43.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
|
||||
| v1.42.x | v1.19 to v1.26 | v0.32 | v1.6.1 |
|
||||
| v1.41.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
|
||||
| v1.40.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
|
||||
| v1.39.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
|
||||
| v1.38.x | v1.19 to v1.25 | v0.30 | v1.6.1 |
|
||||
| v1.37.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
|
||||
| v1.36.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
|
||||
| v1.35.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
|
||||
| v1.34.x | v1.19 to v1.24 | v0.23 | v1.6.1 |
|
||||
| v1.33.x | v1.19 to v1.23 | v0.23 | v1.6.1 |
|
217
CONTRIBUTING.md
217
CONTRIBUTING.md
|
@ -6,79 +6,96 @@ This project is [Apache 2.0 licensed](LICENSE) and accepts contributions via Git
|
|||
|
||||
We gratefully welcome improvements to documentation as well as to code.
|
||||
|
||||
## Getting Started
|
||||
|
||||
This project is a regular [Kubernetes Operator](https://coreos.com/operators/) built using the Operator SDK. Refer to the Operator SDK documentation to understand the basic architecture of this operator.
|
||||
|
||||
## Installing the Operator SDK command line tool
|
||||
### Installing the Operator SDK command line tool
|
||||
|
||||
Follow the installation guidelines from [Operator SDK GitHub page](https://github.com/operator-framework/operator-sdk)
|
||||
Follow the installation guidelines from [Operator SDK GitHub page](https://github.com/operator-framework/operator-sdk) or run `make install-sdk`.
|
||||
|
||||
## Developing
|
||||
### Developing
|
||||
|
||||
As usual for operators following the Operator SDK in recent versions, the dependencies are managed using [`go modules`](https://golang.org/doc/go1.11#modules). Refer to that project's documentation for instructions on how to add or update dependencies.
|
||||
|
||||
The first step is to get a local Kubernetes instance up and running. The recommended approach for development is using `minikube` with *ingress* enabled. Refer to the Kubernetes' [documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) for instructions on how to install it.
|
||||
The first step is to get a local Kubernetes instance up and running. The recommended approach is using `minikube`. Refer to the Kubernetes' [documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) for instructions on how to install it.
|
||||
|
||||
Once `minikube` is installed, it can be started with:
|
||||
```sh
|
||||
minikube start --addons=ingress
|
||||
|
||||
```
|
||||
minikube start
|
||||
```
|
||||
|
||||
NOTE: Make sure to read the documentation to learn the performance switches that can be applied to your platform.
|
||||
|
||||
Log into docker (or another image registry):
|
||||
```sh
|
||||
docker login --username <dockerusername>
|
||||
```
|
||||
|
||||
Once minikube has finished starting, get the Operator running:
|
||||
```sh
|
||||
make cert-manager
|
||||
IMG=docker.io/$USER/jaeger-operator:latest make generate bundle docker push deploy
|
||||
```
|
||||
|
||||
NOTE: If your registry username is not the same as $USER, modify the previous command before executing it. Also change *docker.io* if you are using a different image registry.
|
||||
```
|
||||
make run
|
||||
```
|
||||
|
||||
At this point, a Jaeger instance can be installed:
|
||||
```sh
|
||||
|
||||
```
|
||||
kubectl apply -f examples/simplest.yaml
|
||||
kubectl get jaegers
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
To verify the Jaeger instance is running, execute *minikube ip* and open that address in a browser, or follow the steps below
|
||||
```sh
|
||||
export MINIKUBE_IP=`minikube ip`
|
||||
curl http://{$MINIKUBE_IP}/api/services
|
||||
```
|
||||
NOTE: you may have to execute the *curl* command twice to get a non-empty result
|
||||
|
||||
Tests should be simple unit tests and/or end-to-end tests. For small changes, unit tests should be sufficient, but every new feature should be accompanied with end-to-end tests as well. Tests can be executed with:
|
||||
```sh
|
||||
make test
|
||||
```
|
||||
|
||||
#### Cleaning up
|
||||
To remove the instance:
|
||||
```sh
|
||||
|
||||
```
|
||||
kubectl delete -f examples/simplest.yaml
|
||||
```
|
||||
|
||||
Tests should be simple unit tests and/or end-to-end tests. For small changes, unit tests should be sufficient, but every new feature should be accompanied with end-to-end tests as well. Tests can be executed with:
|
||||
|
||||
```
|
||||
make test
|
||||
```
|
||||
|
||||
NOTE: you can adjust the Docker image namespace by overriding the variable `NAMESPACE`, like: `make test NAMESPACE=quay.io/my-username`. The full Docker image name can be customized by overriding `BUILD_IMAGE` instead, like: `make test BUILD_IMAGE=quay.io/my-username/jaeger-operator:0.0.1`
|
||||
|
||||
#### Model changes
|
||||
|
||||
The Operator SDK generates the `pkg/apis/jaegertracing/v1/zz_generated.*.go` files via the command `make generate`. This should be executed whenever there's a model change (`pkg/apis/jaegertracing/v1/jaeger_types.go`)
|
||||
|
||||
### Storage configuration
|
||||
#### Ingress configuration
|
||||
|
||||
Kubernetes comes with no ingress provider by default. For development purposes, when running `minikube`, the following command can be executed to install an ingress provider:
|
||||
|
||||
```
|
||||
make ingress
|
||||
```
|
||||
|
||||
This will install the `NGINX` ingress provider. It's recommended to wait for the ingress pods to be in the `READY` and `RUNNING` state before starting the operator. You can check it by running:
|
||||
|
||||
```
|
||||
kubectl get pods -n ingress-nginx
|
||||
```
|
||||
|
||||
To verify that it's working, deploy the `simplest.yaml` and check the ingress routes:
|
||||
|
||||
```
|
||||
$ kubectl apply -f examples/simplest.yaml
|
||||
jaeger.jaegertracing.io/simplest created
|
||||
$ kubectl get ingress
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
simplest-query * 192.168.122.69 80 12s
|
||||
```
|
||||
|
||||
Accessing the provided "address" in your web browser should display the Jaeger UI.
|
||||
|
||||
#### Storage configuration
|
||||
|
||||
There are a set of templates under the `test` directory that can be used to setup an Elasticsearch and/or Cassandra cluster. Alternatively, the following commands can be executed to install it:
|
||||
|
||||
```sh
|
||||
```
|
||||
make es
|
||||
make cassandra
|
||||
```
|
||||
|
||||
### Operator-Lifecycle-Manager Integration
|
||||
#### Operator-Lifecycle-Manager Integration
|
||||
|
||||
The [Operator-Lifecycle-Manager (OLM)](https://github.com/operator-framework/operator-lifecycle-manager/) can install, manage, and upgrade operators and their dependencies in a cluster.
|
||||
|
||||
|
@ -90,22 +107,23 @@ With OLM, users can:
|
|||
|
||||
OLM also enforces some constraints on the components it manages in order to ensure a good user experience.
|
||||
|
||||
The Jaeger community provides and maintains a [ClusterServiceVersion (CSV) YAML](https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md) to integrate with OLM.
|
||||
The Jaeger community provides and maintains a [ClusterServiceVersion (CSV) YAML](https://github.com/operator-framework/operator-lifecycle-manager/blob/master/Documentation/design/building-your-csv.md/) to integrate with OLM.
|
||||
|
||||
Starting from operator-sdk v0.5.0, one can generate and update CSVs based on the yaml files in the deploy folder.
|
||||
The Jaeger CSV can be updated to version 1.9.0 with the following command:
|
||||
|
||||
```sh
|
||||
```
|
||||
$ operator-sdk generate csv --csv-version 1.9.0
|
||||
INFO[0000] Generating CSV manifest version 1.9.0
|
||||
INFO[0000] Create deploy/olm-catalog/jaeger-operator.csv.yaml
|
||||
INFO[0000] Create deploy/olm-catalog/_generated.concat_crd.yaml
|
||||
INFO[0000] Create deploy/olm-catalog/jaeger-operator.csv.yaml
|
||||
INFO[0000] Create deploy/olm-catalog/_generated.concat_crd.yaml
|
||||
```
|
||||
|
||||
The generated CSV yaml should then be compared and used to update the `deploy/olm-catalog/jaeger.clusterserviceversion.yaml` file which represents the stable version copied to the operatorhub following each jaeger operator release. Once merged, the `jaeger-operator.csv.yaml` file should be removed.
|
||||
The generated CSV yaml should then be compared and used to update the deploy/olm-catalog/jaeger.clusterserviceversion.yaml file which represents the stable version copied to the operatorhub following each jaeger operator release. Once merged, the jaeger-operator.csv.yaml file should be removed.
|
||||
|
||||
The `jaeger.clusterserviceversion.yaml` file can then be tested with this command:
|
||||
```sh
|
||||
The jaeger.clusterserviceversion.yaml file can then be tested with this command:
|
||||
|
||||
```
|
||||
$ operator-sdk scorecard --cr-manifest examples/simplest.yaml --csv-path deploy/olm-catalog/jaeger.clusterserviceversion.yaml --init-timeout 30
|
||||
Checking for existence of spec and status blocks in CR
|
||||
Checking that operator actions are reflected in status
|
||||
|
@ -128,113 +146,50 @@ OLM Integration:
|
|||
Total Score: 4/18 points
|
||||
```
|
||||
|
||||
## E2E tests
|
||||
#### E2E tests
|
||||
|
||||
### Requisites
|
||||
|
||||
Before running the E2E tests you need to install:
|
||||
|
||||
* [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation): a tool for running local Kubernetes clusters
|
||||
* [KUTTL](https://kuttl.dev/docs/cli.html#setup-the-kuttl-kubectl-plugin): a tool to run the Kubernetes tests
|
||||
|
||||
|
||||
### Running the E2E tests
|
||||
|
||||
#### Using KIND cluster
|
||||
The whole set of end-to-end tests can be executed via:
|
||||
|
||||
```sh
|
||||
$ make run-e2e-tests
|
||||
```
|
||||
$ make e2e-tests
|
||||
```
|
||||
|
||||
The end-to-end tests are split into tags and can be executed in separate groups, such as:
|
||||
|
||||
```sh
|
||||
$ make run-e2e-tests-examples
|
||||
```
|
||||
$ make e2e-tests-smoke
|
||||
```
|
||||
|
||||
Other targets include `run-e2e-tests-cassandra` and `run-e2e-tests-elasticsearch`. You can list them running:
|
||||
```sh
|
||||
$ make e2e-test-suites
|
||||
Other targets include `e2e-tests-cassandra` and `e2e-tests-elasticsearch`. Refer to the `Makefile` for an up-to-date list of targets.
|
||||
|
||||
If you face issues like the one below, make sure you don't have any Jaeger instances (`kubectl get jaegers`) running nor Ingresses (`kubectl get ingresses`):
|
||||
|
||||
```
|
||||
--- FAIL: TestSmoke (316.59s)
|
||||
--- FAIL: TestSmoke/smoke (316.55s)
|
||||
--- FAIL: TestSmoke/smoke/daemonset (115.54s)
|
||||
...
|
||||
...
|
||||
daemonset.go:30: timed out waiting for the condition
|
||||
...
|
||||
...
|
||||
```
|
||||
|
||||
**Note**: there are some variables you need to take into account in order to
|
||||
improve your experience running the E2E tests.
|
||||
##### Kuttl E2E tests
|
||||
|
||||
| Variable name | Description | Example usage |
|
||||
|-------------------|-----------------------------------------------------|------------------------------------|
|
||||
| KUTTL_OPTIONS | Options to pass directly to the KUTTL call | KUTTL_OPTIONS="--test es-rollover" |
|
||||
| E2E_TESTS_TIMEOUT | Timeout for each step in the E2E tests. In seconds | E2E_TESTS_TIMEOUT=500 |
|
||||
| USE_KIND_CLUSTER | Start a KIND cluster to run the E2E tests | USE_KIND_CLUSTER=true |
|
||||
| KIND_KEEP_CLUSTER | Do not remove the KIND cluster after running tests  | KIND_KEEP_CLUSTER=true |
|
||||
There are some tests that use [Kuttl](https://kuttl.dev/); those tests can be executed via:
|
||||
|
||||
Also, you can enable/disable the installation of the different operators needed
|
||||
to run the tests:
|
||||
| Variable name | Description | Example usage |
|
||||
|----------------|---------------------------------------------|---------------------|
|
||||
| JAEGER_OLM | Jaeger Operator was installed using OLM | JAEGER_OLM=true |
|
||||
| KAFKA_OLM | Kafka Operator was installed using OLM | KAFKA_OLM=true |
|
||||
| PROMETHEUS_OLM | Prometheus Operator was installed using OLM | PROMETHEUS_OLM=true |
|
||||
|
||||
#### An external cluster (like OpenShift)
|
||||
The commands from the previous section are valid when running the E2E tests in an
|
||||
external cluster like OpenShift, minikube or other Kubernetes environment. The only
|
||||
differences are:
|
||||
* You need to log in your Kubernetes cluster before running the E2E tests
|
||||
* You need to provide the `USE_KIND_CLUSTER=false` parameter when calling `make`
|
||||
|
||||
```sh
|
||||
$ make run-e2e-tests USE_KIND_CLUSTER=false
|
||||
```
|
||||
$ make kuttl-e2e
|
||||
```
|
||||
|
||||
### Developing new E2E tests
|
||||
|
||||
E2E tests are located under `tests/e2e`. Each folder is associated with an E2E test suite. The
|
||||
Tests are developed using KUTTL. Before developing a new test, [learn how KUTTL test works](https://kuttl.dev/docs/what-is-kuttl.html).
|
||||
|
||||
To add a new suite, create a new folder named after the suite under `tests/e2e`.
|
||||
|
||||
Each suite folder contains:
|
||||
* `Makefile`: describes the rules associated with rendering the files needed for your tests and running the tests
|
||||
* `render.sh`: renders all the files needed for your tests (or to skip them)
|
||||
* A folder per test to run
|
||||
|
||||
When the tests are rendered, each test folder is copied to `_build`. The files generated
|
||||
by `render.sh` are created under `_build/<test name>`.
|
||||
|
||||
##### Makefile
|
||||
The `Makefile` file must contain two rules:
|
||||
|
||||
```Makefile
|
||||
render-e2e-tests-<suite name>: set-assert-e2e-img-name
|
||||
./tests/e2e/<suite name>/render.sh
|
||||
|
||||
run-e2e-tests-<suite name>: TEST_SUITE_NAME=<suite name>
|
||||
run-e2e-tests-<suite name>: run-suite-tests
|
||||
```
|
||||
|
||||
Where `<suite name>` is the name of your E2E test suite. Your E2E test suite
|
||||
will be automatically indexed in the `run-e2e-tests` Makefile target.
|
||||
|
||||
##### render.sh
|
||||
|
||||
This file renders all the YAML files that are part of the E2E test. The `render.sh`
|
||||
file must start with:
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
|
||||
source $(dirname "$0")/../render-utils.sh
|
||||
```
|
||||
|
||||
The `render-utils.sh` file contains multiple functions to make it easier to develop E2E tests and reuse logic. You can go to it and review the documentation of each one of the functions to
|
||||
understand their parameters and effects.
|
||||
You first need to install [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) in order to run the Kuttl-based e2e tests
|
||||
|
||||
#### Building [OCI Images](https://github.com/opencontainers/image-spec/blob/master/spec.md) for multiple arch (linux/arm64, linux/amd64)
|
||||
|
||||
OCI images can be built and published with [buildx](https://github.com/docker/buildx); for local testing, it can be executed via:
|
||||
|
||||
```sh
|
||||
```
|
||||
$ OPERATOR_VERSION=devel ./.ci/publish-images.sh
|
||||
```
|
||||
|
||||
|
@ -244,7 +199,7 @@ if we want to execute this in local env, need to setup buildx:
|
|||
|
||||
1. install docker cli plugin
|
||||
|
||||
```sh
|
||||
```
|
||||
$ export DOCKER_BUILDKIT=1
|
||||
$ docker build --platform=local -o . git://github.com/docker/buildx
|
||||
$ mkdir -p ~/.docker/cli-plugins
|
||||
|
@ -254,13 +209,13 @@ $ mv buildx ~/.docker/cli-plugins/docker-buildx
|
|||
|
||||
2. install qemu for multi arch
|
||||
|
||||
```sh
|
||||
```
|
||||
$ docker run --privileged --rm tonistiigi/binfmt --install all
|
||||
```
|
||||
(via https://github.com/docker/buildx#building-multi-platform-images)
|
||||
|
||||
3. create a builder
|
||||
|
||||
```sh
|
||||
```
|
||||
$ docker buildx create --use --name builder
|
||||
```
|
||||
|
|
56
Dockerfile
56
Dockerfile
|
@ -1,56 +0,0 @@
|
|||
# Build the manager binary
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22@sha256:f43c6f049f04cbbaeb28f0aad3eea15274a7d0a7899a617d0037aec48d7ab010 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
# cache deps before building and copying source so that we don't need to re-download as much
|
||||
# and so that source changes don't invalidate our downloaded layer
|
||||
COPY hack/install/install-dependencies.sh hack/install/
|
||||
COPY hack/install/install-utils.sh hack/install/
|
||||
COPY go.mod .
|
||||
COPY go.sum .
|
||||
RUN ./hack/install/install-dependencies.sh
|
||||
|
||||
# Copy the go source
|
||||
COPY main.go main.go
|
||||
COPY apis/ apis/
|
||||
COPY cmd/ cmd/
|
||||
COPY controllers/ controllers/
|
||||
COPY pkg/ pkg/
|
||||
|
||||
COPY versions.txt versions.txt
|
||||
|
||||
ARG JAEGER_VERSION
|
||||
ARG JAEGER_AGENT_VERSION
|
||||
ARG VERSION_PKG
|
||||
ARG VERSION
|
||||
ARG VERSION_DATE
|
||||
|
||||
# Dockerfile `FROM --platform=${BUILDPLATFORM}` means
|
||||
# prepare image for build for matched BUILDPLATFORM, eq. linux/amd64
|
||||
# by this way, we could avoid to using qemu, which slow down compiling process.
|
||||
# and usefully for language who support multi-arch build like go.
|
||||
# see last part of https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
|
||||
ARG TARGETARCH
|
||||
# Build
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -ldflags="-X ${VERSION_PKG}.version=${VERSION} -X ${VERSION_PKG}.buildDate=${VERSION_DATE} -X ${VERSION_PKG}.defaultJaeger=${JAEGER_VERSION} -X ${VERSION_PKG}.defaultAgent=${JAEGER_AGENT_VERSION}" -a -o jaeger-operator main.go
|
||||
|
||||
FROM quay.io/centos/centos:stream9
|
||||
|
||||
ENV USER_UID=1001 \
|
||||
USER_NAME=jaeger-operator
|
||||
|
||||
RUN INSTALL_PKGS="openssl" && \
|
||||
dnf install -y $INSTALL_PKGS && \
|
||||
rpm -V $INSTALL_PKGS && \
|
||||
dnf clean all && \
|
||||
mkdir /tmp/_working_dir && \
|
||||
chmod og+w /tmp/_working_dir
|
||||
|
||||
WORKDIR /
|
||||
COPY --from=builder /workspace/jaeger-operator .
|
||||
COPY scripts/cert_generation.sh scripts/cert_generation.sh
|
||||
|
||||
USER ${USER_UID}:${USER_UID}
|
||||
|
||||
ENTRYPOINT ["/jaeger-operator"]
|
|
@ -1,35 +1,26 @@
|
|||
# Build the manager binary
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22@sha256:f43c6f049f04cbbaeb28f0aad3eea15274a7d0a7899a617d0037aec48d7ab010 as builder
|
||||
FROM golang:1.16 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
WORKDIR /go/src/github.com/jaegertracing/jaeger-operator
|
||||
|
||||
# Download the dependencies. Doing this, if there are changes in the source
|
||||
# code but not in the dependencies to download, the tool to build the image will
|
||||
# use the cached dependencies
|
||||
COPY hack/install/install-dependencies.sh hack/install/
|
||||
COPY hack/install/install-utils.sh hack/install/
|
||||
COPY go.mod .
|
||||
COPY go.sum .
|
||||
RUN ./hack/install/install-dependencies.sh
|
||||
COPY go.mod /go/src/github.com/jaegertracing/jaeger-operator/go.mod
|
||||
COPY go.sum /go/src/github.com/jaegertracing/jaeger-operator/go.sum
|
||||
RUN go mod download
|
||||
|
||||
COPY tests tests
|
||||
|
||||
ENV CGO_ENABLED=0
|
||||
COPY . /go/src/github.com/jaegertracing/jaeger-operator/
|
||||
|
||||
# Build
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -o ./uiconfig -a ./tests/assert-jobs/uiconfig/main.go
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -o ./reporter -a ./tests/assert-jobs/reporter/main.go
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -o ./query -a ./tests/assert-jobs/query/main.go
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -o ./index -a ./tests/assert-jobs/index/main.go
|
||||
|
||||
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./reporter -a ./tests/assert-jobs/reporter/main.go
|
||||
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./reporter-otlp -a ./tests/assert-jobs/reporter-otlp/main.go
|
||||
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./query -a ./tests/assert-jobs/query/main.go
|
||||
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o ./index -a ./tests/assert-jobs/index/main.go
|
||||
|
||||
# Use the curl container image to ensure we have curl installed. Also, it is a
|
||||
# minimal container image
|
||||
FROM curlimages/curl@sha256:94e9e444bcba979c2ea12e27ae39bee4cd10bc7041a472c4727a558e213744e6
|
||||
FROM scratch
|
||||
WORKDIR /
|
||||
COPY --from=builder /workspace/reporter .
|
||||
COPY --from=builder /workspace/reporter-otlp .
|
||||
COPY --from=builder /workspace/query .
|
||||
COPY --from=builder /workspace/index .
|
||||
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/uiconfig .
|
||||
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/reporter .
|
||||
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/query .
|
||||
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/index .
|
||||
|
|
754
Makefile
754
Makefile
|
@ -1,221 +1,292 @@
|
|||
include tests/e2e/Makefile
|
||||
|
||||
# When the VERBOSE variable is set to 1, all the commands are shown
|
||||
ifeq ("$(VERBOSE)","true")
|
||||
ifeq ("$(VERBOSE)","1")
|
||||
echo_prefix=">>>>"
|
||||
else
|
||||
VECHO = @
|
||||
endif
|
||||
|
||||
VERSION_DATE ?= $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
|
||||
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x
|
||||
GOARCH ?= $(go env GOARCH)
|
||||
GOOS ?= $(go env GOOS)
|
||||
GO_FLAGS ?= GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 GO111MODULE=on
|
||||
GOPATH ?= "$(HOME)/go"
|
||||
GOROOT ?= "$(shell go env GOROOT)"
|
||||
KUBERNETES_CONFIG ?= "$(HOME)/.kube/config"
|
||||
WATCH_NAMESPACE ?= ""
|
||||
BIN_DIR ?= bin
|
||||
BIN_DIR ?= "build/_output/bin"
|
||||
IMPORT_LOG=import.log
|
||||
FMT_LOG=fmt.log
|
||||
ECHO ?= @echo $(echo_prefix)
|
||||
SED ?= "sed"
|
||||
# Jaeger Operator build variables
|
||||
OPERATOR_NAME ?= jaeger-operator
|
||||
IMG_PREFIX ?= quay.io/${USER}
|
||||
OPERATOR_VERSION ?= "$(shell grep -v '\#' versions.txt | grep operator | awk -F= '{print $$2}')"
|
||||
VERSION ?= "$(shell grep operator= versions.txt | awk -F= '{print $$2}')"
|
||||
IMG ?= ${IMG_PREFIX}/${OPERATOR_NAME}:${VERSION}
|
||||
BUNDLE_IMG ?= ${IMG_PREFIX}/${OPERATOR_NAME}-bundle:$(addprefix v,${VERSION})
|
||||
OUTPUT_BINARY ?= "$(BIN_DIR)/jaeger-operator"
|
||||
VERSION_PKG ?= "github.com/jaegertracing/jaeger-operator/pkg/version"
|
||||
export JAEGER_VERSION ?= "$(shell grep jaeger= versions.txt | awk -F= '{print $$2}')"
|
||||
# agent was removed in jaeger 1.62.0, and the new versions of jaeger doesn't distribute the images anymore
|
||||
# for that reason the last version of the agent is 1.62.0 and is pined here so we can update jaeger and maintain
|
||||
# the latest agent image.
|
||||
export JAEGER_AGENT_VERSION ?= "1.62.0"
|
||||
|
||||
# Kafka and Kafka Operator variables
|
||||
OPERATOR_NAME ?= jaeger-operator
|
||||
NAMESPACE ?= "$(USER)"
|
||||
BUILD_IMAGE ?= "$(NAMESPACE)/$(OPERATOR_NAME):latest"
|
||||
IMAGE_TAGS ?= "--tag $(BUILD_IMAGE)"
|
||||
OUTPUT_BINARY ?= "$(BIN_DIR)/$(OPERATOR_NAME)"
|
||||
VERSION_PKG ?= "github.com/jaegertracing/jaeger-operator/pkg/version"
|
||||
JAEGER_VERSION ?= "$(shell grep jaeger= versions.txt | awk -F= '{print $$2}')"
|
||||
OPERATOR_VERSION ?= "$(shell git describe --tags)"
|
||||
STORAGE_NAMESPACE ?= "${shell kubectl get sa default -o jsonpath='{.metadata.namespace}' || oc project -q}"
|
||||
KAFKA_NAMESPACE ?= "kafka"
|
||||
KAFKA_VERSION ?= 0.32.0
|
||||
KAFKA_EXAMPLE ?= "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/${KAFKA_VERSION}/examples/kafka/kafka-persistent-single.yaml"
|
||||
KAFKA_YAML ?= "https://github.com/strimzi/strimzi-kafka-operator/releases/download/${KAFKA_VERSION}/strimzi-cluster-operator-${KAFKA_VERSION}.yaml"
|
||||
# Prometheus Operator variables
|
||||
KAFKA_EXAMPLE ?= "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/0.23.0/examples/kafka/kafka-persistent-single.yaml"
|
||||
KAFKA_YAML ?= "https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.23.0/strimzi-cluster-operator-0.23.0.yaml"
|
||||
ES_OPERATOR_NAMESPACE ?= openshift-logging
|
||||
ES_OPERATOR_BRANCH ?= release-4.4
|
||||
ES_OPERATOR_IMAGE ?= quay.io/openshift/origin-elasticsearch-operator:4.4
|
||||
SDK_VERSION=v0.18.2
|
||||
ISTIO_VERSION ?= 1.11.2
|
||||
ISTIOCTL="./deploy/test/istio/bin/istioctl"
|
||||
GOPATH ?= "$(HOME)/go"
|
||||
GOROOT ?= "$(shell go env GOROOT)"
|
||||
|
||||
ECHO ?= @echo $(echo_prefix)
|
||||
SED ?= "sed"
|
||||
|
||||
PROMETHEUS_OPERATOR_TAG ?= v0.39.0
|
||||
PROMETHEUS_BUNDLE ?= https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/${PROMETHEUS_OPERATOR_TAG}/bundle.yaml
|
||||
# Metrics server variables
|
||||
METRICS_SERVER_TAG ?= v0.6.1
|
||||
METRICS_SERVER_YAML ?= https://github.com/kubernetes-sigs/metrics-server/releases/download/${METRICS_SERVER_TAG}/components.yaml
|
||||
# Ingress controller variables
|
||||
INGRESS_CONTROLLER_TAG ?= v1.0.1
|
||||
INGRESS_CONTROLLER_YAML ?= https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-${INGRESS_CONTROLLER_TAG}/deploy/static/provider/kind/deploy.yaml
|
||||
## Location to install tool dependencies
|
||||
LOCALBIN ?= $(shell pwd)/bin
|
||||
# Cert manager version to use
|
||||
CERTMANAGER_VERSION ?= 1.6.1
|
||||
CMCTL ?= $(LOCALBIN)/cmctl
|
||||
# Operator SDK
|
||||
OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk
|
||||
OPERATOR_SDK_VERSION ?= 1.32.0
|
||||
# Minimum Kubernetes and OpenShift versions
|
||||
MIN_KUBERNETES_VERSION ?= 1.19.0
|
||||
MIN_OPENSHIFT_VERSION ?= 4.12
|
||||
# Use a KIND cluster for the E2E tests
|
||||
USE_KIND_CLUSTER ?= true
|
||||
# Is Jaeger Operator installed via OLM?
|
||||
JAEGER_OLM ?= false
|
||||
# Is Kafka Operator installed via OLM?
|
||||
KAFKA_OLM ?= false
|
||||
# Is Prometheus Operator installed via OLM?
|
||||
PROMETHEUS_OLM ?= false
|
||||
# Istio binary path and version
|
||||
ISTIOCTL ?= $(LOCALBIN)/istioctl
|
||||
# Tools
|
||||
CRDOC ?= $(LOCALBIN)/crdoc
|
||||
KIND ?= $(LOCALBIN)/kind
|
||||
KUSTOMIZE ?= $(LOCALBIN)/kustomize
|
||||
|
||||
LD_FLAGS ?= "-X $(VERSION_PKG).version=$(OPERATOR_VERSION) -X $(VERSION_PKG).buildDate=$(VERSION_DATE) -X $(VERSION_PKG).defaultJaeger=$(JAEGER_VERSION)"
|
||||
|
||||
$(LOCALBIN):
|
||||
mkdir -p $(LOCALBIN)
|
||||
UNIT_TEST_PACKAGES := $(shell go list ./cmd/... ./pkg/... | grep -v elasticsearch/v1 | grep -v kafka/v1beta2 | grep -v client/versioned)
|
||||
|
||||
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
|
||||
ifeq (,$(shell go env GOBIN))
|
||||
GOBIN=$(shell go env GOPATH)/bin
|
||||
else
|
||||
GOBIN=$(shell go env GOBIN)
|
||||
endif
|
||||
TEST_OPTIONS = $(VERBOSE) -kubeconfig $(KUBERNETES_CONFIG) -namespacedMan ../../deploy/test/namespace-manifests.yaml -globalMan ../../deploy/test/global-manifests.yaml -root .
|
||||
|
||||
LD_FLAGS ?= "-X $(VERSION_PKG).version=$(VERSION) -X $(VERSION_PKG).buildDate=$(VERSION_DATE) -X $(VERSION_PKG).defaultJaeger=$(JAEGER_VERSION) -X $(VERSION_PKG).defaultAgent=$(JAEGER_AGENT_VERSION)"
|
||||
|
||||
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
|
||||
ENVTEST ?= $(LOCALBIN)/setup-envtest
|
||||
ENVTEST_K8S_VERSION = 1.30
|
||||
# Options for KIND version to use
|
||||
export KUBE_VERSION ?= 1.30
|
||||
KUBE_VERSION ?= 1.21
|
||||
KIND_CONFIG ?= kind-$(KUBE_VERSION).yaml
|
||||
|
||||
SCORECARD_TEST_IMG ?= quay.io/operator-framework/scorecard-test:v$(OPERATOR_SDK_VERSION)
|
||||
|
||||
.DEFAULT_GOAL := build
|
||||
|
||||
# Options for 'bundle-build'
|
||||
ifneq ($(origin CHANNELS), undefined)
|
||||
BUNDLE_CHANNELS := --channels=$(CHANNELS)
|
||||
endif
|
||||
ifneq ($(origin DEFAULT_CHANNEL), undefined)
|
||||
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
|
||||
endif
|
||||
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
|
||||
|
||||
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
|
||||
CRD_OPTIONS ?= "crd:maxDescLen=0,generateEmbeddedObjectMeta=true"
|
||||
|
||||
# If we are running in CI, run go test in verbose mode
|
||||
ifeq (,$(CI))
|
||||
GOTEST_OPTS=
|
||||
else
|
||||
GOTEST_OPTS=-v
|
||||
endif
|
||||
|
||||
all: manager
|
||||
|
||||
.PHONY: check
|
||||
check: install-tools
|
||||
check:
|
||||
$(ECHO) Checking...
|
||||
$(VECHO)./.ci/format.sh > $(FMT_LOG)
|
||||
$(VECHO)GOPATH=${GOPATH} .ci/format.sh > $(FMT_LOG)
|
||||
$(VECHO)[ ! -s "$(FMT_LOG)" ] || (echo "Go fmt, license check, or import ordering failures, run 'make format'" | cat - $(FMT_LOG) && false)
|
||||
|
||||
ensure-generate-is-noop: VERSION=$(OPERATOR_VERSION)
|
||||
ensure-generate-is-noop: set-image-controller generate bundle
|
||||
$(VECHO)# on make bundle config/manager/kustomization.yaml includes changes, which should be ignored for the below check
|
||||
$(VECHO)git restore config/manager/kustomization.yaml
|
||||
$(VECHO)git diff -s --exit-code api/v1/zz_generated.*.go || (echo "Build failed: a model has been changed but the generated resources aren't up to date. Run 'make generate' and update your PR." && exit 1)
|
||||
$(VECHO)git diff -s --exit-code bundle config || (echo "Build failed: the bundle, config files has been changed but the generated bundle, config files aren't up to date. Run 'make bundle' and update your PR." && git diff && exit 1)
|
||||
$(VECHO)git diff -s --exit-code docs/api.md || (echo "Build failed: the api.md file has been changed but the generated api.md file isn't up to date. Run 'make api-docs' and update your PR." && git diff && exit 1)
|
||||
.PHONY: ensure-generate-is-noop
|
||||
ensure-generate-is-noop: generate format
|
||||
$(VECHO)git diff pkg/apis/jaegertracing/v1/zz_generated.*.go
|
||||
$(VECHO)git diff -s --exit-code pkg/apis/jaegertracing/v1/zz_generated.*.go || (echo "Build failed: a model has been changed but the generated resources aren't up to date. Run 'make generate' and update your PR." && exit 1)
|
||||
$(VECHO)git diff -s --exit-code pkg/client/versioned || (echo "Build failed: the versioned clients aren't up to date. Run 'make generate'." && exit 1)
|
||||
|
||||
|
||||
.PHONY: format
|
||||
format: install-tools
|
||||
format:
|
||||
$(ECHO) Formatting code...
|
||||
$(VECHO)./.ci/format.sh
|
||||
$(VECHO)GOPATH=${GOPATH} .ci/format.sh
|
||||
|
||||
PHONY: lint
|
||||
lint: install-tools
|
||||
.PHONY: lint
|
||||
lint:
|
||||
$(ECHO) Linting...
|
||||
$(VECHO)$(LOCALBIN)/golangci-lint -v run
|
||||
$(VECHO)GOPATH=${GOPATH} ./.ci/lint.sh
|
||||
|
||||
.PHONY: vet
|
||||
vet: ## Run go vet against code.
|
||||
go vet ./...
|
||||
.PHONY: security
|
||||
security:
|
||||
$(ECHO) Security...
|
||||
$(VECHO)${GOPATH}/bin/gosec -quiet -exclude=G104 ./... 2>/dev/null
|
||||
|
||||
.PHONY: build
|
||||
build: format
|
||||
$(MAKE) gobuild
|
||||
|
||||
.PHONY: gobuild
|
||||
gobuild:
|
||||
$(ECHO) Building...
|
||||
$(VECHO)./hack/install/install-dependencies.sh
|
||||
$(VECHO)${GO_FLAGS} go build -ldflags $(LD_FLAGS) -o $(OUTPUT_BINARY) main.go
|
||||
$(VECHO)${GO_FLAGS} go build -o $(OUTPUT_BINARY) -ldflags $(LD_FLAGS)
|
||||
# compile the tests without running them
|
||||
$(VECHO)${GO_FLAGS} go test -c ./test/e2e/...
|
||||
|
||||
.PHONY: docker
|
||||
docker:
|
||||
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=JAEGER_AGENT_VERSION=${JAEGER_AGENT_VERSION} --build-arg=TARGETARCH=$(GOARCH) --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} -t "$(IMG)" . ${DOCKER_BUILD_OPTIONS}
|
||||
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=TARGETARCH=$(GOARCH) --file build/Dockerfile -t "$(BUILD_IMAGE)" .
|
||||
|
||||
.PHONY: dockerx
|
||||
dockerx:
|
||||
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker buildx build --push --progress=plain --build-arg=VERSION=${VERSION} --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=JAEGER_AGENT_VERSION=${JAEGER_AGENT_VERSION} --build-arg=GOPROXY=${GOPROXY} --build-arg VERSION_DATE=${VERSION_DATE} --build-arg VERSION_PKG=${VERSION_PKG} --platform=$(PLATFORMS) $(IMAGE_TAGS) .
|
||||
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker buildx build --push --progress=plain --build-arg=JAEGER_VERSION=${JAEGER_VERSION} --build-arg=GOPROXY=${GOPROXY} --platform=$(PLATFORMS) --file build/Dockerfile $(IMAGE_TAGS) .
|
||||
|
||||
.PHONY: push
|
||||
push:
|
||||
ifeq ($(CI),true)
|
||||
$(ECHO) Skipping push, as the build is running within a CI environment
|
||||
else
|
||||
$(ECHO) "Pushing image $(IMG)..."
|
||||
$(VECHO)docker push $(IMG) > /dev/null
|
||||
$(ECHO) "Pushing image $(BUILD_IMAGE)..."
|
||||
$(VECHO)docker push $(BUILD_IMAGE) > /dev/null
|
||||
endif
|
||||
|
||||
.PHONY: unit-tests
|
||||
unit-tests: envtest
|
||||
@echo Running unit tests...
|
||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -p 1 ${GOTEST_OPTS} ./... -cover -coverprofile=cover.out -ldflags $(LD_FLAGS)
|
||||
unit-tests:
|
||||
$(ECHO) Running unit tests...
|
||||
$(VECHO)go test $(VERBOSE) $(UNIT_TEST_PACKAGES) -cover -coverprofile=cover.out -ldflags $(LD_FLAGS)
|
||||
|
||||
.PHONY: e2e-tests
|
||||
e2e-tests: prepare-e2e-tests e2e-tests-smoke e2e-tests-cassandra e2e-tests-es e2e-tests-self-provisioned-es e2e-tests-streaming e2e-tests-examples1 e2e-tests-examples2 e2e-tests-examples-openshift e2e-tests-generate
|
||||
|
||||
.PHONY: prepare-e2e-tests
|
||||
prepare-e2e-tests: build docker push
|
||||
$(VECHO)mkdir -p deploy/test
|
||||
$(VECHO)cp deploy/service_account.yaml deploy/test/namespace-manifests.yaml
|
||||
$(ECHO) "---" >> deploy/test/namespace-manifests.yaml
|
||||
|
||||
$(VECHO)cat deploy/role.yaml >> deploy/test/namespace-manifests.yaml
|
||||
$(ECHO) "---" >> deploy/test/namespace-manifests.yaml
|
||||
|
||||
$(VECHO)# ClusterRoleBinding is created in test codebase because we don't know service account namespace
|
||||
$(VECHO)cat deploy/role_binding.yaml >> deploy/test/namespace-manifests.yaml
|
||||
$(ECHO) "---" >> deploy/test/namespace-manifests.yaml
|
||||
|
||||
$(VECHO)${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" test/operator.yaml >> deploy/test/namespace-manifests.yaml
|
||||
|
||||
$(VECHO)cp deploy/crds/jaegertracing.io_jaegers_crd.yaml deploy/test/global-manifests.yaml
|
||||
$(ECHO) "---" >> deploy/test/global-manifests.yaml
|
||||
$(VECHO)cat deploy/cluster_role.yaml >> deploy/test/global-manifests.yaml
|
||||
|
||||
.PHONY: e2e-tests-smoke
|
||||
e2e-tests-smoke: prepare-e2e-tests
|
||||
$(ECHO) Running Smoke end-to-end tests...
|
||||
$(VECHO)BUILD_IMAGE=$(BUILD_IMAGE) go test -tags=smoke ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-generate
|
||||
e2e-tests-generate: prepare-e2e-tests
|
||||
$(ECHO) Running generate end-to-end tests...
|
||||
$(VECHO)BUILD_IMAGE=$(BUILD_IMAGE) go test -tags=generate ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-cassandra
|
||||
e2e-tests-cassandra: prepare-e2e-tests cassandra
|
||||
$(ECHO) Running Cassandra end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) go test -tags=cassandra ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-es
|
||||
e2e-tests-es: prepare-e2e-tests es
|
||||
$(ECHO) Running Elasticsearch end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) go test -tags=elasticsearch ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-self-provisioned-es
|
||||
e2e-tests-self-provisioned-es: prepare-e2e-tests deploy-es-operator
|
||||
$(ECHO) Running Self provisioned Elasticsearch end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) ES_OPERATOR_NAMESPACE=$(ES_OPERATOR_NAMESPACE) ES_OPERATOR_IMAGE=$(ES_OPERATOR_IMAGE) go test -tags=self_provisioned_elasticsearch ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-self-provisioned-es-kafka
|
||||
e2e-tests-self-provisioned-es-kafka: prepare-e2e-tests deploy-kafka-operator deploy-es-operator
|
||||
$(ECHO) Running Self provisioned Elasticsearch and Kafka end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) ES_OPERATOR_NAMESPACE=$(ES_OPERATOR_NAMESPACE) ES_OPERATOR_IMAGE=$(ES_OPERATOR_IMAGE) go test -tags=self_provisioned_elasticsearch_kafka ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-token-propagation-es
|
||||
e2e-tests-token-propagation-es: prepare-e2e-tests deploy-es-operator
|
||||
$(ECHO) Running Token Propagation Elasticsearch end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) ES_OPERATOR_NAMESPACE=$(ES_OPERATOR_NAMESPACE) TEST_TIMEOUT=5 ES_OPERATOR_IMAGE=$(ES_OPERATOR_IMAGE) go test -tags=token_propagation_elasticsearch ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-streaming
|
||||
e2e-tests-streaming: prepare-e2e-tests es kafka
|
||||
$(ECHO) Running Streaming end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=streaming ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-examples1
|
||||
e2e-tests-examples1: prepare-e2e-tests cassandra
|
||||
$(ECHO) Running Example end-to-end tests part 1...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=examples1 ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-examples2
|
||||
e2e-tests-examples2: prepare-e2e-tests es kafka
|
||||
$(ECHO) Running Example end-to-end tests part 2...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=examples2 ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-examples-openshift
|
||||
e2e-tests-examples-openshift: prepare-e2e-tests deploy-es-operator
|
||||
$(ECHO) Running OpenShift Example end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=examples_openshift ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-autoscale
|
||||
e2e-tests-autoscale: prepare-e2e-tests es kafka
|
||||
$(ECHO) Running Autoscale end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=autoscale ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-multi-instance
|
||||
e2e-tests-multi-instance: prepare-e2e-tests es kafka
|
||||
$(ECHO) Running Multiple Instance end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=multiple ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-upgrade
|
||||
e2e-tests-upgrade: prepare-e2e-tests
|
||||
$(ECHO) Prepare next version image...
|
||||
$(VECHO)[ ! -z "$(PIPELINE)" ] || docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=JAEGER_VERSION=$(shell .ci/get_test_upgrade_version.sh ${JAEGER_VERSION}) --file build/Dockerfile -t "$(NAMESPACE)/$(OPERATOR_NAME):next" .
|
||||
BUILD_IMAGE="$(NAMESPACE)/$(OPERATOR_NAME):next" $(MAKE) push
|
||||
$(ECHO) Running Upgrade end-to-end tests...
|
||||
UPGRADE_TEST_VERSION=$(shell .ci/get_test_upgrade_version.sh ${JAEGER_VERSION}) go test -tags=upgrade ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: e2e-tests-istio
|
||||
e2e-tests-istio: prepare-e2e-tests istio
|
||||
$(ECHO) Running Istio end-to-end tests...
|
||||
$(VECHO)STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=istio ./test/e2e/... $(TEST_OPTIONS)
|
||||
|
||||
.PHONY: run
|
||||
run: crd
|
||||
$(VECHO)rm -rf /tmp/_cert*
|
||||
$(VECHO)POD_NAMESPACE=default OPERATOR_NAME=${OPERATOR_NAME} operator-sdk run local --watch-namespace="${WATCH_NAMESPACE}" --operator-flags "start ${CLI_FLAGS}" --go-ldflags ${LD_FLAGS}
|
||||
|
||||
.PHONY: run-debug
|
||||
run-debug: run
|
||||
run-debug: CLI_FLAGS = --log-level=debug --tracing-enabled=true
|
||||
|
||||
.PHONY: set-max-map-count
|
||||
set-max-map-count:
|
||||
# This is not required in OCP 4.1. The node tuning operator configures the property automatically
|
||||
# when label tuned.openshift.io/elasticsearch=true label is present on the ES pod. The label
|
||||
# is configured by ES operator.
|
||||
$(VECHO)minishift ssh -- 'sudo sysctl -w vm.max_map_count=262144' > /dev/null 2>&1 || true
|
||||
|
||||
.PHONY: set-node-os-linux
|
||||
set-node-os-linux:
|
||||
# Elasticsearch requires labeled nodes. These labels are by default present in OCP 4.2
|
||||
$(VECHO)kubectl label nodes --all kubernetes.io/os=linux --overwrite
|
||||
|
||||
cert-manager: cmctl
|
||||
# Consider using cmctl to install the cert-manager once install command is not experimental
|
||||
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v${CERTMANAGER_VERSION}/cert-manager.yaml
|
||||
$(CMCTL) check api --wait=5m
|
||||
.PHONY: deploy-es-operator
|
||||
deploy-es-operator: set-node-os-linux set-max-map-count deploy-prometheus-operator
|
||||
ifeq ($(OLM),true)
|
||||
$(ECHO) Skipping es-operator deployment, assuming it has been installed via OperatorHub
|
||||
else
|
||||
$(VECHO)kubectl create namespace ${ES_OPERATOR_NAMESPACE} 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/01-service-account.yaml -n ${ES_OPERATOR_NAMESPACE}
|
||||
$(VECHO)kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/02-role.yaml
|
||||
$(VECHO)kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/03-role-bindings.yaml
|
||||
$(VECHO)kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/04-crd.yaml -n ${ES_OPERATOR_NAMESPACE}
|
||||
$(VECHO)kubectl apply -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/05-deployment.yaml -n ${ES_OPERATOR_NAMESPACE}
|
||||
$(VECHO)kubectl set image deployment/elasticsearch-operator elasticsearch-operator=${ES_OPERATOR_IMAGE} -n ${ES_OPERATOR_NAMESPACE}
|
||||
endif
|
||||
|
||||
undeploy-cert-manager:
|
||||
kubectl delete --ignore-not-found=true -f https://github.com/jetstack/cert-manager/releases/download/v${CERTMANAGER_VERSION}/cert-manager.yaml
|
||||
|
||||
cmctl: $(CMCTL)
|
||||
$(CMCTL): $(LOCALBIN)
|
||||
./hack/install/install-cmctl.sh $(CERTMANAGER_VERSION)
|
||||
.PHONY: undeploy-es-operator
|
||||
undeploy-es-operator:
|
||||
ifeq ($(OLM),true)
|
||||
$(ECHO) Skipping es-operator undeployment, as it should have been installed via OperatorHub
|
||||
else
|
||||
$(VECHO)kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/05-deployment.yaml -n ${ES_OPERATOR_NAMESPACE} --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/04-crd.yaml -n ${ES_OPERATOR_NAMESPACE} --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/03-role-bindings.yaml --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/02-role.yaml --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete -f https://raw.githubusercontent.com/openshift/elasticsearch-operator/${ES_OPERATOR_BRANCH}/manifests/01-service-account.yaml -n ${ES_OPERATOR_NAMESPACE} --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete namespace ${ES_OPERATOR_NAMESPACE} --ignore-not-found=true 2>&1 || true
|
||||
endif
|
||||
|
||||
.PHONY: es
|
||||
es: storage
|
||||
ifeq ($(SKIP_ES_EXTERNAL),true)
|
||||
$(ECHO) Skipping creation of external Elasticsearch instance
|
||||
else
|
||||
$(VECHO)kubectl create -f ./tests/elasticsearch.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)kubectl create -f ./test/elasticsearch.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
|
||||
endif
|
||||
|
||||
.PHONY: istio
|
||||
istio:
|
||||
$(ECHO) Install istio with minimal profile
|
||||
$(VECHO)./hack/install/install-istio.sh
|
||||
$(VECHO)mkdir -p deploy/test
|
||||
$(VECHO)[ -f "${ISTIOCTL}" ] || (curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VERSION} TARGET_ARCH=x86_64 sh - && mv ./istio-${ISTIO_VERSION} ./deploy/test/istio)
|
||||
$(VECHO)${ISTIOCTL} install --set profile=minimal -y
|
||||
|
||||
.PHONY: undeploy-istio
|
||||
undeploy-istio:
|
||||
$(VECHO)${ISTIOCTL} manifest generate --set profile=demo | kubectl delete --ignore-not-found=true -f - || true
|
||||
$(VECHO)[ -f "${ISTIOCTL}" ] && (${ISTIOCTL} manifest generate --set profile=demo | kubectl delete --ignore-not-found=true -f -) || true
|
||||
$(VECHO)kubectl delete namespace istio-system --ignore-not-found=true || true
|
||||
$(VECHO)rm -rf deploy/test/istio
|
||||
|
||||
.PHONY: cassandra
|
||||
cassandra: storage
|
||||
$(VECHO)kubectl create -f ./tests/cassandra.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)kubectl create -f ./test/cassandra.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
|
||||
|
||||
.PHONY: storage
|
||||
storage:
|
||||
|
@ -226,28 +297,28 @@ storage:
|
|||
deploy-kafka-operator:
|
||||
$(ECHO) Creating namespace $(KAFKA_NAMESPACE)
|
||||
$(VECHO)kubectl create namespace $(KAFKA_NAMESPACE) 2>&1 | grep -v "already exists" || true
|
||||
ifeq ($(KAFKA_OLM),true)
|
||||
ifeq ($(OLM),true)
|
||||
$(ECHO) Skipping kafka-operator deployment, assuming it has been installed via OperatorHub
|
||||
else
|
||||
$(VECHO)curl --fail --location https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.32.0/strimzi-0.32.0.tar.gz --output tests/_build/kafka-operator.tar.gz --create-dirs
|
||||
$(VECHO)tar xf tests/_build/kafka-operator.tar.gz
|
||||
$(VECHO)${SED} -i 's/namespace: .*/namespace: ${KAFKA_NAMESPACE}/' strimzi-${KAFKA_VERSION}/install/cluster-operator/*RoleBinding*.yaml
|
||||
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/020-RoleBinding-strimzi-cluster-operator.yaml -n ${KAFKA_NAMESPACE}
|
||||
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/023-RoleBinding-strimzi-cluster-operator.yaml -n ${KAFKA_NAMESPACE}
|
||||
$(VECHO)kubectl create -f strimzi-${KAFKA_VERSION}/install/cluster-operator/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml -n ${KAFKA_NAMESPACE}
|
||||
$(VECHO)kubectl apply -f strimzi-${KAFKA_VERSION}/install/cluster-operator/ -n ${KAFKA_NAMESPACE}
|
||||
$(VECHO)kubectl create clusterrolebinding strimzi-cluster-operator-namespaced --clusterrole=strimzi-cluster-operator-namespaced --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)kubectl create clusterrolebinding strimzi-cluster-operator-entity-operator-delegation --clusterrole=strimzi-entity-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)kubectl create clusterrolebinding strimzi-cluster-operator-topic-operator-delegation --clusterrole=strimzi-topic-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)curl --fail --location $(KAFKA_YAML) --output deploy/test/kafka-operator.yaml --create-dirs
|
||||
$(VECHO)${SED} 's/namespace: .*/namespace: $(KAFKA_NAMESPACE)/' deploy/test/kafka-operator.yaml | kubectl -n $(KAFKA_NAMESPACE) apply -f - 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)kubectl set env deployment strimzi-cluster-operator -n ${KAFKA_NAMESPACE} STRIMZI_NAMESPACE="*"
|
||||
endif
|
||||
|
||||
.PHONY: undeploy-kafka-operator
|
||||
undeploy-kafka-operator:
|
||||
ifeq ($(KAFKA_OLM),true)
|
||||
ifeq ($(OLM),true)
|
||||
$(ECHO) Skiping kafka-operator undeploy
|
||||
else
|
||||
$(VECHO)kubectl delete --namespace $(KAFKA_NAMESPACE) -f tests/_build/kafka-operator.yaml --ignore-not-found=true 2>&1 || true
|
||||
$(VECHO)kubectl delete --namespace $(KAFKA_NAMESPACE) -f deploy/test/kafka-operator.yaml --ignore-not-found=true 2>&1 || true
|
||||
$(VECHO)kubectl delete clusterrolebinding strimzi-cluster-operator-namespaced --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete clusterrolebinding strimzi-cluster-operator-entity-operator-delegation --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete clusterrolebinding strimzi-cluster-operator-topic-operator-delegation --ignore-not-found=true || true
|
||||
endif
|
||||
$(VECHO)kubectl delete namespace $(KAFKA_NAMESPACE) --ignore-not-found=true 2>&1 || true
|
||||
|
||||
.PHONY: kafka
|
||||
kafka: deploy-kafka-operator
|
||||
|
@ -255,22 +326,21 @@ ifeq ($(SKIP_KAFKA),true)
|
|||
$(ECHO) Skipping Kafka/external ES related tests
|
||||
else
|
||||
$(ECHO) Creating namespace $(KAFKA_NAMESPACE)
|
||||
$(VECHO)mkdir -p tests/_build/
|
||||
$(VECHO)kubectl create namespace $(KAFKA_NAMESPACE) 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)curl --fail --location $(KAFKA_EXAMPLE) --output tests/_build/kafka-example.yaml --create-dirs
|
||||
$(VECHO)${SED} -i 's/size: 100Gi/size: 10Gi/g' tests/_build/kafka-example.yaml
|
||||
$(VECHO)kubectl -n $(KAFKA_NAMESPACE) apply --dry-run=client -f tests/_build/kafka-example.yaml
|
||||
$(VECHO)kubectl -n $(KAFKA_NAMESPACE) apply -f tests/_build/kafka-example.yaml 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)curl --fail --location $(KAFKA_EXAMPLE) --output deploy/test/kafka-example.yaml --create-dirs
|
||||
$(VECHO)${SED} -i 's/size: 100Gi/size: 10Gi/g' deploy/test/kafka-example.yaml
|
||||
$(VECHO)kubectl -n $(KAFKA_NAMESPACE) apply --dry-run=true -f deploy/test/kafka-example.yaml
|
||||
$(VECHO)kubectl -n $(KAFKA_NAMESPACE) apply -f deploy/test/kafka-example.yaml 2>&1 | grep -v "already exists" || true
|
||||
endif
|
||||
|
||||
.PHONY: undeploy-kafka
|
||||
undeploy-kafka: undeploy-kafka-operator
|
||||
$(VECHO)kubectl delete --namespace $(KAFKA_NAMESPACE) -f tests/_build/kafka-example.yaml 2>&1 || true
|
||||
$(VECHO)kubectl delete --namespace $(KAFKA_NAMESPACE) -f deploy/test/kafka-example.yaml 2>&1 || true
|
||||
|
||||
|
||||
.PHONY: deploy-prometheus-operator
|
||||
deploy-prometheus-operator:
|
||||
ifeq ($(PROMETHEUS_OLM),true)
|
||||
ifeq ($(OLM),true)
|
||||
$(ECHO) Skipping prometheus-operator deployment, assuming it has been installed via OperatorHub
|
||||
else
|
||||
$(VECHO)kubectl apply -f ${PROMETHEUS_BUNDLE}
|
||||
|
@ -278,58 +348,77 @@ endif
|
|||
|
||||
.PHONY: undeploy-prometheus-operator
|
||||
undeploy-prometheus-operator:
|
||||
ifeq ($(PROMETHEUS_OLM),true)
|
||||
ifeq ($(OLM),true)
|
||||
$(ECHO) Skipping prometheus-operator undeployment, as it should have been installed via OperatorHub
|
||||
else
|
||||
$(VECHO)kubectl delete -f ${PROMETHEUS_BUNDLE} --ignore-not-found=true || true
|
||||
endif
|
||||
|
||||
.PHONY: clean
|
||||
clean: undeploy-kafka undeploy-prometheus-operator undeploy-istio undeploy-cert-manager
|
||||
$(VECHO)kubectl delete namespace $(KAFKA_NAMESPACE) --ignore-not-found=true 2>&1 || true
|
||||
$(VECHO)if [ -d tests/_build ]; then rm -rf tests/_build ; fi
|
||||
$(VECHO)kubectl delete -f ./tests/cassandra.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
|
||||
$(VECHO)kubectl delete -f ./tests/elasticsearch.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
|
||||
clean: undeploy-kafka undeploy-es-operator undeploy-prometheus-operator undeploy-istio
|
||||
$(VECHO)rm -f deploy/test/*.yaml
|
||||
$(VECHO)if [ -d deploy/test ]; then rmdir deploy/test ; fi
|
||||
$(VECHO)kubectl delete -f ./test/cassandra.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
|
||||
$(VECHO)kubectl delete -f ./test/elasticsearch.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
|
||||
$(VECHO)kubectl delete -f deploy/crds/jaegertracing.io_jaegers_crd.yaml --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete -f deploy/operator.yaml --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete -f deploy/role_binding.yaml --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete -f deploy/role.yaml --ignore-not-found=true || true
|
||||
$(VECHO)kubectl delete -f deploy/service_account.yaml --ignore-not-found=true || true
|
||||
|
||||
.PHONY: manifests
|
||||
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
|
||||
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||
.PHONY: crd
|
||||
crd:
|
||||
$(VECHO)kubectl create -f deploy/crds/jaegertracing.io_jaegers_crd.yaml 2>&1 | grep -v "already exists" || true
|
||||
|
||||
.PHONY: ingress
|
||||
ingress:
|
||||
$(VECHO)minikube addons enable ingress
|
||||
|
||||
.PHONY: generate
|
||||
generate: controller-gen api-docs ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
||||
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
|
||||
generate: internal-generate format
|
||||
|
||||
.PHONY: internal-generate
|
||||
internal-generate:
|
||||
$(VECHO)GOPATH=${GOPATH} GOROOT=${GOROOT} ./.ci/generate.sh
|
||||
|
||||
.PHONY: test
|
||||
test: unit-tests run-e2e-tests
|
||||
test: unit-tests e2e-tests
|
||||
|
||||
.PHONY: all
|
||||
all: check format lint build test
|
||||
all: check format lint security build test
|
||||
|
||||
.PHONY: ci
|
||||
ci: install-tools ensure-generate-is-noop check format lint build unit-tests
|
||||
ci: ensure-generate-is-noop check format lint security build unit-tests
|
||||
|
||||
##@ Deployment
|
||||
.PHONY: scorecard
|
||||
scorecard:
|
||||
$(VECHO)operator-sdk scorecard --cr-manifest deploy/examples/simplest.yaml --csv-path deploy/olm-catalog/jaeger.clusterserviceversion.yaml --init-timeout 30
|
||||
|
||||
ignore-not-found ?= false
|
||||
.PHONY: install-sdk
|
||||
install-sdk:
|
||||
$(ECHO) Installing SDK ${SDK_VERSION}
|
||||
$(VECHO)SDK_VERSION=$(SDK_VERSION) GOPATH=$(GOPATH) ./.ci/install-sdk.sh
|
||||
|
||||
.PHONY: install-tools
|
||||
install-tools:
|
||||
$(VECHO)${GO_FLAGS} ./.ci/vgot.sh \
|
||||
golang.org/x/lint/golint \
|
||||
golang.org/x/tools/cmd/goimports \
|
||||
github.com/securego/gosec/cmd/gosec@v0.0.0-20191008095658-28c1128b7336 \
|
||||
sigs.k8s.io/controller-tools/cmd/controller-gen@v0.5.0 \
|
||||
k8s.io/code-generator/cmd/client-gen@v0.18.6 \
|
||||
k8s.io/kube-openapi/cmd/openapi-gen@v0.0.0-20200410145947-61e04a5be9a6
|
||||
./.ci/install-gomplate.sh
|
||||
|
||||
.PHONY: install
|
||||
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
|
||||
$(KUSTOMIZE) build config/crd | kubectl apply -f -
|
||||
|
||||
.PHONY: uninstall
|
||||
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
|
||||
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
|
||||
install: install-sdk install-tools
|
||||
|
||||
.PHONY: deploy
|
||||
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
|
||||
kubectl create namespace observability 2>&1 | grep -v "already exists" || true
|
||||
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
||||
./hack/enable-operator-features.sh
|
||||
$(KUSTOMIZE) build config/default | kubectl apply -f -
|
||||
|
||||
.PHONY: undeploy
|
||||
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
|
||||
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
|
||||
deploy: ingress crd
|
||||
$(VECHO)kubectl apply -f deploy/service_account.yaml
|
||||
$(VECHO)kubectl apply -f deploy/cluster_role.yaml
|
||||
$(VECHO)kubectl apply -f deploy/cluster_role_binding.yaml
|
||||
$(VECHO)${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" deploy/operator.yaml | kubectl apply -f -
|
||||
|
||||
.PHONY: operatorhub
|
||||
operatorhub: check-operatorhub-pr-template
|
||||
|
@ -340,179 +429,166 @@ check-operatorhub-pr-template:
|
|||
$(VECHO)curl https://raw.githubusercontent.com/operator-framework/community-operators/master/docs/pull_request_template.md -o .ci/.operatorhub-pr-template.md -s > /dev/null 2>&1
|
||||
$(VECHO)git diff -s --exit-code .ci/.operatorhub-pr-template.md || (echo "Build failed: the PR template for OperatorHub has changed. Sync it and try again." && exit 1)
|
||||
|
||||
.PHONY: local-jaeger-container
|
||||
local-jaeger-container:
|
||||
$(ECHO) "Starting local container with Jaeger. Check http://localhost:16686"
|
||||
$(VECHO)docker run -d --rm -p 16686:16686 -p 6831:6831/udp --name jaeger jaegertracing/all-in-one:1.22 > /dev/null
|
||||
|
||||
.PHONY: changelog
|
||||
changelog:
|
||||
$(ECHO) "Set env variable OAUTH_TOKEN before invoking, https://github.com/settings/tokens/new?description=GitHub%20Changelog%20Generator%20token"
|
||||
$(VECHO)docker run --rm -v "${PWD}:/app" pavolloffay/gch:latest --oauth-token ${OAUTH_TOKEN} --branch main --owner jaegertracing --repo jaeger-operator
|
||||
$(VECHO)docker run --rm -v "${PWD}:/app" pavolloffay/gch:latest --oauth-token ${OAUTH_TOKEN} --owner jaegertracing --repo jaeger-operator
|
||||
|
||||
|
||||
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
|
||||
controller-gen: ## Download controller-gen locally if necessary.
|
||||
$(VECHO)./hack/install/install-controller-gen.sh
|
||||
# e2e tests using kuttl
|
||||
|
||||
.PHONY: envtest
|
||||
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
|
||||
$(ENVTEST): $(LOCALBIN)
|
||||
test -s $(ENVTEST) || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
|
||||
kuttl:
|
||||
ifeq (, $(shell which kubectl-kuttl))
|
||||
echo ${PATH}
|
||||
ls -l /usr/local/bin
|
||||
which kubectl-kuttl
|
||||
|
||||
.PHONY: bundle
|
||||
bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files.
|
||||
$(SED) -i "s#containerImage: quay.io/jaegertracing/jaeger-operator:$(OPERATOR_VERSION)#containerImage: quay.io/jaegertracing/jaeger-operator:$(VERSION)#g" config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
|
||||
$(SED) -i 's/minKubeVersion: .*/minKubeVersion: $(MIN_KUBERNETES_VERSION)/' config/manifests/bases/jaeger-operator.clusterserviceversion.yaml
|
||||
$(SED) -i 's/com.redhat.openshift.versions=.*/com.redhat.openshift.versions=v$(MIN_OPENSHIFT_VERSION)/' bundle.Dockerfile
|
||||
$(SED) -i 's/com.redhat.openshift.versions: .*/com.redhat.openshift.versions: v$(MIN_OPENSHIFT_VERSION)/' bundle/metadata/annotations.yaml
|
||||
|
||||
$(OPERATOR_SDK) generate kustomize manifests -q
|
||||
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
|
||||
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --manifests --version $(VERSION) $(BUNDLE_METADATA_OPTS)
|
||||
$(OPERATOR_SDK) bundle validate ./bundle
|
||||
./hack/ignore-createdAt-bundle.sh
|
||||
|
||||
.PHONY: bundle-build
|
||||
bundle-build: ## Build the bundle image.
|
||||
docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
|
||||
|
||||
.PHONY: bundle-push
|
||||
bundle-push: ## Push the bundle image.
|
||||
docker push $(BUNDLE_IMG)
|
||||
|
||||
.PHONY: opm
|
||||
OPM = ./bin/opm
|
||||
opm: ## Download opm locally if necessary.
|
||||
ifeq (,$(wildcard $(OPM)))
|
||||
ifeq (,$(shell which opm 2>/dev/null))
|
||||
@{ \
|
||||
$(VECHO){ \
|
||||
set -e ;\
|
||||
mkdir -p $(dir $(OPM)) ;\
|
||||
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
|
||||
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\
|
||||
chmod +x $(OPM) ;\
|
||||
echo "" ;\
|
||||
echo "ERROR: kuttl not found." ;\
|
||||
echo "Please check https://kuttl.dev/docs/cli.html for installation instructions and try again." ;\
|
||||
echo "" ;\
|
||||
exit 1 ;\
|
||||
}
|
||||
else
|
||||
OPM = $(shell which opm)
|
||||
endif
|
||||
KUTTL=$(shell which kubectl-kuttl)
|
||||
endif
|
||||
|
||||
# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
|
||||
# These images MUST exist in a registry and be pull-able.
|
||||
BUNDLE_IMGS ?= $(BUNDLE_IMG)
|
||||
|
||||
# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
|
||||
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
|
||||
|
||||
# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
|
||||
ifneq ($(origin CATALOG_BASE_IMG), undefined)
|
||||
FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
|
||||
endif
|
||||
|
||||
# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
|
||||
# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
|
||||
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
|
||||
.PHONY: catalog-build
|
||||
catalog-build: opm ## Build a catalog image.
|
||||
$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
|
||||
|
||||
# Push the catalog image.
|
||||
.PHONY: catalog-push
|
||||
catalog-push: ## Push a catalog image.
|
||||
$(MAKE) docker-push IMG=$(CATALOG_IMG)
|
||||
|
||||
.PHONY: start-kind
|
||||
start-kind: kind
|
||||
ifeq ($(USE_KIND_CLUSTER),true)
|
||||
$(ECHO) Starting KIND cluster...
|
||||
# Instead of letting KUTTL create the Kind cluster (using the CLI or in the kuttl-tests.yaml
|
||||
# file), the cluster is created here. There are multiple reasons to do this:
|
||||
# * The kubectl command will not work outside KUTTL
|
||||
# * Some KUTTL versions are not able to start properly a Kind cluster
|
||||
# * The cluster will be removed after running KUTTL (this can be disabled). Sometimes,
|
||||
# the cluster teardown is not done properly and KUTTL can not be run with the --start-kind flag
|
||||
# When the Kind cluster is not created by Kuttl, the kindContainers parameter
|
||||
# from kuttl-tests.yaml has not effect so, it is needed to load the container
|
||||
# images here.
|
||||
$(VECHO)$(KIND) create cluster --config $(KIND_CONFIG) 2>&1 | grep -v "already exists" || true
|
||||
# Install metrics-server for HPA
|
||||
$(ECHO)"Installing the metrics-server in the kind cluster"
|
||||
$(VECHO)kubectl apply -f $(METRICS_SERVER_YAML)
|
||||
$(VECHO)kubectl patch deployment -n kube-system metrics-server --type "json" -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": --kubelet-insecure-tls}]'
|
||||
# Install the ingress-controller
|
||||
$(ECHO)"Installing the Ingress controller in the kind cluster"
|
||||
$(VECHO)kubectl apply -f $(INGRESS_CONTROLLER_YAML)
|
||||
# Check the deployments were done properly
|
||||
$(ECHO)"Checking the metrics-server was deployed properly"
|
||||
$(VECHO)kubectl wait --for=condition=available deployment/metrics-server -n kube-system --timeout=5m
|
||||
$(ECHO)"Checking the Ingress controller deployment was done successfully"
|
||||
$(VECHO)kubectl wait --for=condition=available deployment ingress-nginx-controller -n ingress-nginx --timeout=5m
|
||||
kind:
|
||||
ifeq (, $(shell which kind))
|
||||
$(VECHO){ \
|
||||
set -e ;\
|
||||
echo "" ;\
|
||||
echo "ERROR: kind not found." ;\
|
||||
echo "Please check https://kind.sigs.k8s.io/docs/user/quick-start/#installation for installation instructions and try again." ;\
|
||||
echo "" ;\
|
||||
exit 1 ;\
|
||||
}
|
||||
else
|
||||
$(ECHO)"KIND cluster creation disabled. Skipping..."
|
||||
KIND=$(shell which kind)
|
||||
endif
|
||||
|
||||
stop-kind:
|
||||
$(ECHO)"Stopping the kind cluster"
|
||||
$(VECHO)kind delete cluster
|
||||
.PHONY: prepare-e2e-kuttl-tests
|
||||
prepare-e2e-kuttl-tests: BUILD_IMAGE="local/jaeger-operator:e2e"
|
||||
prepare-e2e-kuttl-tests: build docker build-assert-job
|
||||
$(VECHO)mkdir -p tests/_build/manifests
|
||||
$(VECHO)mkdir -p tests/_build/crds
|
||||
|
||||
.PHONY: install-git-hooks
|
||||
$(VECHO)cp deploy/service_account.yaml tests/_build/manifests/01-jaeger-operator.yaml
|
||||
$(ECHO) "---" >> tests/_build/manifests/01-jaeger-operator.yaml
|
||||
|
||||
$(VECHO)cat deploy/role.yaml >> tests/_build/manifests/01-jaeger-operator.yaml
|
||||
$(ECHO) "---" >> tests/_build/manifests/01-jaeger-operator.yaml
|
||||
|
||||
$(VECHO)cat deploy/cluster_role.yaml >> tests/_build/manifests/01-jaeger-operator.yaml
|
||||
$(ECHO) "---" >> tests/_build/manifests/01-jaeger-operator.yaml
|
||||
|
||||
$(VECHO)${SED} "s~namespace: .*~namespace: jaeger-operator-system~gi" deploy/cluster_role_binding.yaml >> tests/_build/manifests/01-jaeger-operator.yaml
|
||||
$(ECHO) "---" >> tests/_build/manifests/01-jaeger-operator.yaml
|
||||
|
||||
$(VECHO)${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" deploy/operator.yaml >> tests/_build/manifests/01-jaeger-operator.yaml
|
||||
$(VECHO)${SED} "s~imagePullPolicy: Always~imagePullPolicy: Never~gi" tests/_build/manifests/01-jaeger-operator.yaml -i
|
||||
$(VECHO)${SED} "0,/fieldPath: metadata.namespace/s/fieldPath: metadata.namespace/fieldPath: metadata.annotations['olm.targetNamespaces']/gi" tests/_build/manifests/01-jaeger-operator.yaml -i
|
||||
|
||||
$(VECHO)cp deploy/crds/jaegertracing.io_jaegers_crd.yaml tests/_build/crds/jaegertracing.io_jaegers_crd.yaml
|
||||
$(VECHO)docker pull jaegertracing/vertx-create-span:operator-e2e-tests
|
||||
$(VECHO)docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:6.8.6
|
||||
|
||||
# This is needed for the generate test
|
||||
$(VECHO)@JAEGER_VERSION=${JAEGER_VERSION} gomplate -f tests/e2e/generate/jaeger-template.yaml.template -o tests/e2e/generate/jaeger-deployment.yaml
|
||||
# This is needed for the upgrade test
|
||||
$(VECHO)docker build --build-arg=GOPROXY=${GOPROXY} --build-arg=JAEGER_VERSION=$(shell .ci/get_test_upgrade_version.sh ${JAEGER_VERSION}) --file build/Dockerfile -t "local/jaeger-operator:next" .
|
||||
$(VECHO)JAEGER_VERSION=${JAEGER_VERSION} gomplate -f tests/e2e/upgrade/deployment-assert.yaml.template -o tests/e2e/upgrade/00-assert.yaml
|
||||
$(VECHO)JAEGER_VERSION=$(shell .ci/get_test_upgrade_version.sh ${JAEGER_VERSION}) gomplate -f tests/e2e/upgrade/deployment-assert.yaml.template -o tests/e2e/upgrade/01-assert.yaml
|
||||
$(VECHO)JAEGER_VERSION=${JAEGER_VERSION} gomplate -f tests/e2e/upgrade/deployment-assert.yaml.template -o tests/e2e/upgrade/02-assert.yaml
|
||||
$(VECHO)${SED} "s~local/jaeger-operator:e2e~local/jaeger-operator:next~gi" tests/_build/manifests/01-jaeger-operator.yaml > tests/e2e/upgrade/operator-upgrade.yaml
|
||||
# examples-agent-as-daemonset
|
||||
$(VECHO)gomplate -f examples/agent-as-daemonset.yaml -o tests/e2e/examples-agent-as-daemonset/00-install.yaml
|
||||
$(VECHO)JAEGER_NAME=agent-as-daemonset gomplate -f tests/templates/allinone-jaeger-assert.yaml.template -o tests/e2e/examples-agent-as-daemonset/00-assert.yaml
|
||||
$(VECHO)JAEGER_SERVICE=agent-as-daemonset JAEGER_OPERATION=smoketestoperation JAEGER_NAME=agent-as-daemonset gomplate -f tests/templates/smoke-test.yaml.template -o tests/e2e/examples-agent-as-daemonset/02-smoke-test.yaml
|
||||
$(VECHO)gomplate -f tests/templates/smoke-test-assert.yaml.template -o tests/e2e/examples-agent-as-daemonset/02-assert.yaml
|
||||
# examples-with-cassandra
|
||||
$(VECHO)gomplate -f tests/templates/cassandra-install.yaml.template -o tests/e2e/examples-with-cassandra/00-install.yaml
|
||||
$(VECHO)gomplate -f tests/templates/cassandra-assert.yaml.template -o tests/e2e/examples-with-cassandra/00-assert.yaml
|
||||
$(VECHO)gomplate -f examples/with-cassandra.yaml -o tests/e2e/examples-with-cassandra/01-install.yaml
|
||||
$(VECHO)${SED} -i "s~cassandra.default.svc~cassandra~gi" tests/e2e/examples-with-cassandra/01-install.yaml
|
||||
$(VECHO)JAEGER_NAME=with-cassandra gomplate -f tests/templates/allinone-jaeger-assert.yaml.template -o tests/e2e/examples-with-cassandra/01-assert.yaml
|
||||
$(VECHO)JAEGER_SERVICE=with-cassandra JAEGER_OPERATION=smoketestoperation JAEGER_NAME=with-cassandra gomplate -f tests/templates/smoke-test.yaml.template -o tests/e2e/examples-with-cassandra/02-smoke-test.yaml
|
||||
$(VECHO)gomplate -f tests/templates/smoke-test-assert.yaml.template -o tests/e2e/examples-with-cassandra/02-assert.yaml
|
||||
# examples-business-application-injected-sidecar
|
||||
$(VECHO)cat examples/business-application-injected-sidecar.yaml tests/e2e/examples-business-application-injected-sidecar/livenessProbe.yaml > tests/e2e/examples-business-application-injected-sidecar/00-install.yaml
|
||||
$(VECHO)gomplate -f examples/simplest.yaml -o tests/e2e/examples-business-application-injected-sidecar/01-install.yaml
|
||||
$(VECHO)JAEGER_NAME=simplest gomplate -f tests/templates/allinone-jaeger-assert.yaml.template -o tests/e2e/examples-business-application-injected-sidecar/01-assert.yaml
|
||||
$(VECHO)JAEGER_SERVICE=simplest JAEGER_OPERATION=smoketestoperation JAEGER_NAME=simplest gomplate -f tests/templates/smoke-test.yaml.template -o tests/e2e/examples-business-application-injected-sidecar/02-smoke-test.yaml
|
||||
$(VECHO)gomplate -f tests/templates/smoke-test-assert.yaml.template -o tests/e2e/examples-business-application-injected-sidecar/02-assert.yaml
|
||||
# istio
|
||||
$(VECHO)cat examples/business-application-injected-sidecar.yaml tests/e2e/istio/livelinessprobe.template > tests/e2e/istio/03-install.yaml
|
||||
# cassandra
|
||||
$(VECHO)gomplate -f tests/templates/cassandra-install.yaml.template -o tests/e2e/cassandra/00-install.yaml
|
||||
$(VECHO)gomplate -f tests/templates/cassandra-assert.yaml.template -o tests/e2e/cassandra/00-assert.yaml
|
||||
$(VECHO)INSTANCE_NAME=with-cassandra gomplate -f tests/templates/cassandra-jaeger-install.yaml.template -o tests/e2e/cassandra/01-install.yaml
|
||||
$(VECHO)INSTANCE_NAME=with-cassandra gomplate -f tests/templates/cassandra-jaeger-assert.yaml.template -o tests/e2e/cassandra/01-assert.yaml
|
||||
# cassandra spark
|
||||
$(VECHO) gomplate -f tests/templates/cassandra-install.yaml.template -o tests/e2e/cassandra-spark/00-install.yaml
|
||||
$(VECHO) gomplate -f tests/templates/cassandra-assert.yaml.template -o tests/e2e/cassandra-spark/00-assert.yaml
|
||||
$(VECHO)INSTANCE_NAME=test-spark-deps DEP_SCHEDULE=true CASSANDRA_MODE=prod gomplate -f tests/templates/cassandra-jaeger-install.yaml.template -o tests/e2e/cassandra-spark/01-install.yaml
|
||||
# es-spark-dependencies
|
||||
$(VECHO)gomplate -f tests/templates/elasticsearch-install.yaml.template -o tests/e2e/es-spark-dependencies/00-install.yaml
|
||||
$(VECHO)gomplate -f tests/templates/elasticsearch-assert.yaml.template -o tests/e2e/es-spark-dependencies/00-assert.yaml
|
||||
# es-simple-prod
|
||||
$(VECHO)gomplate -f tests/templates/elasticsearch-install.yaml.template -o tests/e2e/es-simple-prod/00-install.yaml
|
||||
$(VECHO)gomplate -f tests/templates/elasticsearch-assert.yaml.template -o tests/e2e/es-simple-prod/00-assert.yaml
|
||||
$(VECHO)JAEGER_NAME=simple-prod gomplate -f tests/templates/production-jaeger-install.yaml.template -o tests/e2e/es-simple-prod/01-install.yaml
|
||||
$(VECHO)JAEGER_NAME=simple-prod gomplate -f tests/templates/production-jaeger-assert.yaml.template -o tests/e2e/es-simple-prod/01-assert.yaml
|
||||
$(VECHO)JAEGER_SERVICE=simple-prod JAEGER_OPERATION=smoketestoperation JAEGER_NAME=simple-prod gomplate -f tests/templates/smoke-test.yaml.template -o tests/e2e/es-simple-prod/02-smoke-test.yaml
|
||||
$(VECHO)gomplate -f tests/templates/smoke-test-assert.yaml.template -o tests/e2e/es-simple-prod/02-assert.yaml
|
||||
# es-index-cleaner
|
||||
$(VECHO)gomplate -f tests/templates/elasticsearch-install.yaml.template -o tests/e2e/es-index-cleaner/00-install.yaml
|
||||
$(VECHO)gomplate -f tests/templates/elasticsearch-assert.yaml.template -o tests/e2e/es-index-cleaner/00-assert.yaml
|
||||
$(VECHO)JAEGER_NAME=test-es-index-cleaner-with-prefix gomplate -f tests/templates/production-jaeger-install.yaml.template -o tests/e2e/es-index-cleaner/jaeger-deployment
|
||||
$(VECHO)gomplate -f tests/e2e/es-index-cleaner/es-index.template -o tests/e2e/es-index-cleaner/es-index
|
||||
$(VECHO)cat tests/e2e/es-index-cleaner/jaeger-deployment tests/e2e/es-index-cleaner/es-index >> tests/e2e/es-index-cleaner/01-install.yaml
|
||||
$(VECHO)JAEGER_NAME=test-es-index-cleaner-with-prefix gomplate -f tests/templates/production-jaeger-assert.yaml.template -o tests/e2e/es-index-cleaner/01-assert.yaml
|
||||
$(VECHO)$(SED) "s~enabled: false~enabled: true~gi" tests/e2e/es-index-cleaner/01-install.yaml > tests/e2e/es-index-cleaner/03-install.yaml
|
||||
$(VECHO)gomplate -f tests/e2e/es-index-cleaner/01-install.yaml -o tests/e2e/es-index-cleaner/05-install.yaml
|
||||
$(VECHO)PREFIX=my-prefix gomplate -f tests/e2e/es-index-cleaner/es-index.template -o tests/e2e/es-index-cleaner/es-index2
|
||||
$(VECHO)cat tests/e2e/es-index-cleaner/jaeger-deployment tests/e2e/es-index-cleaner/es-index2 >> tests/e2e/es-index-cleaner/07-install.yaml
|
||||
$(VECHO)$(SED) "s~enabled: false~enabled: true~gi" tests/e2e/es-index-cleaner/07-install.yaml > tests/e2e/es-index-cleaner/09-install.yaml
|
||||
$(VECHO)gomplate -f tests/e2e/es-index-cleaner/04-wait-es-index-cleaner.yaml -o tests/e2e/es-index-cleaner/11-wait-es-index-cleaner.yaml
|
||||
$(VECHO)gomplate -f tests/e2e/es-index-cleaner/05-install.yaml -o tests/e2e/es-index-cleaner/12-install.yaml
|
||||
|
||||
|
||||
# end-to-tests
|
||||
.PHONY: kuttl-e2e
|
||||
kuttl-e2e: prepare-e2e-kuttl-tests start-kind run-kuttl-e2e
|
||||
|
||||
.PHONY: run-kuttl-e2e
|
||||
run-kuttl-e2e:
|
||||
$(VECHO)$(KUTTL) test
|
||||
|
||||
start-kind:
|
||||
$(VECHO)kind create cluster --config $(KIND_CONFIG)
|
||||
$(VECHO)kind load docker-image local/jaeger-operator:e2e
|
||||
$(VECHO)kind load docker-image local/asserts:e2e
|
||||
$(VECHO)kind load docker-image jaegertracing/vertx-create-span:operator-e2e-tests
|
||||
$(VECHO)kind load docker-image local/jaeger-operator:next
|
||||
$(VECHO)kind load docker-image docker.elastic.co/elasticsearch/elasticsearch-oss:6.8.6
|
||||
|
||||
.PHONY: build-assert-job
|
||||
build-assert-job:
|
||||
$(VECHO)docker build -t local/asserts:e2e -f Dockerfile.asserts .
|
||||
$(VECHO)docker build -t local/asserts:e2e -f Dockerfile.asserts .
|
||||
|
||||
.PHONY: build-assert-job
|
||||
install-git-hooks:
|
||||
$(VECHO)cp scripts/git-hooks/pre-commit .git/hooks
|
||||
|
||||
# Generates the released manifests
|
||||
release-artifacts: set-image-controller
|
||||
mkdir -p dist
|
||||
$(KUSTOMIZE) build config/default -o dist/jaeger-operator.yaml
|
||||
|
||||
# Set the controller image parameters
|
||||
set-image-controller: manifests kustomize
|
||||
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
||||
|
||||
.PHONY: tools
|
||||
tools: kustomize controller-gen operator-sdk
|
||||
|
||||
.PHONY: install-tools
|
||||
install-tools: operator-sdk
|
||||
$(VECHO)./hack/install/install-golangci-lint.sh
|
||||
$(VECHO)./hack/install/install-goimports.sh
|
||||
|
||||
.PHONY: kustomize
|
||||
kustomize: $(KUSTOMIZE)
|
||||
$(KUSTOMIZE): $(LOCALBIN)
|
||||
./hack/install/install-kustomize.sh
|
||||
|
||||
.PHONY: kind
|
||||
kind: $(KIND)
|
||||
$(KIND): $(LOCALBIN)
|
||||
./hack/install/install-kind.sh
|
||||
|
||||
.PHONY: prepare-release
|
||||
prepare-release:
|
||||
$(VECHO)./.ci/prepare-release.sh
|
||||
|
||||
scorecard-tests: operator-sdk
|
||||
echo "Operator sdk is $(OPERATOR_SDK)"
|
||||
$(OPERATOR_SDK) scorecard bundle -w 10m || (echo "scorecard test failed" && exit 1)
|
||||
|
||||
scorecard-tests-local: kind
|
||||
$(VECHO)$(KIND) create cluster --config $(KIND_CONFIG) 2>&1 | grep -v "already exists" || true
|
||||
$(VECHO)docker pull $(SCORECARD_TEST_IMG)
|
||||
$(VECHO)$(KIND) load docker-image $(SCORECARD_TEST_IMG)
|
||||
$(VECHO)kubectl wait --timeout=5m --for=condition=available deployment/coredns -n kube-system
|
||||
$(VECHO)$(MAKE) scorecard-tests
|
||||
|
||||
.PHONY: operator-sdk
|
||||
operator-sdk: $(OPERATOR_SDK)
|
||||
$(OPERATOR_SDK): $(LOCALBIN)
|
||||
test -s $(OPERATOR_SDK) || curl -sLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/v${OPERATOR_SDK_VERSION}/operator-sdk_`go env GOOS`_`go env GOARCH`
|
||||
@chmod +x $(OPERATOR_SDK)
|
||||
|
||||
api-docs: crdoc kustomize
|
||||
@{ \
|
||||
set -e ;\
|
||||
TMP_DIR=$$(mktemp -d) ; \
|
||||
$(KUSTOMIZE) build config/crd -o $$TMP_DIR/crd-output.yaml ;\
|
||||
$(CRDOC) --resources $$TMP_DIR/crd-output.yaml --output docs/api.md ;\
|
||||
}
|
||||
|
||||
.PHONY: crdoc
|
||||
crdoc: $(CRDOC)
|
||||
$(CRDOC): $(LOCALBIN)
|
||||
test -s $(CRDOC) || GOBIN=$(LOCALBIN) go install fybrik.io/crdoc@v0.5.2
|
||||
@chmod +x $(CRDOC)
|
||||
|
|
23
PROJECT
23
PROJECT
|
@ -1,23 +0,0 @@
|
|||
domain: jaegertracing.io
|
||||
layout:
|
||||
- go.kubebuilder.io/v3
|
||||
multigroup: true
|
||||
plugins:
|
||||
manifests.sdk.operatorframework.io/v2: {}
|
||||
scorecard.sdk.operatorframework.io/v2: {}
|
||||
projectName: jaeger-operator
|
||||
repo: github.com/jaegertracing/jaeger-operator
|
||||
resources:
|
||||
- api:
|
||||
crdVersion: v1
|
||||
namespaced: true
|
||||
controller: true
|
||||
domain: jaegertracing.io
|
||||
kind: Jaeger
|
||||
path: github.com/jaegertracing/jaeger-operator/apis/v1
|
||||
version: v1
|
||||
webhooks:
|
||||
defaulting: true
|
||||
validation: true
|
||||
webhookVersion: v1
|
||||
version: "3"
|
174
README.md
174
README.md
|
@ -1,4 +1,5 @@
|
|||
[![Build Status][ci-img]][ci] [![Go Report Card][goreport-img]][goreport] [![Code Coverage][cov-img]][cov] [![GoDoc][godoc-img]][godoc] [](https://securityscorecards.dev/viewer/?uri=github.com/jaegertracing/jaeger-operator)
|
||||
|
||||
[![Build Status][ci-img]][ci] [![Go Report Card][goreport-img]][goreport] [![Code Coverage][cov-img]][cov] [![GoDoc][godoc-img]][godoc]
|
||||
|
||||
# Jaeger Operator for Kubernetes
|
||||
|
||||
|
@ -8,7 +9,23 @@ The Jaeger Operator is an implementation of a [Kubernetes Operator](https://kube
|
|||
|
||||
Firstly, ensure an [ingress-controller is deployed](https://kubernetes.github.io/ingress-nginx/deploy/). When using `minikube`, you can use the `ingress` add-on: `minikube start --addons=ingress`
|
||||
|
||||
Then follow the Jaeger Operator [installation instructions](https://www.jaegertracing.io/docs/latest/operator/).
|
||||
To install the operator, run:
|
||||
```
|
||||
kubectl create namespace observability
|
||||
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/crds/jaegertracing.io_jaegers_crd.yaml
|
||||
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/service_account.yaml
|
||||
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/role.yaml
|
||||
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/role_binding.yaml
|
||||
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/operator.yaml
|
||||
```
|
||||
|
||||
The operator will activate extra features if given cluster-wide permissions. To enable that, run:
|
||||
```
|
||||
kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/cluster_role.yaml
|
||||
kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/cluster_role_binding.yaml
|
||||
```
|
||||
|
||||
Note that you'll need to download and customize the `cluster_role_binding.yaml` if you are using a namespace other than `observability`. You probably also want to download and customize the `operator.yaml`, setting the env var `WATCH_NAMESPACE` to have an empty value, so that it can watch for instances across all namespaces.
|
||||
|
||||
Once the `jaeger-operator` deployment in the namespace `observability` is ready, create a Jaeger instance, like:
|
||||
|
||||
|
@ -33,11 +50,16 @@ In this example, the Jaeger UI is available at http://192.168.122.34.
|
|||
|
||||
The official documentation for the Jaeger Operator, including all its customization options, are available under the main [Jaeger Documentation](https://www.jaegertracing.io/docs/latest/operator/).
|
||||
|
||||
CRD-API documentation can be found [here](./docs/api.md).
|
||||
|
||||
## Compatibility matrix
|
||||
|
||||
See the compatibility matrix [here](./COMPATIBILITY.md).
|
||||
The following table shows the compatibility of jaeger operator with different components, in this particular case we shows Kubernetes and Strimzi operator compatibility
|
||||
|
||||
|
||||
| Jaeger Operator | Kubernetes | Strimzi Operator |
|
||||
|-----------------|----------------------|---------------------
|
||||
| v1.24 | v1.19, v1.20, v1.21 | v0.23 |
|
||||
| v1.23 | v1.19, v1.20, v1.21 | v0.19, v0.20 |
|
||||
| v1.22 | v1.18 to v1.20 | v0.19 |
|
||||
|
||||
|
||||
### Jaeger Operator vs. Jaeger
|
||||
|
@ -64,159 +86,27 @@ The jaeger Operator *might* work on other untested versions of Strimzi Operator,
|
|||
|
||||
## (experimental) Generate Kubernetes manifest file
|
||||
|
||||
Sometimes it is preferable to generate plain manifests files instead of running an operator in a cluster. `jaeger-operator generate` generates kubernetes manifests from a given CR. In this example we apply the manifest generated by [examples/simplest.yaml](https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml) to the namespace `jaeger-test`:
|
||||
Sometimes it is preferable to generate plain manifests files instead of running an operator in a cluster. `jaeger-operator generate` generates kubernetes manifests from a given CR. In this example we apply the manifest generated by [examples/simplest.yaml](https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/examples/simplest.yaml) to the namespace `jaeger-test`:
|
||||
|
||||
```bash
|
||||
curl https://raw.githubusercontent.com/jaegertracing/jaeger-operator/main/examples/simplest.yaml | docker run -i --rm jaegertracing/jaeger-operator:main generate | kubectl apply -n jaeger-test -f -
|
||||
curl https://raw.githubusercontent.com/jaegertracing/jaeger-operator/master/deploy/examples/simplest.yaml | docker run -i --rm jaegertracing/jaeger-operator:master generate | kubectl apply -n jaeger-test -f -
|
||||
```
|
||||
|
||||
It is recommended to deploy the operator instead of generating a static manifest.
|
||||
|
||||
## Jaeger V2 Operator
|
||||
|
||||
As the Jaeger V2 is released, it is decided that Jaeger V2 will deployed on Kubernetes using [OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-operator). This will benefit both the users of Jaeger and OpenTelemetry. To use Jaeger V2 with OpenTelemetry Operator, the steps are as follows:
|
||||
|
||||
* Install the cert-manager in the existing cluster with the command:
|
||||
```bash
|
||||
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.1/cert-manager.yaml
|
||||
```
|
||||
|
||||
Please verify all the resources (e.g., Pods and Deployments) are in a ready state in the `cert-manager` namespace.
|
||||
|
||||
* Install the OpenTelemetry Operator by running:
|
||||
```bash
|
||||
kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
|
||||
```
|
||||
|
||||
Please verify all the resources (e.g., Pods and Deployments) are in a ready state in the `opentelemetry-operator-system` namespace.
|
||||
|
||||
### Using Jaeger with in-memory storage
|
||||
|
||||
Once all the resources are ready, create a Jaeger instance as follows:
|
||||
```yaml
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: opentelemetry.io/v1beta1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: jaeger-inmemory-instance
|
||||
spec:
|
||||
image: jaegertracing/jaeger:latest
|
||||
ports:
|
||||
- name: jaeger
|
||||
port: 16686
|
||||
config:
|
||||
service:
|
||||
extensions: [jaeger_storage, jaeger_query]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
exporters: [jaeger_storage_exporter]
|
||||
extensions:
|
||||
jaeger_query:
|
||||
storage:
|
||||
traces: memstore
|
||||
jaeger_storage:
|
||||
backends:
|
||||
memstore:
|
||||
memory:
|
||||
max_traces: 100000
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
exporters:
|
||||
jaeger_storage_exporter:
|
||||
trace_storage: memstore
|
||||
EOF
|
||||
```
|
||||
|
||||
To use the in-memory storage ui for Jaeger V2, expose the pod, deployment or the service as follows:
|
||||
```bash
|
||||
kubectl port-forward deployment/jaeger-inmemory-instance-collector 8080:16686
|
||||
```
|
||||
|
||||
Or
|
||||
|
||||
```bash
|
||||
kubectl port-forward service/jaeger-inmemory-instance-collector 8080:16686
|
||||
```
|
||||
|
||||
Once done, type `localhost:8080` in the browser to interact with the UI.
|
||||
|
||||
[Note] There's an ongoing development in OpenTelemetry Operator where users will be able to interact directly with the UI.
|
||||
|
||||
### Using Jaeger with database to store traces
|
||||
To use Jaeger V2 with the supported database, it is mandatory to create database deployments and they should be in `ready` state [(ref)](https://www.jaegertracing.io/docs/2.0/storage/).
|
||||
|
||||
Create a Kubernetes Service that exposes the database pods enabling communication between the database and Jaeger pods.
|
||||
|
||||
This can be achieved by creating a service in two ways, first by creating it [manually](https://kubernetes.io/docs/concepts/services-networking/service/) or second by creating it using imperative command.
|
||||
|
||||
```bash
|
||||
kubectl expose pods <pod-name> --port=<port-number> --name=<name-of-the-service>
|
||||
```
|
||||
|
||||
Or
|
||||
|
||||
```bash
|
||||
kubectl expose deployment <deployment-name> --port=<port-number> --name=<name-of-the-service>
|
||||
```
|
||||
|
||||
After the service is created, add the name of the service as an endpoint in their respective config as follows:
|
||||
|
||||
* [Cassandra DB](https://github.com/jaegertracing/jaeger/blob/main/cmd/jaeger/config-cassandra.yaml):
|
||||
```yaml
|
||||
jaeger_storage:
|
||||
backends:
|
||||
some_storage:
|
||||
cassandra:
|
||||
connection:
|
||||
servers: [<name-of-the-service>]
|
||||
```
|
||||
|
||||
* [ElasticSearch](https://github.com/jaegertracing/jaeger/blob/main/cmd/jaeger/config-elasticsearch.yaml):
|
||||
```yaml
|
||||
jaeger_storage:
|
||||
backends:
|
||||
some_storage:
|
||||
elasticseacrh:
|
||||
servers: [<name-of-the-service>]
|
||||
```
|
||||
|
||||
Use the modified config to create Jaeger instance with the help of OpenTelemetry Operator.
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: opentelemetry.io/v1beta1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: jaeger-storage-instance # name of your choice
|
||||
spec:
|
||||
image: jaegertracing/jaeger:latest
|
||||
ports:
|
||||
- name: jaeger
|
||||
port: 16686
|
||||
config:
|
||||
# modified config
|
||||
EOF
|
||||
```
|
||||
|
||||
## Contributing and Developing
|
||||
|
||||
Please see [CONTRIBUTING.md](CONTRIBUTING.md).
|
||||
|
||||
## License
|
||||
|
||||
|
||||
[Apache 2.0 License](./LICENSE).
|
||||
|
||||
[ci-img]: https://github.com/jaegertracing/jaeger-operator/workflows/CI%20Workflow/badge.svg
|
||||
[ci]: https://github.com/jaegertracing/jaeger-operator/actions
|
||||
[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-operator/branch/main/graph/badge.svg
|
||||
[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-operator/branch/master/graph/badge.svg
|
||||
[cov]: https://codecov.io/github/jaegertracing/jaeger-operator/
|
||||
[goreport-img]: https://goreportcard.com/badge/github.com/jaegertracing/jaeger-operator
|
||||
[goreport]: https://goreportcard.com/report/github.com/jaegertracing/jaeger-operator
|
||||
[godoc-img]: https://godoc.org/github.com/jaegertracing/jaeger-operator?status.svg
|
||||
[godoc]: https://godoc.org/github.com/jaegertracing/jaeger-operator/apis/v1#JaegerSpec
|
||||
[godoc]: https://godoc.org/github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1#JaegerSpec
|
||||
|
|
99
RELEASE.md
99
RELEASE.md
|
@ -1,5 +1,42 @@
|
|||
# Releasing the Jaeger Operator for Kubernetes
|
||||
|
||||
Steps to release a new version of the Jaeger Operator:
|
||||
|
||||
|
||||
1. Change the `versions.txt `so that it lists the target version of the Jaeger (if it is required). Don't touch the operator version it will be changed automatically in the next step.
|
||||
|
||||
1. Run `make prepare-release OPERATOR_VERSION=1.28.0`, using the operator version that will be released.
|
||||
|
||||
1. Prepare a changelog since last release.
|
||||
|
||||
1. Commit the changes and create a pull request:
|
||||
|
||||
```
|
||||
git commit -sm "Preparing release v1.28.0"
|
||||
```
|
||||
|
||||
1. Once the changes above are merged and available in `master` tag it with the desired version, prefixed with `v`, eg. `v1.28.0`
|
||||
|
||||
```
|
||||
git checkout master
|
||||
git tag v1.28.0
|
||||
git push git@github.com:jaegertracing/jaeger-operator.git v1.28.0
|
||||
```
|
||||
|
||||
1. The GitHub Workflow will take it from here, creating a GitHub release and publishing the images
|
||||
|
||||
1. After the release, PRs needs to be created against the Operator Hub Community Operators repositories:
|
||||
|
||||
* One for the [upstream-community-operators](https://github.com/k8s-operatorhub/community-operators), used by OLM on Kubernetes.
|
||||
* One for the [community-operators](https://github.com/redhat-openshift-ecosystem/community-operators-prod) used by OpenShift.
|
||||
|
||||
This can be done with the following steps:
|
||||
- Update master `git pull git@github.com:jaegertracing/jaeger-operator.git master`
|
||||
- Clone both repositories `upstream-community-operators` and `community-operators`
|
||||
- Run `make operatorhub`
|
||||
* If you have [`gh`](https://cli.github.com/) installed and configured, it will open the necessary PRs for you automatically.
|
||||
* If you don't have it, the branches will be pushed to `origin` and you should be able to open the PR from there
|
||||
|
||||
## Generating the changelog
|
||||
|
||||
- Get the `OAUTH_TOKEN` from [Github](https://github.com/settings/tokens/new?description=GitHub%20Changelog%20Generator%20token), select `repo:status` scope.
|
||||
|
@ -8,65 +45,3 @@
|
|||
* CI or testing-specific commits (e2e, unit test, ...)
|
||||
* bug fixes for problems that are not part of a release yet
|
||||
* version bumps for internal dependencies
|
||||
|
||||
## Releasing
|
||||
|
||||
Steps to release a new version of the Jaeger Operator:
|
||||
|
||||
|
||||
1. Change the `versions.txt `so that it lists the target version of the Jaeger (if it is required). **Don't touch the operator version**: it will be changed automatically in the next step.
|
||||
|
||||
2. Confirm that `MIN_KUBERNETES_VERSION` and `MIN_OPENSHIFT_VERSION` in the `Makefile` are still up-to-date, and update them if required.
|
||||
|
||||
2. Run `OPERATOR_VERSION=1.30.0 make prepare-release`, using the operator version that will be released.
|
||||
|
||||
3. Run the E2E tests in OpenShift as described in [the CONTRIBUTING.md](CONTRIBUTING.md#an-external-cluster-like-openshift) file. The tests will be executed automatically in Kubernetes by the GitHub Actions CI later.
|
||||
|
||||
4. Prepare a changelog since last release.
|
||||
|
||||
4. Update the release manager schedule.
|
||||
|
||||
5. Commit the changes and create a pull request:
|
||||
|
||||
```sh
|
||||
git commit -sm "Preparing release v1.30.0"
|
||||
```
|
||||
|
||||
5. Once the changes above are merged and available in `main` tag it with the desired version, prefixed with `v`, eg. `v1.30.0`
|
||||
|
||||
```sh
|
||||
git checkout main
|
||||
git tag v1.30.0
|
||||
git push git@github.com:jaegertracing/jaeger-operator.git v1.30.0
|
||||
```
|
||||
|
||||
6. The GitHub Workflow will take it from here, creating a GitHub release and publishing the images
|
||||
|
||||
7. After the release, PRs needs to be created against the Operator Hub Community Operators repositories:
|
||||
|
||||
* One for the [upstream-community-operators](https://github.com/k8s-operatorhub/community-operators), used by OLM on Kubernetes.
|
||||
* One for the [community-operators](https://github.com/redhat-openshift-ecosystem/community-operators-prod) used by OpenShift.
|
||||
|
||||
This can be done with the following steps:
|
||||
- Update main `git pull git@github.com:jaegertracing/jaeger-operator.git main`
|
||||
- Clone both repositories `upstream-community-operators` and `community-operators`
|
||||
- Run `make operatorhub`
|
||||
* If you have [`gh`](https://cli.github.com/) installed and configured, it will open the necessary PRs for you automatically.
|
||||
* If you don't have it, the branches will be pushed to `origin` and you should be able to open the PR from there
|
||||
|
||||
## Note
|
||||
After the PRs have been made it must be ensured that:
|
||||
- Images listed in the ClusterServiceVersion (CSV) have a versions tag [#1682](https://github.com/jaegertracing/jaeger-operator/issues/1682)
|
||||
- No `bundle` folder is included in the release
|
||||
- No foreign CRs like prometheus are in the manifests
|
||||
|
||||
## Release managers
|
||||
|
||||
The operator should be released within a week after the [Jaeger release](https://github.com/jaegertracing/jaeger/blob/main/RELEASE.md#release-managers).
|
||||
|
||||
| Version | Release Manager |
|
||||
|---------| -------------------------------------------------------- |
|
||||
| 1.63.0 | [Benedikt Bongartz](https://github.com/frzifus) |
|
||||
| 1.64.0 | [Pavol Loffay](https://github.com/pavolloffay) |
|
||||
| 1.65.0 | [Israel Blancas](https://github.com/iblancasa) |
|
||||
| 1.66.0 | [Ruben Vargas](https://github.com/rubenvp8510) |
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
// Package v1 contains API Schema definitions for the jaegertracing.io v1 API group
|
||||
// +kubebuilder:object:generate=true
|
||||
// +groupName=jaegertracing.io
|
||||
package v1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/controller-runtime/pkg/scheme"
|
||||
)
|
||||
|
||||
var (
|
||||
// GroupVersion is group version used to register these objects
|
||||
GroupVersion = schema.GroupVersion{Group: "jaegertracing.io", Version: "v1"}
|
||||
|
||||
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
|
||||
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
|
||||
|
||||
// AddToScheme adds the types in this group-version to the given scheme.
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
)
|
|
@ -1,164 +0,0 @@
|
|||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultElasticsearchName = "elasticsearch"
|
||||
)
|
||||
|
||||
// log is for logging in this package.
|
||||
var (
|
||||
jaegerlog = logf.Log.WithName("jaeger-resource")
|
||||
cl client.Client
|
||||
)
|
||||
|
||||
// SetupWebhookWithManager adds Jaeger webook to the manager.
|
||||
func (j *Jaeger) SetupWebhookWithManager(mgr ctrl.Manager) error {
|
||||
cl = mgr.GetClient()
|
||||
return ctrl.NewWebhookManagedBy(mgr).
|
||||
For(j).
|
||||
Complete()
|
||||
}
|
||||
|
||||
//+kubebuilder:webhook:path=/mutate-jaegertracing-io-v1-jaeger,mutating=true,failurePolicy=fail,sideEffects=None,groups=jaegertracing.io,resources=jaegers,verbs=create;update,versions=v1,name=mjaeger.kb.io,admissionReviewVersions={v1}
|
||||
|
||||
func (j *Jaeger) objsWithOptions() []*Options {
|
||||
return []*Options{
|
||||
&j.Spec.AllInOne.Options, &j.Spec.Query.Options, &j.Spec.Collector.Options,
|
||||
&j.Spec.Ingester.Options, &j.Spec.Agent.Options, &j.Spec.Storage.Options,
|
||||
}
|
||||
}
|
||||
|
||||
// Default implements webhook.Defaulter so a webhook will be registered for the type
|
||||
func (j *Jaeger) Default() {
|
||||
jaegerlog.Info("default", "name", j.Name)
|
||||
jaegerlog.Info("WARNING jaeger-agent is deprecated and will removed in v1.55.0. See https://github.com/jaegertracing/jaeger/issues/4739", "component", "agent")
|
||||
|
||||
if j.Spec.Storage.Elasticsearch.Name == "" {
|
||||
j.Spec.Storage.Elasticsearch.Name = defaultElasticsearchName
|
||||
}
|
||||
|
||||
if ShouldInjectOpenShiftElasticsearchConfiguration(j.Spec.Storage) && j.Spec.Storage.Elasticsearch.DoNotProvision {
|
||||
// check if ES instance exists
|
||||
es := &esv1.Elasticsearch{}
|
||||
err := cl.Get(context.Background(), types.NamespacedName{
|
||||
Namespace: j.Namespace,
|
||||
Name: j.Spec.Storage.Elasticsearch.Name,
|
||||
}, es)
|
||||
if errors.IsNotFound(err) {
|
||||
return
|
||||
}
|
||||
j.Spec.Storage.Elasticsearch.NodeCount = OpenShiftElasticsearchNodeCount(es.Spec)
|
||||
}
|
||||
|
||||
for _, opt := range j.objsWithOptions() {
|
||||
optCopy := opt.DeepCopy()
|
||||
if f := getAdditionalTLSFlags(optCopy.ToArgs()); f != nil {
|
||||
newOpts := optCopy.GenericMap()
|
||||
for k, v := range f {
|
||||
newOpts[k] = v
|
||||
}
|
||||
|
||||
if err := opt.parse(newOpts); err != nil {
|
||||
jaegerlog.Error(err, "name", j.Name, "method", "Option.Parse")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
|
||||
//+kubebuilder:webhook:path=/validate-jaegertracing-io-v1-jaeger,mutating=false,failurePolicy=fail,sideEffects=None,groups=jaegertracing.io,resources=jaegers,verbs=create;update,versions=v1,name=vjaeger.kb.io,admissionReviewVersions={v1}
|
||||
|
||||
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
|
||||
func (j *Jaeger) ValidateCreate() (admission.Warnings, error) {
|
||||
jaegerlog.Info("validate create", "name", j.Name)
|
||||
return j.ValidateUpdate(nil)
|
||||
}
|
||||
|
||||
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
|
||||
func (j *Jaeger) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
|
||||
jaegerlog.Info("validate update", "name", j.Name)
|
||||
|
||||
if ShouldInjectOpenShiftElasticsearchConfiguration(j.Spec.Storage) && j.Spec.Storage.Elasticsearch.DoNotProvision {
|
||||
// check if ES instance exists
|
||||
es := &esv1.Elasticsearch{}
|
||||
err := cl.Get(context.Background(), types.NamespacedName{
|
||||
Namespace: j.Namespace,
|
||||
Name: j.Spec.Storage.Elasticsearch.Name,
|
||||
}, es)
|
||||
if errors.IsNotFound(err) {
|
||||
return nil, fmt.Errorf("elasticsearch instance not found: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, opt := range j.objsWithOptions() {
|
||||
got := opt.DeepCopy().ToArgs()
|
||||
if f := getAdditionalTLSFlags(got); f != nil {
|
||||
return nil, fmt.Errorf("tls flags incomplete, got: %v", got)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
|
||||
func (j *Jaeger) ValidateDelete() (admission.Warnings, error) {
|
||||
jaegerlog.Info("validate delete", "name", j.Name)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// OpenShiftElasticsearchNodeCount returns total node count of Elasticsearch nodes.
|
||||
func OpenShiftElasticsearchNodeCount(spec esv1.ElasticsearchSpec) int32 {
|
||||
nodes := int32(0)
|
||||
for i := 0; i < len(spec.Nodes); i++ {
|
||||
nodes += spec.Nodes[i].NodeCount
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// ShouldInjectOpenShiftElasticsearchConfiguration returns true if OpenShift Elasticsearch is used and its configuration should be used.
|
||||
func ShouldInjectOpenShiftElasticsearchConfiguration(s JaegerStorageSpec) bool {
|
||||
if s.Type != JaegerESStorage {
|
||||
return false
|
||||
}
|
||||
_, ok := s.Options.Map()["es.server-urls"]
|
||||
return !ok
|
||||
}
|
||||
|
||||
var (
|
||||
tlsFlag = regexp.MustCompile("--.*tls.*=")
|
||||
tlsFlagIdx = regexp.MustCompile("--.*tls")
|
||||
tlsEnabledExists = regexp.MustCompile("--.*tls.enabled")
|
||||
)
|
||||
|
||||
// getAdditionalTLSFlags returns additional tls arguments based on the argument
|
||||
// list. If no additional argument is needed, nil is returned.
|
||||
func getAdditionalTLSFlags(args []string) map[string]interface{} {
|
||||
var res map[string]interface{}
|
||||
for _, arg := range args {
|
||||
a := []byte(arg)
|
||||
if tlsEnabledExists.Match(a) {
|
||||
// NOTE: if flag exists, we are done.
|
||||
return nil
|
||||
}
|
||||
if tlsFlag.Match(a) && res == nil {
|
||||
idx := tlsFlagIdx.FindIndex(a)
|
||||
res = make(map[string]interface{})
|
||||
res[arg[idx[0]+2:idx[1]]+".enabled"] = "true"
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
|
@ -1,369 +0,0 @@
|
|||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
)
|
||||
|
||||
var (
|
||||
_ webhook.Defaulter = &Jaeger{}
|
||||
_ webhook.Validator = &Jaeger{}
|
||||
)
|
||||
|
||||
func TestDefault(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
objs []runtime.Object
|
||||
j *Jaeger
|
||||
expected *Jaeger
|
||||
}{
|
||||
{
|
||||
name: "set missing ES name",
|
||||
j: &Jaeger{
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &Jaeger{
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: "elasticsearch",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "set ES node count",
|
||||
objs: []runtime.Object{
|
||||
&corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "project1",
|
||||
},
|
||||
},
|
||||
&esv1.Elasticsearch{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-es",
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: esv1.ElasticsearchSpec{
|
||||
Nodes: []esv1.ElasticsearchNode{
|
||||
{
|
||||
NodeCount: 3,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
j: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Type: "elasticsearch",
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: "my-es",
|
||||
DoNotProvision: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Type: "elasticsearch",
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: "my-es",
|
||||
NodeCount: 3,
|
||||
DoNotProvision: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "do not set ES node count",
|
||||
j: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Type: "elasticsearch",
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: "my-es",
|
||||
DoNotProvision: false,
|
||||
NodeCount: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Type: "elasticsearch",
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: "my-es",
|
||||
NodeCount: 1,
|
||||
DoNotProvision: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "missing tls enable flag",
|
||||
j: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Type: JaegerMemoryStorage,
|
||||
Options: NewOptions(map[string]interface{}{"stuff.tls.test": "something"}),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Type: JaegerMemoryStorage,
|
||||
Options: NewOptions(
|
||||
map[string]interface{}{
|
||||
"stuff.tls.test": "something",
|
||||
"stuff.tls.enabled": "true",
|
||||
},
|
||||
),
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: defaultElasticsearchName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
require.NoError(t, esv1.AddToScheme(scheme.Scheme))
|
||||
require.NoError(t, AddToScheme(scheme.Scheme))
|
||||
fakeCl := fake.NewClientBuilder().WithRuntimeObjects(test.objs...).Build()
|
||||
cl = fakeCl
|
||||
|
||||
test.j.Default()
|
||||
assert.Equal(t, test.expected, test.j)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateDelete(t *testing.T) {
|
||||
warnings, err := new(Jaeger).ValidateDelete()
|
||||
assert.Nil(t, warnings)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
objsToCreate []runtime.Object
|
||||
current *Jaeger
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "ES instance exists",
|
||||
objsToCreate: []runtime.Object{
|
||||
&corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "project1",
|
||||
},
|
||||
},
|
||||
&esv1.Elasticsearch{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-es",
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: esv1.ElasticsearchSpec{
|
||||
Nodes: []esv1.ElasticsearchNode{
|
||||
{
|
||||
NodeCount: 3,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
current: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Type: "elasticsearch",
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: "my-es",
|
||||
DoNotProvision: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ES instance does not exist",
|
||||
objsToCreate: []runtime.Object{
|
||||
&corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "project1",
|
||||
},
|
||||
},
|
||||
},
|
||||
current: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Type: "elasticsearch",
|
||||
Elasticsearch: ElasticsearchSpec{
|
||||
Name: "my-es",
|
||||
DoNotProvision: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
err: `elasticsearch instance not found: elasticsearchs.logging.openshift.io "my-es" not found`,
|
||||
},
|
||||
{
|
||||
name: "missing tls options",
|
||||
current: &Jaeger{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "project1",
|
||||
},
|
||||
Spec: JaegerSpec{
|
||||
Storage: JaegerStorageSpec{
|
||||
Options: NewOptions(map[string]interface{}{
|
||||
"something.tls.else": "fails",
|
||||
}),
|
||||
Type: JaegerMemoryStorage,
|
||||
},
|
||||
},
|
||||
},
|
||||
err: `tls flags incomplete, got: [--something.tls.else=fails]`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
require.NoError(t, esv1.AddToScheme(scheme.Scheme))
|
||||
require.NoError(t, AddToScheme(scheme.Scheme))
|
||||
fakeCl := fake.NewClientBuilder().WithRuntimeObjects(test.objsToCreate...).Build()
|
||||
cl = fakeCl
|
||||
|
||||
warnings, err := test.current.ValidateCreate()
|
||||
if test.err != "" {
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, test.err, err.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Nil(t, warnings)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldDeployElasticsearch(t *testing.T) {
|
||||
tests := []struct {
|
||||
j JaegerStorageSpec
|
||||
expected bool
|
||||
}{
|
||||
{j: JaegerStorageSpec{}},
|
||||
{j: JaegerStorageSpec{Type: JaegerCassandraStorage}},
|
||||
{j: JaegerStorageSpec{Type: JaegerESStorage, Options: NewOptions(map[string]interface{}{"es.server-urls": "foo"})}},
|
||||
{j: JaegerStorageSpec{Type: JaegerESStorage}, expected: true},
|
||||
}
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
assert.Equal(t, test.expected, ShouldInjectOpenShiftElasticsearchConfiguration(test.j))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAdditionalTLSFlags(t *testing.T) {
|
||||
tt := []struct {
|
||||
name string
|
||||
args []string
|
||||
expect map[string]interface{}
|
||||
}{
|
||||
{
|
||||
name: "no tls flag",
|
||||
args: []string{"--something.else"},
|
||||
expect: nil,
|
||||
},
|
||||
{
|
||||
name: "already enabled",
|
||||
args: []string{"--something.tls.enabled=true", "--something.tls.else=abc"},
|
||||
expect: nil,
|
||||
},
|
||||
{
|
||||
name: "is disabled",
|
||||
args: []string{"--tls.enabled=false", "--something.else", "--something.tls.else=abc"},
|
||||
expect: nil,
|
||||
},
|
||||
{
|
||||
name: "must be enabled",
|
||||
args: []string{"--something.tls.else=abc"},
|
||||
expect: map[string]interface{}{
|
||||
"something.tls.enabled": "true",
|
||||
},
|
||||
},
|
||||
{
|
||||
// NOTE: we want to avoid something like:
|
||||
// --kafka.consumer.authentication=tls.enabled=true
|
||||
name: "enable consumer tls",
|
||||
args: []string{
|
||||
"--es.server-urls=http://elasticsearch:9200",
|
||||
"--kafka.consumer.authentication=tls",
|
||||
"--kafka.consumer.brokers=my-cluster-kafka-bootstrap:9093",
|
||||
"--kafka.consumer.tls.ca=/var/run/secrets/cluster-ca/ca.crt",
|
||||
"--kafka.consumer.tls.cert=/var/run/secrets/kafkauser/user.crt",
|
||||
"--kafka.consumer.tls.key=/var/run/secrets/kafkauser/user.key",
|
||||
},
|
||||
expect: map[string]interface{}{
|
||||
"kafka.consumer.tls.enabled": "true",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := getAdditionalTLSFlags(tc.args)
|
||||
if !cmp.Equal(tc.expect, got) {
|
||||
t.Error("err:", cmp.Diff(tc.expect, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
package v1
|
||||
|
||||
import (
|
||||
"github.com/go-logr/logr"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
)
|
||||
|
||||
// Logger returns a logger filled with context-related fields, such as Name and Namespace
|
||||
func (j *Jaeger) Logger() logr.Logger {
|
||||
return logf.Log.WithValues(
|
||||
"instance", j.Name,
|
||||
"namespace", j.Namespace,
|
||||
)
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.16 as builder
|
||||
|
||||
ARG JAEGER_VERSION
|
||||
ENV JAEGER_VERSION=${JAEGER_VERSION}
|
||||
|
||||
COPY . /go/src/github.com/jaegertracing/jaeger-operator/
|
||||
WORKDIR /go/src/github.com/jaegertracing/jaeger-operator
|
||||
|
||||
ARG GOPROXY
|
||||
# download deps before gobuild
|
||||
RUN go mod download -x
|
||||
|
||||
# Dockerfile `FROM --platform=${BUILDPLATFORM}` means
|
||||
# prepare image for build for matched BUILDPLATFORM, eq. linux/amd64
|
||||
# by this way, we could avoid to using qemu, which slow down compiling process.
|
||||
# and usefully for language who support multi-arch build like go.
|
||||
# see last part of https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
|
||||
ARG TARGETARCH
|
||||
# when --platform=linux/amd64,linux/arm64
|
||||
#
|
||||
# for $TARGETARCH in "amd64 arm64" do
|
||||
RUN make gobuild OUTPUT_BINARY=/go/bin/jaeger-operator-${TARGETARCH} GOARCH=${TARGETARCH}
|
||||
# done
|
||||
|
||||
FROM registry.access.redhat.com/ubi8/ubi
|
||||
|
||||
ENV OPERATOR=/usr/local/bin/jaeger-operator \
|
||||
USER_UID=1001 \
|
||||
USER_NAME=jaeger-operator
|
||||
|
||||
RUN INSTALL_PKGS=" \
|
||||
openssl \
|
||||
" && \
|
||||
yum install -y $INSTALL_PKGS && \
|
||||
rpm -V $INSTALL_PKGS && \
|
||||
yum clean all && \
|
||||
mkdir /tmp/_working_dir && \
|
||||
chmod og+w /tmp/_working_dir
|
||||
|
||||
COPY --from=builder /go/src/github.com/jaegertracing/jaeger-operator/scripts/* /scripts/
|
||||
|
||||
# install operator binary
|
||||
ARG TARGETARCH
|
||||
COPY --from=builder /go/bin/jaeger-operator-${TARGETARCH} ${OPERATOR}
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/jaeger-operator"]
|
||||
|
||||
USER ${USER_UID}:${USER_UID}
|
|
@ -1,19 +0,0 @@
|
|||
FROM scratch
|
||||
|
||||
# Core bundle labels.
|
||||
LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1
|
||||
LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/
|
||||
LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/
|
||||
LABEL operators.operatorframework.io.bundle.package.v1=jaeger
|
||||
LABEL operators.operatorframework.io.bundle.channels.v1=stable
|
||||
LABEL operators.operatorframework.io.bundle.channel.default.v1=stable
|
||||
LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.13.0+git
|
||||
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
|
||||
LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
|
||||
|
||||
# OpenShift specific labels.
|
||||
LABEL com.redhat.openshift.versions=v4.12
|
||||
|
||||
# Copy files to locations specified by labels.
|
||||
COPY bundle/manifests /manifests/
|
||||
COPY bundle/metadata /metadata/
|
|
@ -1,12 +0,0 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
name: jaeger-operator
|
||||
name: jaeger-operator-metrics-reader
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- /metrics
|
||||
verbs:
|
||||
- get
|
|
@ -1,18 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app.kubernetes.io/component: metrics
|
||||
name: jaeger-operator
|
||||
name: jaeger-operator-metrics
|
||||
spec:
|
||||
ports:
|
||||
- name: https
|
||||
port: 8443
|
||||
protocol: TCP
|
||||
targetPort: https
|
||||
selector:
|
||||
name: jaeger-operator
|
||||
status:
|
||||
loadBalancer: {}
|
|
@ -1,16 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
name: jaeger-operator
|
||||
name: jaeger-operator-webhook-service
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
protocol: TCP
|
||||
targetPort: 9443
|
||||
selector:
|
||||
name: jaeger-operator
|
||||
status:
|
||||
loadBalancer: {}
|
|
@ -1,21 +0,0 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
annotations:
|
||||
include.release.openshift.io/self-managed-high-availability: "true"
|
||||
include.release.openshift.io/single-node-developer: "true"
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
name: jaeger-operator
|
||||
name: prometheus
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
|
@ -1,18 +0,0 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
include.release.openshift.io/self-managed-high-availability: "true"
|
||||
include.release.openshift.io/single-node-developer: "true"
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
name: jaeger-operator
|
||||
name: prometheus
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus-k8s
|
||||
namespace: openshift-monitoring
|
|
@ -1,14 +0,0 @@
|
|||
annotations:
|
||||
# Core bundle annotations.
|
||||
operators.operatorframework.io.bundle.mediatype.v1: registry+v1
|
||||
operators.operatorframework.io.bundle.manifests.v1: manifests/
|
||||
operators.operatorframework.io.bundle.metadata.v1: metadata/
|
||||
operators.operatorframework.io.bundle.package.v1: jaeger
|
||||
operators.operatorframework.io.bundle.channels.v1: stable
|
||||
operators.operatorframework.io.bundle.channel.default.v1: stable
|
||||
operators.operatorframework.io.metrics.builder: operator-sdk-v1.13.0+git
|
||||
operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
|
||||
operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
|
||||
|
||||
# OpenShift annotations
|
||||
com.redhat.openshift.versions: v4.12
|
|
@ -1,70 +0,0 @@
|
|||
apiVersion: scorecard.operatorframework.io/v1alpha3
|
||||
kind: Configuration
|
||||
metadata:
|
||||
name: config
|
||||
stages:
|
||||
- parallel: false
|
||||
tests:
|
||||
- entrypoint:
|
||||
- scorecard-test
|
||||
- basic-check-spec
|
||||
image: quay.io/operator-framework/scorecard-test:v1.32.0
|
||||
labels:
|
||||
suite: basic
|
||||
test: basic-check-spec-test
|
||||
storage:
|
||||
spec:
|
||||
mountPath: {}
|
||||
- entrypoint:
|
||||
- scorecard-test
|
||||
- olm-bundle-validation
|
||||
image: quay.io/operator-framework/scorecard-test:v1.32.0
|
||||
labels:
|
||||
suite: olm
|
||||
test: olm-bundle-validation-test
|
||||
storage:
|
||||
spec:
|
||||
mountPath: {}
|
||||
- entrypoint:
|
||||
- scorecard-test
|
||||
- olm-crds-have-validation
|
||||
image: quay.io/operator-framework/scorecard-test:v1.32.0
|
||||
labels:
|
||||
suite: olm
|
||||
test: olm-crds-have-validation-test
|
||||
storage:
|
||||
spec:
|
||||
mountPath: {}
|
||||
- entrypoint:
|
||||
- scorecard-test
|
||||
- olm-crds-have-resources
|
||||
image: quay.io/operator-framework/scorecard-test:v1.32.0
|
||||
labels:
|
||||
suite: olm
|
||||
test: olm-crds-have-resources-test
|
||||
storage:
|
||||
spec:
|
||||
mountPath: {}
|
||||
- entrypoint:
|
||||
- scorecard-test
|
||||
- olm-spec-descriptors
|
||||
image: quay.io/operator-framework/scorecard-test:v1.32.0
|
||||
labels:
|
||||
suite: olm
|
||||
test: olm-spec-descriptors-test
|
||||
storage:
|
||||
spec:
|
||||
mountPath: {}
|
||||
- entrypoint:
|
||||
- scorecard-test
|
||||
- olm-status-descriptors
|
||||
image: quay.io/operator-framework/scorecard-test:v1.32.0
|
||||
labels:
|
||||
suite: olm
|
||||
test: olm-status-descriptors-test
|
||||
storage:
|
||||
spec:
|
||||
mountPath: {}
|
||||
storage:
|
||||
spec:
|
||||
mountPath: {}
|
|
@ -0,0 +1,13 @@
|
|||
package main
|
||||
|
||||
import "github.com/jaegertracing/jaeger-operator/cmd"
|
||||
|
||||
func main() {
|
||||
// Note that this file should be identical to the main.go at the root of the project
|
||||
// It would really be nice if this one here wouldn't be required, but the Operator SDK
|
||||
// requires it...
|
||||
// https://github.com/operator-framework/operator-sdk/blob/master/doc/migration/v0.1.0-migration-guide.md#copy-changes-from-maingo
|
||||
// > operator-sdk now expects cmd/manager/main.go to be present in Go operator projects.
|
||||
// > Go project-specific commands, ex. add [api, controller], will error if main.go is not found in its expected path.
|
||||
cmd.Execute()
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
# The following manifests contain a self-signed issuer CR and a certificate CR.
|
||||
# More document can be found at https://docs.cert-manager.io
|
||||
# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes.
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Issuer
|
||||
metadata:
|
||||
name: selfsigned-issuer
|
||||
namespace: system
|
||||
spec:
|
||||
selfSigned: {}
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
|
||||
namespace: system
|
||||
spec:
|
||||
# $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
|
||||
dnsNames:
|
||||
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
|
||||
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
|
||||
issuerRef:
|
||||
kind: Issuer
|
||||
name: selfsigned-issuer
|
||||
secretName: jaeger-operator-service-cert # this secret will not be prefixed, since it's not managed by kustomize
|
||||
subject:
|
||||
organizationalUnits:
|
||||
- "jaeger-operator"
|
|
@ -1,7 +0,0 @@
|
|||
resources:
|
||||
- certificate.yaml
|
||||
|
||||
namePrefix: jaeger-operator-
|
||||
|
||||
configurations:
|
||||
- kustomizeconfig.yaml
|
|
@ -1,16 +0,0 @@
|
|||
# This configuration is for teaching kustomize how to update name ref and var substitution
|
||||
nameReference:
|
||||
- kind: Issuer
|
||||
group: cert-manager.io
|
||||
fieldSpecs:
|
||||
- kind: Certificate
|
||||
group: cert-manager.io
|
||||
path: spec/issuerRef/name
|
||||
|
||||
varReference:
|
||||
- kind: Certificate
|
||||
group: cert-manager.io
|
||||
path: spec/commonName
|
||||
- kind: Certificate
|
||||
group: cert-manager.io
|
||||
path: spec/dnsNames
|
|
@ -1,23 +0,0 @@
|
|||
# This kustomization.yaml is not intended to be run by itself,
|
||||
# since it depends on service name and namespace that are out of this kustomize package.
|
||||
# It should be run by config/default
|
||||
resources:
|
||||
- bases/jaegertracing.io_jaegers.yaml
|
||||
#+kubebuilder:scaffold:crdkustomizeresource
|
||||
|
||||
patchesStrategicMerge:
|
||||
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
|
||||
# patches here are for enabling the conversion webhook for each CRD
|
||||
#- patches/webhook_in_jaegers.yaml
|
||||
#- patches/webhook_in_kafkas.yaml
|
||||
#+kubebuilder:scaffold:crdkustomizewebhookpatch
|
||||
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
|
||||
# patches here are for enabling the CA injection for each CRD
|
||||
- patches/cainjection_in_jaegers.yaml
|
||||
#- patches/cainjection_in_kafkas.yaml
|
||||
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
|
||||
|
||||
# the following config is for teaching kustomize how to do kustomization for CRDs.
|
||||
configurations:
|
||||
- kustomizeconfig.yaml
|
|
@ -1,19 +0,0 @@
|
|||
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
|
||||
nameReference:
|
||||
- kind: Service
|
||||
version: v1
|
||||
fieldSpecs:
|
||||
- kind: CustomResourceDefinition
|
||||
version: v1
|
||||
group: apiextensions.k8s.io
|
||||
path: spec/conversion/webhook/clientConfig/service/name
|
||||
|
||||
namespace:
|
||||
- kind: CustomResourceDefinition
|
||||
version: v1
|
||||
group: apiextensions.k8s.io
|
||||
path: spec/conversion/webhook/clientConfig/service/namespace
|
||||
create: false
|
||||
|
||||
varReference:
|
||||
- path: metadata/annotations
|
|
@ -1,7 +0,0 @@
|
|||
# The following patch adds a directive for certmanager to inject CA into the CRD
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
|
||||
name: jaegers.jaegertracing.io
|
|
@ -1,16 +0,0 @@
|
|||
# The following patch enables a conversion webhook for the CRD
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: jaegers.jaegertracing.io
|
||||
spec:
|
||||
conversion:
|
||||
strategy: Webhook
|
||||
webhook:
|
||||
clientConfig:
|
||||
service:
|
||||
namespace: system
|
||||
name: jaeger-operator-webhook-service
|
||||
path: /convert
|
||||
conversionReviewVersions:
|
||||
- v1
|
|
@ -1,69 +0,0 @@
|
|||
# Adds namespace to all resources.
|
||||
namespace: observability
|
||||
|
||||
# Value of this field is prepended to the
|
||||
# names of all resources, e.g. a deployment named
|
||||
# "wordpress" becomes "alices-wordpress".
|
||||
# Note that it should also match with the prefix (text before '-') of the namespace
|
||||
# field above.
|
||||
|
||||
# The prefix is not used here because the manager's deployment name is jaeger-operator
|
||||
# which means that the manifest would have to contain an empty name which is not allowed.
|
||||
#namePrefix: jaeger-operator-
|
||||
|
||||
# Labels to add to all resources and selectors.
|
||||
# https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/#labels
|
||||
commonLabels:
|
||||
name: jaeger-operator
|
||||
|
||||
bases:
|
||||
- ../crd
|
||||
- ../rbac
|
||||
- ../manager
|
||||
- ../webhook
|
||||
- ../certmanager
|
||||
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
|
||||
#- ../prometheus
|
||||
|
||||
patchesStrategicMerge:
|
||||
# Protect the /metrics endpoint by putting it behind auth.
|
||||
# If you want your controller-manager to expose the /metrics
|
||||
# endpoint w/o any authn/z, please comment the following line.
|
||||
- manager_auth_proxy_patch.yaml
|
||||
|
||||
- manager_webhook_patch.yaml
|
||||
- webhookcainjection_patch.yaml
|
||||
|
||||
# Mount the controller config file for loading manager configurations
|
||||
# through a ComponentConfig type
|
||||
#- manager_config_patch.yaml
|
||||
|
||||
# the following config is for teaching kustomize how to do var substitution
|
||||
vars:
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
|
||||
- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
|
||||
objref:
|
||||
kind: Certificate
|
||||
group: cert-manager.io
|
||||
version: v1
|
||||
name: serving-cert # this name should match the one in certificate.yaml
|
||||
fieldref:
|
||||
fieldpath: metadata.namespace
|
||||
- name: CERTIFICATE_NAME
|
||||
objref:
|
||||
kind: Certificate
|
||||
group: cert-manager.io
|
||||
version: v1
|
||||
name: serving-cert # this name should match the one in certificate.yaml
|
||||
- name: SERVICE_NAMESPACE # namespace of the service
|
||||
objref:
|
||||
kind: Service
|
||||
version: v1
|
||||
name: webhook-service
|
||||
fieldref:
|
||||
fieldpath: metadata.namespace
|
||||
- name: SERVICE_NAME
|
||||
objref:
|
||||
kind: Service
|
||||
version: v1
|
||||
name: webhook-service
|
|
@ -1,33 +0,0 @@
|
|||
# This patch inject a sidecar container which is a HTTP proxy for the
|
||||
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: jaeger-operator
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-rbac-proxy
|
||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
|
||||
args:
|
||||
- "--secure-listen-address=0.0.0.0:8443"
|
||||
- "--upstream=http://127.0.0.1:8383/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=0"
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
name: https
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 5m
|
||||
memory: 64Mi
|
||||
- name: jaeger-operator
|
||||
args:
|
||||
- "start"
|
||||
- "--health-probe-bind-address=:8081"
|
||||
- "--leader-elect"
|
|
@ -1,19 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: jaeger-operator
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: manager
|
||||
args:
|
||||
- "--config=controller_manager_config.yaml"
|
||||
volumeMounts:
|
||||
- name: manager-config
|
||||
mountPath: /controller_manager_config.yaml
|
||||
subPath: controller_manager_config.yaml
|
||||
volumes:
|
||||
- name: manager-config
|
||||
configMap:
|
||||
name: manager-config
|
|
@ -1,22 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: jaeger-operator
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: jaeger-operator
|
||||
ports:
|
||||
- containerPort: 9443
|
||||
name: webhook-server
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: /tmp/k8s-webhook-server/serving-certs
|
||||
name: cert
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: cert
|
||||
secret:
|
||||
defaultMode: 420
|
||||
secretName: jaeger-operator-service-cert
|
|
@ -1,15 +0,0 @@
|
|||
# This patch add annotation to admission webhook config and
|
||||
# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: mutating-webhook-configuration
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
|
||||
---
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: validating-webhook-configuration
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
|
|
@ -1,11 +0,0 @@
|
|||
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
|
||||
kind: ControllerManagerConfig
|
||||
health:
|
||||
healthProbeBindAddress: :8081
|
||||
metrics:
|
||||
bindAddress: 127.0.0.1:8080
|
||||
webhook:
|
||||
port: 9443
|
||||
leaderElection:
|
||||
leaderElect: true
|
||||
resourceName: 31e04290.jaegertracing.io
|
|
@ -1,8 +0,0 @@
|
|||
resources:
|
||||
- manager.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
images:
|
||||
- name: controller
|
||||
newName: quay.io/jaegertracing/jaeger-operator
|
||||
newTag: 1.65.0
|
|
@ -1,83 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: jaeger-operator
|
||||
labels:
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
strategy: {}
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
spec:
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
containers:
|
||||
- command:
|
||||
- /jaeger-operator
|
||||
args:
|
||||
- start
|
||||
- --leader-elect
|
||||
image: controller:latest
|
||||
name: jaeger-operator
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8081
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 8081
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
env:
|
||||
- name: WATCH_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.annotations['olm.targetNamespaces']
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: OPERATOR_NAME
|
||||
value: "jaeger-operator"
|
||||
- name: LOG-LEVEL
|
||||
value: DEBUG
|
||||
- name: KAFKA-PROVISIONING-MINIMAL
|
||||
value: "true"
|
||||
- name: LOG-LEVEL
|
||||
value: DEBUG
|
||||
- name: KAFKA-PROVISIONING-MINIMAL
|
||||
value: "true"
|
||||
- name: LOG-LEVEL
|
||||
value: DEBUG
|
||||
- name: KAFKA-PROVISIONING-MINIMAL
|
||||
value: "true"
|
||||
- name: LOG-LEVEL
|
||||
value: DEBUG
|
||||
- name: KAFKA-PROVISIONING-MINIMAL
|
||||
value: "true"
|
||||
- name: LOG-LEVEL
|
||||
value: DEBUG
|
||||
- name: KAFKA-PROVISIONING-MINIMAL
|
||||
value: "true"
|
||||
- name: LOG-LEVEL
|
||||
value: DEBUG
|
||||
- name: KAFKA-PROVISIONING-MINIMAL
|
||||
value: "true"
|
||||
- name: LOG-LEVEL
|
||||
value: DEBUG
|
||||
- name: KAFKA-PROVISIONING-MINIMAL
|
||||
value: "true"
|
||||
serviceAccountName: jaeger-operator
|
||||
terminationGracePeriodSeconds: 10
|
|
@ -1,27 +0,0 @@
|
|||
# These resources constitute the fully configured set of manifests
|
||||
# used to generate the 'manifests/' directory in a bundle.
|
||||
resources:
|
||||
- bases/jaeger-operator.clusterserviceversion.yaml
|
||||
- ../default
|
||||
- ../samples
|
||||
#- ../scorecard
|
||||
|
||||
# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
|
||||
# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
|
||||
# These patches remove the unnecessary "cert" volume and its manager container volumeMount.
|
||||
#patchesJson6902:
|
||||
#- target:
|
||||
# group: apps
|
||||
# version: v1
|
||||
# kind: Deployment
|
||||
# name: controller-manager
|
||||
# namespace: system
|
||||
# patch: |-
|
||||
# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
|
||||
# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
|
||||
# - op: remove
|
||||
# path: /spec/template/spec/containers/1/volumeMounts/0
|
||||
# # Remove the "cert" volume, since OLM will create and mount a set of certs.
|
||||
# # Update the indices in this path if adding or removing volumes in the manager's Deployment.
|
||||
# - op: remove
|
||||
# path: /spec/template/spec/volumes/0
|
|
@ -1,8 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- ../default
|
||||
|
||||
components:
|
||||
- ./patch
|
|
@ -1,40 +0,0 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1alpha1
|
||||
kind: Component
|
||||
patches:
|
||||
- patch: |-
|
||||
$patch: delete
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: jaeger-operator-metrics-reader
|
||||
- patch: |
|
||||
- op: replace
|
||||
path: /kind
|
||||
value: Role
|
||||
target:
|
||||
group: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
- patch: |
|
||||
- op: replace
|
||||
path: /roleRef/kind
|
||||
value: Role
|
||||
target:
|
||||
group: rbac.authorization.k8s.io
|
||||
kind: ClusterRoleBinding
|
||||
- patch: |
|
||||
- op: replace
|
||||
path: /kind
|
||||
value: RoleBinding
|
||||
target:
|
||||
group: rbac.authorization.k8s.io
|
||||
kind: ClusterRoleBinding
|
||||
|
||||
- target:
|
||||
group: apps
|
||||
version: v1
|
||||
name: jaeger-operator
|
||||
kind: Deployment
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/env/0/valueFrom/fieldRef/fieldPath
|
||||
value: metadata.namespace
|
|
@ -1,2 +0,0 @@
|
|||
resources:
|
||||
- monitor.yaml
|
|
@ -1,22 +0,0 @@
|
|||
|
||||
# Prometheus Monitor Service (Metrics)
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
name: jaeger-operator
|
||||
name: jaeger-operator-metrics-monitor
|
||||
spec:
|
||||
endpoints:
|
||||
- path: /metrics
|
||||
targetPort: 8443
|
||||
scheme: https
|
||||
interval: 30s
|
||||
scrapeTimeout: 10s
|
||||
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
selector:
|
||||
matchLabels:
|
||||
name: jaeger-operator
|
||||
app.kubernetes.io/component: metrics
|
|
@ -1,9 +0,0 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: jaeger-operator-metrics-reader
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "/metrics"
|
||||
verbs:
|
||||
- get
|
|
@ -1,17 +0,0 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: proxy-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
resources:
|
||||
- tokenreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
resources:
|
||||
- subjectaccessreviews
|
||||
verbs:
|
||||
- create
|
|
@ -1,11 +0,0 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: jaeger-operator-proxy-rolebinding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: proxy-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: jaeger-operator
|
|
@ -1,15 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
name: jaeger-operator
|
||||
app.kubernetes.io/component: metrics
|
||||
name: jaeger-operator-metrics
|
||||
spec:
|
||||
ports:
|
||||
- name: https
|
||||
port: 8443
|
||||
protocol: TCP
|
||||
targetPort: https
|
||||
selector:
|
||||
name: jaeger-operator
|
|
@ -1,24 +0,0 @@
|
|||
# permissions for end users to edit jaegers.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: jaeger-operator-editor-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- jaegertracing.io
|
||||
resources:
|
||||
- jaegers
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- jaegertracing.io
|
||||
resources:
|
||||
- jaegers/status
|
||||
verbs:
|
||||
- get
|
|
@ -1,20 +0,0 @@
|
|||
# permissions for end users to view jaegers.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: jaeger-operator-viewer-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- jaegertracing.io
|
||||
resources:
|
||||
- jaegers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- jaegertracing.io
|
||||
resources:
|
||||
- jaegers/status
|
||||
verbs:
|
||||
- get
|
|
@ -1,20 +0,0 @@
|
|||
resources:
|
||||
# All RBAC will be applied under this service account in
|
||||
# the deployment namespace. You may comment out this resource
|
||||
# if your manager will use a service account that exists at
|
||||
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
|
||||
# subjects if changing service account names.
|
||||
- service_account.yaml
|
||||
- role.yaml
|
||||
- role_binding.yaml
|
||||
- leader_election_role.yaml
|
||||
- leader_election_role_binding.yaml
|
||||
# Comment the following 4 lines if you want to disable
|
||||
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
|
||||
# which protects your /metrics endpoint.
|
||||
- auth_proxy_service.yaml
|
||||
- auth_proxy_role.yaml
|
||||
- auth_proxy_role_binding.yaml
|
||||
- auth_proxy_client_clusterrole.yaml
|
||||
- prometheus_role.yaml
|
||||
- prometheus_role_binding.yaml
|
|
@ -1,37 +0,0 @@
|
|||
# permissions to do leader election.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: leader-election-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
|
@ -1,18 +0,0 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
annotations:
|
||||
include.release.openshift.io/self-managed-high-availability: "true"
|
||||
include.release.openshift.io/single-node-developer: "true"
|
||||
name: prometheus
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
|
@ -1,16 +0,0 @@
|
|||
# Grant cluster-monitoring access to openshift-operators-redhat metrics
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: prometheus
|
||||
annotations:
|
||||
include.release.openshift.io/self-managed-high-availability: "true"
|
||||
include.release.openshift.io/single-node-developer: "true"
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus-k8s
|
||||
namespace: openshift-monitoring
|
|
@ -1,6 +0,0 @@
|
|||
apiVersion: jaegertracing.io/v1
|
||||
kind: "Jaeger"
|
||||
metadata:
|
||||
name: "my-jaeger"
|
||||
spec:
|
||||
strategy: allInOne
|
|
@ -1,4 +0,0 @@
|
|||
## Append samples you want in your CSV to this file as resources ##
|
||||
resources:
|
||||
- jaegertracing.io_v1_jaeger.yaml
|
||||
#+kubebuilder:scaffold:manifestskustomizesamples
|
|
@ -1,12 +0,0 @@
|
|||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: mutating-webhook-configuration
|
||||
webhooks:
|
||||
- name: deployment.sidecar-injector.jaegertracing.io
|
||||
objectSelector: # Skip resources with the name jaeger-operator
|
||||
matchExpressions:
|
||||
- key: name
|
||||
operator: NotIn
|
||||
values:
|
||||
- "jaeger-operator"
|
|
@ -1,11 +0,0 @@
|
|||
resources:
|
||||
- manifests.yaml
|
||||
- service.yaml
|
||||
|
||||
namePrefix: jaeger-operator-
|
||||
|
||||
configurations:
|
||||
- kustomizeconfig.yaml
|
||||
|
||||
patchesStrategicMerge:
|
||||
- deployment_inject_patch.yaml
|
|
@ -1,26 +0,0 @@
|
|||
# the following config is for teaching kustomize where to look at when substituting vars.
|
||||
# It requires kustomize v2.1.0 or newer to work properly.
|
||||
|
||||
nameReference:
|
||||
- kind: Service
|
||||
version: v1
|
||||
fieldSpecs:
|
||||
- kind: MutatingWebhookConfiguration
|
||||
group: admissionregistration.k8s.io
|
||||
path: webhooks/clientConfig/service/name
|
||||
- kind: ValidatingWebhookConfiguration
|
||||
group: admissionregistration.k8s.io
|
||||
path: webhooks/clientConfig/service/name
|
||||
|
||||
namespace:
|
||||
- kind: MutatingWebhookConfiguration
|
||||
group: admissionregistration.k8s.io
|
||||
path: webhooks/clientConfig/service/namespace
|
||||
create: true
|
||||
- kind: ValidatingWebhookConfiguration
|
||||
group: admissionregistration.k8s.io
|
||||
path: webhooks/clientConfig/service/namespace
|
||||
create: true
|
||||
|
||||
varReference:
|
||||
- path: metadata/annotations
|
|
@ -1,72 +0,0 @@
|
|||
---
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: mutating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /mutate-jaegertracing-io-v1-jaeger
|
||||
failurePolicy: Fail
|
||||
name: mjaeger.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- jaegertracing.io
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- jaegers
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /mutate-v1-deployment
|
||||
failurePolicy: Ignore
|
||||
name: deployment.sidecar-injector.jaegertracing.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apps
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- deployments
|
||||
sideEffects: None
|
||||
---
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: validating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /validate-jaegertracing-io-v1-jaeger
|
||||
failurePolicy: Fail
|
||||
name: vjaeger.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- jaegertracing.io
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- jaegers
|
||||
sideEffects: None
|
|
@ -1,10 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
protocol: TCP
|
||||
targetPort: 9443
|
|
@ -1,222 +0,0 @@
|
|||
package appsv1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
|
||||
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
|
||||
"github.com/jaegertracing/jaeger-operator/pkg/config/ca"
|
||||
"github.com/jaegertracing/jaeger-operator/pkg/inject"
|
||||
"github.com/jaegertracing/jaeger-operator/pkg/tracing"
|
||||
)
|
||||
|
||||
var _ webhook.AdmissionHandler = (*deploymentInterceptor)(nil)
|
||||
|
||||
// NewDeploymentInterceptorWebhook creates a new deployment mutating webhook to be registered
|
||||
func NewDeploymentInterceptorWebhook(c client.Client, decoder *admission.Decoder) webhook.AdmissionHandler {
|
||||
return &deploymentInterceptor{
|
||||
client: c,
|
||||
decoder: decoder,
|
||||
}
|
||||
}
|
||||
|
||||
// You need to ensure the path here match the path in the marker.
|
||||
// +kubebuilder:webhook:path=/mutate-v1-deployment,mutating=true,failurePolicy=ignore,groups="apps",resources=deployments,sideEffects=None,verbs=create;update,versions=v1,name=deployment.sidecar-injector.jaegertracing.io,admissionReviewVersions=v1
|
||||
|
||||
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=namespaces/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch
|
||||
|
||||
// deploymentInterceptor label pods if Sidecar is specified in deployment
|
||||
type deploymentInterceptor struct {
|
||||
client client.Client
|
||||
decoder *admission.Decoder
|
||||
}
|
||||
|
||||
func (d *deploymentInterceptor) shouldHandleDeployment(req admission.Request) bool {
|
||||
if namespaces := viper.GetString(v1.ConfigWatchNamespace); namespaces != v1.WatchAllNamespaces {
|
||||
for _, ns := range strings.Split(namespaces, ",") {
|
||||
if strings.EqualFold(ns, req.Namespace) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Handle adds a label to a generated pod if deployment or namespace provide annotaion
|
||||
func (d *deploymentInterceptor) Handle(ctx context.Context, req admission.Request) admission.Response {
|
||||
tracer := otel.GetTracerProvider().Tracer(v1.ReconciliationTracer)
|
||||
ctx, span := tracer.Start(ctx, "reconcileDeployment")
|
||||
span.SetAttributes(
|
||||
attribute.String("kind", req.Kind.String()),
|
||||
attribute.String("name", req.Name),
|
||||
attribute.String("namespace", req.Namespace),
|
||||
)
|
||||
|
||||
if !d.shouldHandleDeployment(req) {
|
||||
return admission.Allowed("not watching in namespace, we do not touch the deployment")
|
||||
}
|
||||
|
||||
defer span.End()
|
||||
|
||||
logger := log.Log.WithValues("namespace", req.Namespace)
|
||||
logger.V(-1).Info("verify deployment")
|
||||
|
||||
dep := &appsv1.Deployment{}
|
||||
err := d.decoder.Decode(req, dep)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to decode deployment")
|
||||
return admission.Errored(http.StatusBadRequest, err)
|
||||
}
|
||||
|
||||
if dep.Labels["app"] == "jaeger" && dep.Labels["app.kubernetes.io/component"] != "query" {
|
||||
// Don't touch jaeger deployments
|
||||
return admission.Allowed("is jaeger deployment, we do not touch it")
|
||||
}
|
||||
|
||||
ns := &corev1.Namespace{}
|
||||
err = d.client.Get(ctx, types.NamespacedName{Name: req.Namespace}, ns)
|
||||
// we shouldn't fail if the namespace object can't be obtained
|
||||
if err != nil {
|
||||
msg := "failed to get the namespace for the deployment, skipping injection based on namespace annotation"
|
||||
logger.Error(err, msg)
|
||||
span.AddEvent(msg, trace.WithAttributes(attribute.String("error", err.Error())))
|
||||
}
|
||||
|
||||
jaegers := &v1.JaegerList{}
|
||||
opts := []client.ListOption{}
|
||||
|
||||
if viper.GetString(v1.ConfigOperatorScope) == v1.OperatorScopeNamespace {
|
||||
opts = append(opts, client.InNamespace(viper.GetString(v1.ConfigWatchNamespace)))
|
||||
}
|
||||
|
||||
if err := d.client.List(ctx, jaegers, opts...); err != nil {
|
||||
logger.Error(err, "failed to get the available Jaeger pods")
|
||||
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))
|
||||
}
|
||||
|
||||
if inject.Needed(dep, ns) {
|
||||
jaeger := inject.Select(dep, ns, jaegers)
|
||||
if jaeger != nil && jaeger.GetDeletionTimestamp() == nil {
|
||||
logger := logger.WithValues(
|
||||
"jaeger", jaeger.Name,
|
||||
"jaeger-namespace", jaeger.Namespace,
|
||||
)
|
||||
if jaeger.Namespace != dep.Namespace {
|
||||
if err := reconcileConfigMaps(ctx, d.client, jaeger, dep); err != nil {
|
||||
const msg = "failed to reconcile config maps for the namespace"
|
||||
logger.Error(err, msg)
|
||||
span.AddEvent(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// a suitable jaeger instance was found! let's inject a sidecar pointing to it then
|
||||
// Verified that jaeger instance was found and is not marked for deletion.
|
||||
{
|
||||
msg := "injecting Jaeger Agent sidecar"
|
||||
logger.Info(msg)
|
||||
span.AddEvent(msg)
|
||||
}
|
||||
|
||||
envConfigMaps := corev1.ConfigMapList{}
|
||||
d.client.List(ctx, &envConfigMaps, client.InNamespace(dep.Namespace))
|
||||
dep = inject.Sidecar(jaeger, dep, inject.WithEnvFromConfigMaps(inject.GetConfigMapsMatchedEnvFromInDeployment(*dep, envConfigMaps.Items)))
|
||||
marshaledDeploy, err := json.Marshal(dep)
|
||||
if err != nil {
|
||||
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))
|
||||
}
|
||||
|
||||
return admission.PatchResponseFromRaw(req.Object.Raw, marshaledDeploy)
|
||||
}
|
||||
|
||||
const msg = "no suitable Jaeger instances found to inject a sidecar"
|
||||
span.AddEvent(msg)
|
||||
logger.V(-1).Info(msg)
|
||||
return admission.Allowed(msg)
|
||||
}
|
||||
|
||||
if hasAgent, _ := inject.HasJaegerAgent(dep); hasAgent {
|
||||
if _, hasLabel := dep.Labels[inject.Label]; hasLabel {
|
||||
const msg = "remove sidecar"
|
||||
logger.Info(msg)
|
||||
span.AddEvent(msg)
|
||||
inject.CleanSidecar(dep.Labels[inject.Label], dep)
|
||||
marshaledDeploy, err := json.Marshal(dep)
|
||||
if err != nil {
|
||||
return admission.Errored(http.StatusInternalServerError, tracing.HandleError(err, span))
|
||||
}
|
||||
|
||||
return admission.PatchResponseFromRaw(req.Object.Raw, marshaledDeploy)
|
||||
}
|
||||
}
|
||||
return admission.Allowed("no action needed")
|
||||
}
|
||||
|
||||
// deploymentInterceptor implements admission.DecoderInjector.
|
||||
// A decoder will be automatically injected.
|
||||
|
||||
// InjectDecoder injects the decoder.
|
||||
func (d *deploymentInterceptor) InjectDecoder(decoder *admission.Decoder) error {
|
||||
d.decoder = decoder
|
||||
return nil
|
||||
}
|
||||
|
||||
func reconcileConfigMaps(ctx context.Context, cl client.Client, jaeger *v1.Jaeger, dep *appsv1.Deployment) error {
|
||||
tracer := otel.GetTracerProvider().Tracer(v1.ReconciliationTracer)
|
||||
ctx, span := tracer.Start(ctx, "reconcileConfigMaps")
|
||||
defer span.End()
|
||||
|
||||
cms := []*corev1.ConfigMap{}
|
||||
if cm := ca.GetTrustedCABundle(jaeger); cm != nil {
|
||||
cms = append(cms, cm)
|
||||
}
|
||||
if cm := ca.GetServiceCABundle(jaeger); cm != nil {
|
||||
cms = append(cms, cm)
|
||||
}
|
||||
|
||||
for _, cm := range cms {
|
||||
if err := reconcileConfigMap(ctx, cl, cm, dep); err != nil {
|
||||
return tracing.HandleError(err, span)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func reconcileConfigMap(ctx context.Context, cl client.Client, cm *corev1.ConfigMap, dep *appsv1.Deployment) error {
|
||||
tracer := otel.GetTracerProvider().Tracer(v1.ReconciliationTracer)
|
||||
ctx, span := tracer.Start(ctx, "reconcileConfigMap")
|
||||
defer span.End()
|
||||
|
||||
// Update the namespace to be the same as the Deployment being injected
|
||||
cm.Namespace = dep.Namespace
|
||||
span.SetAttributes(attribute.String("name", cm.Name), attribute.String("namespace", cm.Namespace))
|
||||
|
||||
if err := cl.Create(ctx, cm); err != nil {
|
||||
if errors.IsAlreadyExists(err) {
|
||||
span.AddEvent("config map exists already")
|
||||
} else {
|
||||
return tracing.HandleError(err, span)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,436 +0,0 @@
|
|||
package appsv1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
jsonpatch "gomodules.xyz/jsonpatch/v2"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
|
||||
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
|
||||
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
|
||||
"github.com/jaegertracing/jaeger-operator/pkg/inject"
|
||||
)
|
||||
|
||||
func TestReconcileConfigMaps(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
existing []runtime.Object
|
||||
errors errorGroup
|
||||
expect error
|
||||
}{
|
||||
{
|
||||
desc: "all config maps missing",
|
||||
},
|
||||
{
|
||||
desc: "none missing",
|
||||
existing: []runtime.Object{
|
||||
&corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns1",
|
||||
Name: "my-instance-trusted-ca",
|
||||
},
|
||||
},
|
||||
&corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns1",
|
||||
Name: "my-instance-service-ca",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "can not create",
|
||||
errors: errorGroup{createErr: fmt.Errorf("ups, cant create things")},
|
||||
expect: fmt.Errorf("ups, cant create things"),
|
||||
existing: []runtime.Object{
|
||||
&corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns1",
|
||||
Name: "my-instance-trusted-ca",
|
||||
},
|
||||
},
|
||||
&corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns1",
|
||||
Name: "my-instance-service-ca",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tC := range testCases {
|
||||
t.Run(tC.desc, func(t *testing.T) {
|
||||
// prepare
|
||||
jaeger := v1.NewJaeger(types.NamespacedName{
|
||||
Namespace: "observability",
|
||||
Name: "my-instance",
|
||||
})
|
||||
dep := appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns1",
|
||||
Name: "my-dep",
|
||||
},
|
||||
}
|
||||
|
||||
cl := &failingClient{
|
||||
WithWatch: fake.NewClientBuilder().WithRuntimeObjects(tC.existing...).Build(),
|
||||
errors: tC.errors,
|
||||
}
|
||||
|
||||
autodetect.OperatorConfiguration.SetPlatform(autodetect.OpenShiftPlatform)
|
||||
|
||||
// test
|
||||
err := reconcileConfigMaps(context.Background(), cl, jaeger, &dep)
|
||||
|
||||
// verify
|
||||
assert.Equal(t, tC.expect, err)
|
||||
|
||||
cms := corev1.ConfigMapList{}
|
||||
err = cl.List(context.Background(), &cms)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, cms.Items, 2)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type failingClient struct {
|
||||
client.WithWatch
|
||||
|
||||
errors errorGroup
|
||||
}
|
||||
|
||||
type errorGroup struct {
|
||||
listErr error
|
||||
getErr error
|
||||
createErr error
|
||||
}
|
||||
|
||||
func (u *failingClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
|
||||
if u.errors.listErr != nil {
|
||||
return u.errors.listErr
|
||||
}
|
||||
return u.WithWatch.List(ctx, list, opts...)
|
||||
}
|
||||
|
||||
func (u *failingClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
|
||||
if u.errors.getErr != nil {
|
||||
return u.errors.getErr
|
||||
}
|
||||
return u.WithWatch.Get(ctx, key, obj, opts...)
|
||||
}
|
||||
|
||||
func (u *failingClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
|
||||
if u.errors.createErr != nil {
|
||||
return u.errors.createErr
|
||||
}
|
||||
return u.WithWatch.Create(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
func TestReconcilieDeployment(t *testing.T) {
|
||||
namespacedName := types.NamespacedName{
|
||||
Name: "jaeger-query",
|
||||
Namespace: "my-ns",
|
||||
}
|
||||
|
||||
jaeger := v1.NewJaeger(types.NamespacedName{
|
||||
Namespace: "observability",
|
||||
Name: "my-instance",
|
||||
})
|
||||
|
||||
s := scheme.Scheme
|
||||
s.AddKnownTypes(v1.GroupVersion, jaeger)
|
||||
s.AddKnownTypes(v1.GroupVersion, &v1.JaegerList{})
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
dep *appsv1.Deployment
|
||||
jaeger *v1.Jaeger
|
||||
resp admission.Response
|
||||
errors errorGroup
|
||||
emptyRequest bool
|
||||
watch_ns string
|
||||
}{
|
||||
{
|
||||
desc: "no content to decode",
|
||||
dep: &appsv1.Deployment{},
|
||||
resp: admission.Response{
|
||||
AdmissionResponse: admissionv1.AdmissionResponse{
|
||||
Allowed: false,
|
||||
Result: &metav1.Status{
|
||||
Message: "there is no content to decode",
|
||||
Code: 400,
|
||||
},
|
||||
},
|
||||
},
|
||||
emptyRequest: true,
|
||||
},
|
||||
{
|
||||
desc: "can not get namespaces and list jaegers",
|
||||
errors: errorGroup{
|
||||
listErr: fmt.Errorf("ups cant list"),
|
||||
getErr: fmt.Errorf("ups cant get"),
|
||||
},
|
||||
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Name,
|
||||
Namespace: namespacedName.Namespace,
|
||||
Annotations: map[string]string{},
|
||||
Labels: map[string]string{
|
||||
"app": "not jaeger",
|
||||
},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{{
|
||||
Name: "only_container",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
resp: admission.Response{
|
||||
AdmissionResponse: admissionv1.AdmissionResponse{
|
||||
Allowed: false,
|
||||
Result: &metav1.Status{
|
||||
Message: "ups cant list",
|
||||
Code: 500,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should not remove the instance from a jaeger component",
|
||||
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Name,
|
||||
Namespace: namespacedName.Namespace,
|
||||
Annotations: map[string]string{},
|
||||
Labels: map[string]string{
|
||||
"app": "jaeger",
|
||||
},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{{
|
||||
Name: "only_container",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
resp: admission.Response{
|
||||
AdmissionResponse: admissionv1.AdmissionResponse{
|
||||
Allowed: true,
|
||||
Result: &metav1.Status{
|
||||
Message: "is jaeger deployment, we do not touch it",
|
||||
Code: 200,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should remove the instance",
|
||||
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Name,
|
||||
Namespace: namespacedName.Namespace,
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{{
|
||||
Name: "only_container",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
resp: admission.Response{
|
||||
Patches: []jsonpatch.JsonPatchOperation{
|
||||
{
|
||||
Operation: "remove",
|
||||
Path: "/metadata/labels",
|
||||
},
|
||||
{
|
||||
Operation: "remove",
|
||||
Path: "/spec/template/spec/containers/1",
|
||||
},
|
||||
},
|
||||
AdmissionResponse: admissionv1.AdmissionResponse{
|
||||
Allowed: true,
|
||||
PatchType: func() *admissionv1.PatchType { str := admissionv1.PatchTypeJSONPatch; return &str }(),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should inject but no jaeger instace found",
|
||||
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Name,
|
||||
Namespace: namespacedName.Namespace,
|
||||
Annotations: map[string]string{
|
||||
inject.Annotation: "true",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"app": "something",
|
||||
},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{{
|
||||
Name: "only_container",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
resp: admission.Response{
|
||||
AdmissionResponse: admissionv1.AdmissionResponse{
|
||||
Allowed: true,
|
||||
Result: &metav1.Status{
|
||||
Message: "no suitable Jaeger instances found to inject a sidecar",
|
||||
Code: 200,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should inject but empty instance - no patch",
|
||||
dep: inject.Sidecar(jaeger, &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Name,
|
||||
Namespace: namespacedName.Namespace,
|
||||
Annotations: map[string]string{
|
||||
inject.Annotation: "true",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"app": "something",
|
||||
},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{{
|
||||
Name: "only_container",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
resp: admission.Response{
|
||||
Patches: []jsonpatch.JsonPatchOperation{},
|
||||
AdmissionResponse: admissionv1.AdmissionResponse{
|
||||
Allowed: true,
|
||||
},
|
||||
},
|
||||
jaeger: &v1.Jaeger{},
|
||||
},
|
||||
{
|
||||
desc: "should not touch deployment on other namespaces != watch_namespaces",
|
||||
dep: &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Name,
|
||||
Namespace: namespacedName.Namespace,
|
||||
Annotations: map[string]string{},
|
||||
Labels: map[string]string{
|
||||
"app": "not jaeger",
|
||||
},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{},
|
||||
},
|
||||
resp: admission.Response{
|
||||
AdmissionResponse: admissionv1.AdmissionResponse{
|
||||
Allowed: true,
|
||||
Result: &metav1.Status{
|
||||
Message: "not watching in namespace, we do not touch the deployment",
|
||||
Code: 200,
|
||||
},
|
||||
},
|
||||
},
|
||||
watch_ns: "my-other-ns, other-ns-2",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
viper.Set(v1.ConfigWatchNamespace, tc.watch_ns)
|
||||
defer viper.Reset()
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
res := []runtime.Object{tc.dep, ns}
|
||||
if tc.jaeger != nil {
|
||||
res = append(res, tc.jaeger)
|
||||
}
|
||||
|
||||
cl := &failingClient{
|
||||
WithWatch: fake.NewClientBuilder().WithRuntimeObjects(res...).Build(),
|
||||
errors: tc.errors,
|
||||
}
|
||||
|
||||
decoder := admission.NewDecoder(scheme.Scheme)
|
||||
r := NewDeploymentInterceptorWebhook(cl, decoder)
|
||||
|
||||
req := admission.Request{}
|
||||
if !tc.emptyRequest {
|
||||
req = admission.Request{
|
||||
AdmissionRequest: admissionv1.AdmissionRequest{
|
||||
Name: tc.dep.Name,
|
||||
Namespace: tc.dep.Namespace,
|
||||
Object: runtime.RawExtension{
|
||||
Raw: func() []byte {
|
||||
var buf bytes.Buffer
|
||||
if getErr := json.NewEncoder(&buf).Encode(tc.dep); getErr != nil {
|
||||
t.Fatal(getErr)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}(),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
resp := r.Handle(context.Background(), req)
|
||||
|
||||
assert.Len(t, resp.Patches, len(tc.resp.Patches))
|
||||
sort.Slice(resp.Patches, func(i, j int) bool {
|
||||
return resp.Patches[i].Path < resp.Patches[j].Path
|
||||
})
|
||||
sort.Slice(tc.resp.Patches, func(i, j int) bool {
|
||||
return tc.resp.Patches[i].Path < tc.resp.Patches[j].Path
|
||||
})
|
||||
|
||||
assert.Equal(t, tc.resp, resp)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
package appsv1
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/jaegertracing/jaeger-operator/pkg/controller/namespace"
|
||||
)
|
||||
|
||||
// NamespaceReconciler reconciles a Deployment object
|
||||
type NamespaceReconciler struct {
|
||||
reconcilier *namespace.ReconcileNamespace
|
||||
}
|
||||
|
||||
// NewNamespaceReconciler creates a new namespace reconcilier controller
|
||||
func NewNamespaceReconciler(client client.Client, clientReader client.Reader, scheme *runtime.Scheme) *NamespaceReconciler {
|
||||
return &NamespaceReconciler{
|
||||
reconcilier: namespace.New(client, clientReader, scheme),
|
||||
}
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=namespaces/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch
|
||||
|
||||
// Reconcile namespace resource
|
||||
func (r *NamespaceReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||
return r.reconcilier.Reconcile(request)
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
err := ctrl.NewControllerManagedBy(mgr).
|
||||
For(&corev1.Namespace{}).
|
||||
Complete(r)
|
||||
return err
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
package appsv1_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
|
||||
"github.com/jaegertracing/jaeger-operator/controllers/appsv1"
|
||||
)
|
||||
|
||||
func TestNamespaceControllerRegisterWithManager(t *testing.T) {
|
||||
t.Skip("this test requires a real cluster, otherwise the GetConfigOrDie will die")
|
||||
|
||||
// prepare
|
||||
mgr, err := manager.New(k8sconfig.GetConfigOrDie(), manager.Options{})
|
||||
require.NoError(t, err)
|
||||
reconciler := appsv1.NewNamespaceReconciler(
|
||||
k8sClient,
|
||||
k8sClient,
|
||||
testScheme,
|
||||
)
|
||||
|
||||
// test
|
||||
err = reconciler.SetupWithManager(mgr)
|
||||
|
||||
// verify
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestNewNamespaceInstance(t *testing.T) {
|
||||
// prepare
|
||||
nsn := types.NamespacedName{Name: "my-instance", Namespace: "default"}
|
||||
reconciler := appsv1.NewNamespaceReconciler(
|
||||
k8sClient,
|
||||
k8sClient,
|
||||
testScheme,
|
||||
)
|
||||
|
||||
instance := v1.NewJaeger(nsn)
|
||||
err := k8sClient.Create(context.Background(), instance)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := k8sreconcile.Request{
|
||||
NamespacedName: nsn,
|
||||
}
|
||||
|
||||
_, err = reconciler.Reconcile(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
}
|
|
@ -1,57 +0,0 @@
|
|||
package appsv1_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
var (
|
||||
k8sClient client.Client
|
||||
testEnv *envtest.Environment
|
||||
testScheme *runtime.Scheme = scheme.Scheme
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
if err != nil {
|
||||
fmt.Printf("failed to start testEnv: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := v1.AddToScheme(scheme.Scheme); err != nil {
|
||||
fmt.Printf("failed to register scheme: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// +kubebuilder:scaffold:scheme
|
||||
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme})
|
||||
if err != nil {
|
||||
fmt.Printf("failed to setup a Kubernetes client: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
code := m.Run()
|
||||
|
||||
err = testEnv.Stop()
|
||||
if err != nil {
|
||||
fmt.Printf("failed to stop testEnv: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
os.Exit(code)
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
package elasticsearch
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
|
||||
"k8s.io/client-go/discovery"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/jaegertracing/jaeger-operator/pkg/autodetect"
|
||||
"github.com/jaegertracing/jaeger-operator/pkg/controller/elasticsearch"
|
||||
)
|
||||
|
||||
// Reconciler reconciles a Deployment object
|
||||
type Reconciler struct {
|
||||
reconcilier *elasticsearch.ReconcileElasticsearch
|
||||
}
|
||||
|
||||
// NewReconciler creates a new deployment reconciler controller
|
||||
func NewReconciler(client client.Client, clientReader client.Reader) *Reconciler {
|
||||
return &Reconciler{
|
||||
reconcilier: elasticsearch.New(client, clientReader),
|
||||
}
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=logging.openshift.io,resources=elasticsearch,verbs=get;list;watch;create;update;patch;delete
|
||||
|
||||
// Reconcile deployment resource
|
||||
func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||
return r.reconcilier.Reconcile(ctx, request)
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
esCRDInstalled, err := isOpenShiftESCRDAvailable(mgr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if esCRDInstalled {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&esv1.Elasticsearch{}).
|
||||
Complete(r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const elasticsearchGroup = "logging.openshift.io"
|
||||
|
||||
func isOpenShiftESCRDAvailable(mgr ctrl.Manager) (bool, error) {
|
||||
dcl, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
apiLists, err := autodetect.AvailableAPIs(dcl, map[string]bool{elasticsearchGroup: true})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return autodetect.IsElasticsearchOperatorAvailable(apiLists), nil
|
||||
}
|
|
@ -1,77 +0,0 @@
|
|||
package elasticsearch_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
esv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
k8sconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
k8sreconcile "sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
|
||||
"github.com/jaegertracing/jaeger-operator/controllers/elasticsearch"
|
||||
)
|
||||
|
||||
func TestElasticSearchSetupWithManager(t *testing.T) {
|
||||
t.Skip("this test requires a real cluster, otherwise the GetConfigOrDie will die")
|
||||
|
||||
// prepare
|
||||
mgr, err := manager.New(k8sconfig.GetConfigOrDie(), manager.Options{})
|
||||
require.NoError(t, err)
|
||||
reconciler := elasticsearch.NewReconciler(
|
||||
k8sClient,
|
||||
k8sClient,
|
||||
)
|
||||
|
||||
// test
|
||||
err = reconciler.SetupWithManager(mgr)
|
||||
|
||||
// verify
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestNewElasticSearchInstance(t *testing.T) {
|
||||
// prepare
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-ns",
|
||||
},
|
||||
}
|
||||
|
||||
es := &esv1.Elasticsearch{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-es",
|
||||
Namespace: "test-ns",
|
||||
},
|
||||
}
|
||||
|
||||
jaeger := v1.NewJaeger(types.NamespacedName{
|
||||
Name: "test-jaeger",
|
||||
Namespace: "test-jaeger",
|
||||
})
|
||||
|
||||
esv1.AddToScheme(testScheme)
|
||||
v1.AddToScheme(testScheme)
|
||||
|
||||
client := fake.NewClientBuilder().WithRuntimeObjects(ns, es, jaeger).Build()
|
||||
reconciler := elasticsearch.NewReconciler(
|
||||
client,
|
||||
client,
|
||||
)
|
||||
|
||||
req := k8sreconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Name: "test-es",
|
||||
Namespace: "test-ns",
|
||||
},
|
||||
}
|
||||
|
||||
_, err := reconciler.Reconcile(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
}
|
|
@ -1,57 +0,0 @@
|
|||
package elasticsearch_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
v1 "github.com/jaegertracing/jaeger-operator/apis/v1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
var (
|
||||
k8sClient client.Client
|
||||
testEnv *envtest.Environment
|
||||
testScheme *runtime.Scheme = scheme.Scheme
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
if err != nil {
|
||||
fmt.Printf("failed to start testEnv: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := v1.AddToScheme(scheme.Scheme); err != nil {
|
||||
fmt.Printf("failed to register scheme: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// +kubebuilder:scaffold:scheme
|
||||
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme})
|
||||
if err != nil {
|
||||
fmt.Printf("failed to setup a Kubernetes client: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
code := m.Run()
|
||||
|
||||
err = testEnv.Stop()
|
||||
if err != nil {
|
||||
fmt.Printf("failed to stop testEnv: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
os.Exit(code)
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue