Compare commits
55 Commits
| SHA1 |
|---|
| d8ffedf554 |
| cd063ab390 |
| acc9bd2c2c |
| 689d22db2f |
| e042033165 |
| 6639cdc0b1 |
| 20dbbaf043 |
| 4865e1c1cc |
| 1f818f405f |
| 9dd4cb87cc |
| 5f4c66fff3 |
| 00c8ac17e5 |
| ca114d6dd1 |
| 6672bf3881 |
| afba12c519 |
| 443b0bceaf |
| ff002642ec |
| aedc7addc3 |
| 2fc3600372 |
| f9492dc5b8 |
| d39653b029 |
| d2466fed70 |
| 389c175874 |
| 90d8eba51e |
| 1c0cc674c9 |
| e729600f60 |
| a8a8a9e03e |
| 545903fbc7 |
| e2df64833a |
| f1c322f92a |
| 28ac44da86 |
| 6da3d6cc5f |
| 0d49a56089 |
| 7697135a64 |
| 3840266b9a |
| 13fde85eb5 |
| 4dd6b96a95 |
| c292ad93c5 |
| 0f5accf268 |
| 6b3bce23dd |
| 3387a748e5 |
| 7d6cffee8a |
| 6de6707008 |
| edb31de815 |
| b9a6fa1157 |
| 97592a043f |
| 74c39f385d |
| 0ba647a794 |
| 7c4af1bc91 |
| 93d77108d9 |
| 02e2947ab0 |
| 152ea334bf |
| 9da89159ed |
| 082efcd3fc |
| 236126bd98 |
```diff
@@ -47,7 +47,7 @@ jobs:
 export REGISTRY="docker.io/karmada"
 make image-${{ matrix.target }}
 - name: Run Trivy vulnerability scanner
-uses: aquasecurity/trivy-action@0.29.0
+uses: aquasecurity/trivy-action@0.32.0
 env:
 ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db
@@ -57,8 +57,9 @@ jobs:
 ignore-unfixed: true
 vuln-type: 'os,library'
 output: '${{ matrix.target }}:${{ matrix.karmada-version }}.trivy-results.sarif'
+cache: false
 - name: display scan results
-uses: aquasecurity/trivy-action@0.29.0
+uses: aquasecurity/trivy-action@0.32.0
 env:
 TRIVY_SKIP_DB_UPDATE: true # Avoid updating the vulnerability db as it was cached in the previous step.
 with:
@@ -66,6 +67,7 @@ jobs:
 format: 'table'
 ignore-unfixed: true
 vuln-type: 'os,library'
+cache: false
 - name: Upload Trivy scan results to GitHub Security tab
 uses: github/codeql-action/upload-sarif@v3
 with:
```
```diff
@@ -32,6 +32,10 @@ jobs:
 steps:
 - name: checkout code
 uses: actions/checkout@v4
+with:
+# fetch-depth:
+# 0 indicates all history for all branches and tags.
+fetch-depth: 0
 - name: install Go
 uses: actions/setup-go@v5
 with:
@@ -42,7 +46,7 @@ jobs:
 export REGISTRY="docker.io/karmada"
 make image-${{ matrix.target }}
 - name: Run Trivy vulnerability scanner
-uses: aquasecurity/trivy-action@0.29.0
+uses: aquasecurity/trivy-action@0.32.0
 env:
 ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db
@@ -52,8 +56,9 @@ jobs:
 ignore-unfixed: true
 vuln-type: 'os,library'
 output: 'trivy-results.sarif'
+cache: false
 - name: display scan results
-uses: aquasecurity/trivy-action@0.29.0
+uses: aquasecurity/trivy-action@0.32.0
 env:
 TRIVY_SKIP_DB_UPDATE: true # Avoid updating the vulnerability db as it was cached in the previous step.
 with:
@@ -61,6 +66,7 @@ jobs:
 format: 'table'
 ignore-unfixed: true
 vuln-type: 'os,library'
+cache: false
 - name: Upload Trivy scan results to GitHub Security tab
 uses: github/codeql-action/upload-sarif@v3
 with:
```
```diff
@@ -1,8 +1,8 @@
 # validate any chart changes under charts directory
 name: Chart
 env:
-HELM_VERSION: v3.11.2
-KUSTOMIZE_VERSION: 5.4.3
+HELM_VERSION: v3.17.3
+KUSTOMIZE_VERSION: 5.6.0
 on:
 push:
 # Exclude branches created by Dependabot to avoid triggering current workflow
```
```diff
@@ -167,7 +167,7 @@ jobs:
 steps:
 - uses: actions/checkout@v4
 - name: Generate sbom for karmada file system
-uses: aquasecurity/trivy-action@0.29.0
+uses: aquasecurity/trivy-action@0.32.0
 with:
 scan-type: 'fs'
 format: 'spdx'
```
```diff
@@ -182,6 +182,58 @@ data:
 {{- $.Files.Get $path | nindent 8 }}
 {{ end }}
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+name: {{ $name }}-hook-job
+namespace: {{ $namespace }}
+annotations:
+"helm.sh/hook": pre-install
+"helm.sh/hook-weight": "1"
+{{- if "karmada.preInstallJob.labels" }}
+labels:
+{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
+{{- end }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+name: {{ $name }}-hook-job
+annotations:
+"helm.sh/hook": pre-install
+"helm.sh/hook-weight": "1"
+{{- if "karmada.preInstallJob.labels" }}
+labels:
+{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
+{{- end }}
+rules:
+- apiGroups: ['*']
+resources: ['*']
+verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
+- nonResourceURLs: ['*']
+verbs: ["get"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+name: {{ $name }}-hook-job
+annotations:
+"helm.sh/hook": pre-install
+"helm.sh/hook-weight": "1"
+{{- if "karmada.preInstallJob.labels" }}
+labels:
+{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
+{{- end }}
+roleRef:
+apiGroup: rbac.authorization.k8s.io
+kind: ClusterRole
+name: {{ $name }}-hook-job
+subjects:
+- kind: ServiceAccount
+name: {{ $name }}-hook-job
+namespace: {{ $namespace }}
 
 {{- if eq .Values.certs.mode "custom" }}
 ---
 apiVersion: v1
@@ -446,56 +498,5 @@ spec:
 - name: configs
 emptyDir: {}
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-name: {{ $name }}-hook-job
-namespace: {{ $namespace }}
-annotations:
-"helm.sh/hook": pre-install
-"helm.sh/hook-weight": "1"
-{{- if "karmada.preInstallJob.labels" }}
-labels:
-{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
-{{- end }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-name: {{ $name }}-hook-job
-annotations:
-"helm.sh/hook": pre-install
-"helm.sh/hook-weight": "1"
-{{- if "karmada.preInstallJob.labels" }}
-labels:
-{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
-{{- end }}
-rules:
-- apiGroups: ['*']
-resources: ['*']
-verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
-- nonResourceURLs: ['*']
-verbs: ["get"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-name: {{ $name }}-hook-job
-annotations:
-"helm.sh/hook": pre-install
-"helm.sh/hook-weight": "1"
-{{- if "karmada.preInstallJob.labels" }}
-labels:
-{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
-{{- end }}
-roleRef:
-apiGroup: rbac.authorization.k8s.io
-kind: ClusterRole
-name: {{ $name }}-hook-job
-subjects:
-- kind: ServiceAccount
-name: {{ $name }}-hook-job
-namespace: {{ $namespace }}
 ---
 {{- end }}
 {{- end }}
```
```diff
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM alpine:3.21.3
+FROM alpine:3.22.1
 
 ARG BINARY
```
```diff
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM alpine:3.21.3
+FROM alpine:3.22.1
 
 ARG BINARY
 ARG TARGETPLATFORM
```
```diff
@@ -164,28 +164,25 @@ func run(ctx context.Context, opts *options.Options) error {
 ClusterConfig: clusterConfig,
 }
 
-id, err := util.ObtainClusterID(clusterKubeClient)
+registerOption.ClusterID, err = util.ObtainClusterID(clusterKubeClient)
 if err != nil {
 return err
 }
 
-ok, name, err := util.IsClusterIdentifyUnique(karmadaClient, id)
-if err != nil {
+if err = registerOption.Validate(karmadaClient, true); err != nil {
 return err
 }
 
-if !ok && opts.ClusterName != name {
-return fmt.Errorf("the same cluster has been registered with name %s", name)
-}
-
-registerOption.ClusterID = id
-
 clusterSecret, impersonatorSecret, err := util.ObtainCredentialsFromMemberCluster(clusterKubeClient, registerOption)
 if err != nil {
 return err
 }
-registerOption.Secret = *clusterSecret
-registerOption.ImpersonatorSecret = *impersonatorSecret
+if clusterSecret != nil {
+registerOption.Secret = *clusterSecret
+}
+if impersonatorSecret != nil {
+registerOption.ImpersonatorSecret = *impersonatorSecret
+}
 err = util.RegisterClusterInControllerPlane(registerOption, controlPlaneKubeClient, generateClusterInControllerPlane)
 if err != nil {
 return fmt.Errorf("failed to register with karmada control plane: %w", err)
@@ -261,8 +258,8 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 sharedFactory.WaitForCacheSync(stopChan)
 
 resourceInterpreter := resourceinterpreter.NewResourceInterpreter(controlPlaneInformerManager, serviceLister)
-if err := mgr.Add(resourceInterpreter); err != nil {
-return fmt.Errorf("failed to setup custom resource interpreter: %w", err)
+if err := resourceInterpreter.Start(context.Background()); err != nil {
+return fmt.Errorf("failed to start resource interpreter: %w", err)
 }
 
 objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSetForAgent, resourceInterpreter)
```
```diff
@@ -752,8 +752,8 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 sharedFactory.WaitForCacheSync(stopChan)
 
 resourceInterpreter := resourceinterpreter.NewResourceInterpreter(controlPlaneInformerManager, serviceLister)
-if err := mgr.Add(resourceInterpreter); err != nil {
-klog.Fatalf("Failed to setup custom resource interpreter: %v", err)
+if err := resourceInterpreter.Start(context.Background()); err != nil {
+klog.Fatalf("Failed to start resource interpreter: %v", err)
 }
 
 objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSet, resourceInterpreter)
```
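The two hunks above replace `mgr.Add(resourceInterpreter)` with a direct `resourceInterpreter.Start(context.Background())` call. Below is a minimal, self-contained sketch of the difference between the two lifecycles; the `fakeInterpreter` type is hypothetical and only mirrors the `Start(ctx) error` contract that controller-runtime's `Runnable` interface also uses — it is not Karmada's actual implementation.

```go
package main

import (
	"context"
	"fmt"
)

// runnable mirrors controller-runtime's Runnable contract: a component that
// is started with a context and returns an error.
type runnable interface {
	Start(ctx context.Context) error
}

// fakeInterpreter is a hypothetical stand-in for the resource interpreter.
type fakeInterpreter struct{}

func (f *fakeInterpreter) Start(ctx context.Context) error {
	// Real code would start informers/config loaders here and watch ctx.Done().
	fmt.Println("interpreter started")
	return nil
}

func main() {
	var r runnable = &fakeInterpreter{}

	// Old pattern (the removed lines): register with a manager, which calls
	// Start for us only once the manager itself starts:
	//   err := mgr.Add(r)

	// New pattern (the added lines): start the component directly and fail fast.
	if err := r.Start(context.Background()); err != nil {
		fmt.Printf("failed to start resource interpreter: %v\n", err)
	}
}
```

Starting the interpreter directly means it is ready before the controllers that depend on it are wired up, rather than waiting for the manager's start phase.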
```diff
@@ -70,4 +70,4 @@ kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f "${REP
 kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f "${REPO_ROOT}/operator/config/deploy/karmada-operator-deployment.yaml"
 
 # wait karmada-operator ready
-kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" wait --for=condition=Ready --timeout=30s pods -l karmada-app=karmada-operator -n ${KARMADA_SYSTEM_NAMESPACE}
+kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" wait --for=condition=Ready --timeout=30s pods -l app.kubernetes.io/name=karmada-operator -n ${KARMADA_SYSTEM_NAMESPACE}
```
```diff
@@ -84,7 +84,7 @@ cd -
 CRDTARBALL_URL="http://local"
 DATA_DIR="/var/lib/karmada"
 CRD_CACHE_DIR=$(getCrdsDir "${DATA_DIR}" "${CRDTARBALL_URL}")
-OPERATOR_POD_NAME=$(kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" get pods -n ${KARMADA_SYSTEM_NAMESPACE} -l karmada-app=karmada-operator -o custom-columns=NAME:.metadata.name --no-headers)
+OPERATOR_POD_NAME=$(kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" get pods -n ${KARMADA_SYSTEM_NAMESPACE} -l app.kubernetes.io/name=karmada-operator -o custom-columns=NAME:.metadata.name --no-headers)
 kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" exec -i ${OPERATOR_POD_NAME} -n ${KARMADA_SYSTEM_NAMESPACE} -- mkdir -p ${CRD_CACHE_DIR}
 kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" cp ${REPO_ROOT}/crds.tar.gz ${KARMADA_SYSTEM_NAMESPACE}/${OPERATOR_POD_NAME}:${CRD_CACHE_DIR}
```
```diff
@@ -77,6 +77,6 @@ cd -
 CRDTARBALL_URL="http://local"
 DATA_DIR="/var/lib/karmada"
 CRD_CACHE_DIR=$(getCrdsDir "${DATA_DIR}" "${CRDTARBALL_URL}")
-OPERATOR_POD_NAME=$(kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" get pods -n ${KARMADA_SYSTEM_NAMESPACE} -l karmada-app=karmada-operator -o custom-columns=NAME:.metadata.name --no-headers)
+OPERATOR_POD_NAME=$(kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" get pods -n ${KARMADA_SYSTEM_NAMESPACE} -l app.kubernetes.io/name=karmada-operator -o custom-columns=NAME:.metadata.name --no-headers)
 kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" exec -i ${OPERATOR_POD_NAME} -n ${KARMADA_SYSTEM_NAMESPACE} -- mkdir -p ${CRD_CACHE_DIR}
 kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" cp ${REPO_ROOT}/crds.tar.gz ${KARMADA_SYSTEM_NAMESPACE}/${OPERATOR_POD_NAME}:${CRD_CACHE_DIR}
```
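The script hunks above only swap the kubectl label selector from `karmada-app=karmada-operator` to `app.kubernetes.io/name=karmada-operator`. For reference, a hedged client-go sketch that looks up the operator pods the same way the updated scripts do; the kubeconfig handling is an assumption for illustration, not code from the repo:

```go
package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: the kubeconfig path comes from the environment, as the
	// scripts do with ${MAIN_KUBECONFIG}.
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// The new selector from the diff; the old scripts used karmada-app=karmada-operator.
	pods, err := client.CoreV1().Pods("karmada-system").List(context.Background(), metav1.ListOptions{
		LabelSelector: "app.kubernetes.io/name=karmada-operator",
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		// Equivalent of: -o custom-columns=NAME:.metadata.name --no-headers
		fmt.Println(p.Name)
	}
}
```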
```diff
@@ -3,7 +3,7 @@ kind: ClusterRole
 metadata:
 name: karmada-operator
 labels:
-karmada-app: karmada-operator
+app.kubernetes.io/name: karmada-operator
 rules:
 - apiGroups: ["coordination.k8s.io"]
 resources: ["leases"] # karmada-operator requires access to the Lease resource for leader election
```
```diff
@@ -3,7 +3,7 @@ kind: ClusterRoleBinding
 metadata:
 name: karmada-operator
 labels:
-karmada-app: karmada-operator
+app.kubernetes.io/name: karmada-operator
 roleRef:
 apiGroup: rbac.authorization.k8s.io
 kind: ClusterRole
```
```diff
@@ -4,16 +4,16 @@ metadata:
 name: karmada-operator
 namespace: karmada-system
 labels:
-karmada-app: karmada-operator
+app.kubernetes.io/name: karmada-operator
 spec:
 replicas: 1
 selector:
 matchLabels:
-karmada-app: karmada-operator
+app.kubernetes.io/name: karmada-operator
 template:
 metadata:
 labels:
-karmada-app: karmada-operator
+app.kubernetes.io/name: karmada-operator
 spec:
 containers:
 - name: karmada-operator
```
```diff
@@ -4,4 +4,4 @@ metadata:
 name: karmada-operator
 namespace: karmada-system
 labels:
-karmada-app: karmada-operator
+app.kubernetes.io/name: karmada-operator
```
```diff
@@ -121,6 +121,11 @@ const (
 
 // APIServiceName defines the karmada aggregated apiserver APIService resource name.
 APIServiceName = "v1alpha1.cluster.karmada.io"
+
+// AppNameLabel defines the recommended label for identifying an application.
+AppNameLabel = "app.kubernetes.io/name"
+// AppInstanceLabel defines the recommended label for identifying an application instance.
+AppInstanceLabel = "app.kubernetes.io/instance"
 )
 
 var (
```
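The new `AppNameLabel`/`AppInstanceLabel` constants back the label changes seen throughout the operator manifests in the following hunks. A small sketch of how such constants can be combined into a per-instance selector (assumed usage for illustration; in the operator the constants live in its own constants package):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// Mirrors the constants added in the hunk above.
const (
	AppNameLabel     = "app.kubernetes.io/name"
	AppInstanceLabel = "app.kubernetes.io/instance"
)

func main() {
	// Selector that matches one component of one Karmada instance, e.g. the
	// karmada-apiserver of an instance named "demo".
	sel := labels.Set{
		AppNameLabel:     "karmada-apiserver",
		AppInstanceLabel: "demo",
	}.AsSelector()

	// Prints: app.kubernetes.io/instance=demo,app.kubernetes.io/name=karmada-apiserver
	fmt.Println(sel.String())
}
```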
```diff
@@ -52,17 +52,18 @@ func EnsureKarmadaAggregatedAPIServer(client clientset.Interface, cfg *operatorv
 
 func installKarmadaAPIServer(client clientset.Interface, cfg *operatorv1alpha1.KarmadaAPIServer, etcdCfg *operatorv1alpha1.Etcd, name, namespace string, _ map[string]bool) error {
 apiserverDeploymentBytes, err := util.ParseTemplate(KarmadaApiserverDeployment, struct {
-DeploymentName, Namespace, Image, ImagePullPolicy string
-ServiceSubnet, KarmadaCertsSecret string
-Replicas *int32
+KarmadaInstanceName, DeploymentName, Namespace, Image, ImagePullPolicy string
+ServiceSubnet, KarmadaCertsSecret string
+Replicas *int32
 }{
-DeploymentName: util.KarmadaAPIServerName(name),
-Namespace: namespace,
-Image: cfg.Image.Name(),
-ImagePullPolicy: string(cfg.ImagePullPolicy),
-ServiceSubnet: *cfg.ServiceSubnet,
-KarmadaCertsSecret: util.KarmadaCertSecretName(name),
-Replicas: cfg.Replicas,
+KarmadaInstanceName: name,
+DeploymentName: util.KarmadaAPIServerName(name),
+Namespace: namespace,
+Image: cfg.Image.Name(),
+ImagePullPolicy: string(cfg.ImagePullPolicy),
+ServiceSubnet: *cfg.ServiceSubnet,
+KarmadaCertsSecret: util.KarmadaCertSecretName(name),
+Replicas: cfg.Replicas,
 })
 if err != nil {
 return fmt.Errorf("error when parsing karmadaApiserver deployment template: %w", err)
@@ -91,11 +92,12 @@ func installKarmadaAPIServer(client clientset.Interface, cfg *operatorv1alpha1.K
 
 func createKarmadaAPIServerService(client clientset.Interface, cfg *operatorv1alpha1.KarmadaAPIServer, name, namespace string) error {
 karmadaApiserverServiceBytes, err := util.ParseTemplate(KarmadaApiserverService, struct {
-ServiceName, Namespace, ServiceType string
+KarmadaInstanceName, ServiceName, Namespace, ServiceType string
 }{
-ServiceName: util.KarmadaAPIServerName(name),
-Namespace: namespace,
-ServiceType: string(cfg.ServiceType),
+KarmadaInstanceName: name,
+ServiceName: util.KarmadaAPIServerName(name),
+Namespace: namespace,
+ServiceType: string(cfg.ServiceType),
 })
 if err != nil {
 return fmt.Errorf("error when parsing karmadaApiserver serive template: %w", err)
@@ -117,17 +119,18 @@ func createKarmadaAPIServerService(client clientset.Interface, cfg *operatorv1al
 
 func installKarmadaAggregatedAPIServer(client clientset.Interface, cfg *operatorv1alpha1.KarmadaAggregatedAPIServer, etcdCfg *operatorv1alpha1.Etcd, name, namespace string, featureGates map[string]bool) error {
 aggregatedAPIServerDeploymentBytes, err := util.ParseTemplate(KarmadaAggregatedAPIServerDeployment, struct {
-DeploymentName, Namespace, Image, ImagePullPolicy string
-KubeconfigSecret, KarmadaCertsSecret string
-Replicas *int32
+KarmadaInstanceName, DeploymentName, Namespace, Image, ImagePullPolicy string
+KubeconfigSecret, KarmadaCertsSecret string
+Replicas *int32
 }{
-DeploymentName: util.KarmadaAggregatedAPIServerName(name),
-Namespace: namespace,
-Image: cfg.Image.Name(),
-ImagePullPolicy: string(cfg.ImagePullPolicy),
-KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaAggregatedAPIServerName(name)),
-KarmadaCertsSecret: util.KarmadaCertSecretName(name),
-Replicas: cfg.Replicas,
+KarmadaInstanceName: name,
+DeploymentName: util.KarmadaAggregatedAPIServerName(name),
+Namespace: namespace,
+Image: cfg.Image.Name(),
+ImagePullPolicy: string(cfg.ImagePullPolicy),
+KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaAggregatedAPIServerName(name)),
+KarmadaCertsSecret: util.KarmadaCertSecretName(name),
+Replicas: cfg.Replicas,
 })
 if err != nil {
 return fmt.Errorf("error when parsing karmadaAggregatedAPIServer deployment template: %w", err)
@@ -155,10 +158,11 @@ func installKarmadaAggregatedAPIServer(client clientset.Interface, cfg *operator
 
 func createKarmadaAggregatedAPIServerService(client clientset.Interface, name, namespace string) error {
 aggregatedAPIServerServiceBytes, err := util.ParseTemplate(KarmadaAggregatedAPIServerService, struct {
-ServiceName, Namespace string
+KarmadaInstanceName, ServiceName, Namespace string
 }{
-ServiceName: util.KarmadaAggregatedAPIServerName(name),
-Namespace: namespace,
+KarmadaInstanceName: name,
+ServiceName: util.KarmadaAggregatedAPIServerName(name),
+Namespace: namespace,
 })
 if err != nil {
 return fmt.Errorf("error when parsing karmadaAggregatedAPIServer serive template: %w", err)
```
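Each of the Go hunks above threads a new `KarmadaInstanceName` field through `util.ParseTemplate` and its anonymous parameter struct. The exact `ParseTemplate` helper is not shown in this diff, so the sketch below uses the standard library directly to illustrate the same pattern; the template text and the "demo-apiserver" value are illustrative stand-ins, not copies of the operator's manifests:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

const serviceTemplate = `
metadata:
  name: {{ .ServiceName }}
  labels:
    app.kubernetes.io/name: karmada-apiserver
    app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
`

// parseTemplate mimics what a util.ParseTemplate-style helper is assumed to do:
// execute a text/template against an arbitrary values struct.
func parseTemplate(tmpl string, values interface{}) ([]byte, error) {
	t, err := template.New("manifest").Parse(tmpl)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, values); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	out, err := parseTemplate(serviceTemplate, struct {
		KarmadaInstanceName, ServiceName string
	}{
		KarmadaInstanceName: "demo",           // the Karmada CR name
		ServiceName:         "demo-apiserver", // stands in for util.KarmadaAPIServerName(name)
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```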
```diff
@@ -23,19 +23,22 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
 labels:
-karmada-app: karmada-apiserver
 app.kubernetes.io/managed-by: karmada-operator
+app.kubernetes.io/name: karmada-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 name: {{ .DeploymentName }}
 namespace: {{ .Namespace }}
 spec:
 replicas: {{ .Replicas }}
 selector:
 matchLabels:
-karmada-app: karmada-apiserver
+app.kubernetes.io/name: karmada-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 template:
 metadata:
 labels:
-karmada-app: karmada-apiserver
+app.kubernetes.io/name: karmada-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 spec:
 automountServiceAccountToken: false
 containers:
@@ -69,7 +72,6 @@ spec:
 - --max-requests-inflight=1500
 - --max-mutating-requests-inflight=500
 - --v=4
-
 livenessProbe:
 failureThreshold: 8
 httpGet:
@@ -94,11 +96,15 @@ spec:
 podAntiAffinity:
 requiredDuringSchedulingIgnoredDuringExecution:
 - labelSelector:
-matchExpressions:
-- key: karmada-app
-operator: In
-values:
-- karmada-apiserver
+matchExpressions:
+- key: app.kubernetes.io/name
+operator: In
+values:
+- karmada-apiserver
+- key: app.kubernetes.io/instance
+operator: In
+values:
+- {{ .KarmadaInstanceName }}
 topologyKey: kubernetes.io/hostname
 ports:
 - containerPort: 5443
@@ -120,8 +126,9 @@ apiVersion: v1
 kind: Service
 metadata:
 labels:
-karmada-app: karmada-apiserver
 app.kubernetes.io/managed-by: karmada-operator
+app.kubernetes.io/name: karmada-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 name: {{ .ServiceName }}
 namespace: {{ .Namespace }}
 spec:
@@ -131,29 +138,33 @@ spec:
 protocol: TCP
 targetPort: 5443
 selector:
-karmada-app: karmada-apiserver
+app.kubernetes.io/name: karmada-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 type: {{ .ServiceType }}
 `
 
-// KarmadaAggregatedAPIServerDeployment is karmada aggreagated apiserver deployment manifest
+// KarmadaAggregatedAPIServerDeployment is karmada aggregated apiserver deployment manifest
 KarmadaAggregatedAPIServerDeployment = `
 apiVersion: apps/v1
 kind: Deployment
 metadata:
 labels:
-karmada-app: karmada-aggregated-apiserver
 app.kubernetes.io/managed-by: karmada-operator
+app.kubernetes.io/name: karmada-aggregated-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 name: {{ .DeploymentName }}
 namespace: {{ .Namespace }}
 spec:
 replicas: {{ .Replicas }}
 selector:
 matchLabels:
-karmada-app: karmada-aggregated-apiserver
+app.kubernetes.io/name: karmada-aggregated-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 template:
 metadata:
 labels:
-karmada-app: karmada-aggregated-apiserver
+app.kubernetes.io/name: karmada-aggregated-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 spec:
 automountServiceAccountToken: false
 containers:
@@ -185,14 +196,16 @@ spec:
 secret:
 secretName: {{ .KarmadaCertsSecret }}
 `
 
 // KarmadaAggregatedAPIServerService is karmada aggregated APIServer Service manifest
 KarmadaAggregatedAPIServerService = `
 apiVersion: v1
 kind: Service
 metadata:
 labels:
-karmada-app: karmada-aggregated-apiserver
 app.kubernetes.io/managed-by: karmada-operator
+app.kubernetes.io/name: karmada-aggregated-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 name: {{ .ServiceName }}
 namespace: {{ .Namespace }}
 spec:
@@ -201,7 +214,8 @@ spec:
 protocol: TCP
 targetPort: 443
 selector:
-karmada-app: karmada-aggregated-apiserver
+app.kubernetes.io/name: karmada-aggregated-apiserver
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 type: ClusterIP
 `
 )
```
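The podAntiAffinity hunk above swaps the single `karmada-app` match expression for two expressions keyed on the recommended labels, so co-scheduling is avoided per component *and* per Karmada instance. In Go API types, the equivalent structure looks roughly like the sketch below (illustrative only, not code from the operator):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	instance := "demo" // corresponds to {{ .KarmadaInstanceName }} in the template

	antiAffinity := corev1.PodAntiAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
			LabelSelector: &metav1.LabelSelector{
				MatchExpressions: []metav1.LabelSelectorRequirement{
					{
						Key:      "app.kubernetes.io/name",
						Operator: metav1.LabelSelectorOpIn,
						Values:   []string{"karmada-apiserver"},
					},
					{
						Key:      "app.kubernetes.io/instance",
						Operator: metav1.LabelSelectorOpIn,
						Values:   []string{instance},
					},
				},
			},
			TopologyKey: "kubernetes.io/hostname",
		}},
	}

	fmt.Printf("%+v\n", antiAffinity)
}
```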
```diff
@@ -84,17 +84,18 @@ func getComponentManifests(name, namespace string, featureGates map[string]bool,
 
 func getKubeControllerManagerManifest(name, namespace string, cfg *operatorv1alpha1.KubeControllerManager) (*appsv1.Deployment, error) {
 kubeControllerManagerBytes, err := util.ParseTemplate(KubeControllerManagerDeployment, struct {
-DeploymentName, Namespace, Image, ImagePullPolicy string
-KarmadaCertsSecret, KubeconfigSecret string
-Replicas *int32
+KarmadaInstanceName, DeploymentName, Namespace, Image, ImagePullPolicy string
+KarmadaCertsSecret, KubeconfigSecret string
+Replicas *int32
 }{
-DeploymentName: util.KubeControllerManagerName(name),
-Namespace: namespace,
-Image: cfg.Image.Name(),
-ImagePullPolicy: string(cfg.ImagePullPolicy),
-KarmadaCertsSecret: util.KarmadaCertSecretName(name),
-KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KubeControllerManagerName(name)),
-Replicas: cfg.Replicas,
+KarmadaInstanceName: name,
+DeploymentName: util.KubeControllerManagerName(name),
+Namespace: namespace,
+Image: cfg.Image.Name(),
+ImagePullPolicy: string(cfg.ImagePullPolicy),
+KarmadaCertsSecret: util.KarmadaCertSecretName(name),
+KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KubeControllerManagerName(name)),
+Replicas: cfg.Replicas,
 })
 if err != nil {
 return nil, fmt.Errorf("error when parsing kube-controller-manager deployment template: %w", err)
@@ -113,17 +114,18 @@ func getKubeControllerManagerManifest(name, namespace string, cfg *operatorv1alp
 
 func getKarmadaControllerManagerManifest(name, namespace string, featureGates map[string]bool, cfg *operatorv1alpha1.KarmadaControllerManager) (*appsv1.Deployment, error) {
 karmadaControllerManagerBytes, err := util.ParseTemplate(KamradaControllerManagerDeployment, struct {
-Replicas *int32
-DeploymentName, Namespace, SystemNamespace string
-Image, ImagePullPolicy, KubeconfigSecret string
+Replicas *int32
+KarmadaInstanceName, DeploymentName, Namespace, SystemNamespace string
+Image, ImagePullPolicy, KubeconfigSecret string
 }{
-DeploymentName: util.KarmadaControllerManagerName(name),
-Namespace: namespace,
-SystemNamespace: constants.KarmadaSystemNamespace,
-Image: cfg.Image.Name(),
-ImagePullPolicy: string(cfg.ImagePullPolicy),
-KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaControllerManagerName(name)),
-Replicas: cfg.Replicas,
+KarmadaInstanceName: name,
+DeploymentName: util.KarmadaControllerManagerName(name),
+Namespace: namespace,
+SystemNamespace: constants.KarmadaSystemNamespace,
+Image: cfg.Image.Name(),
+ImagePullPolicy: string(cfg.ImagePullPolicy),
+KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaControllerManagerName(name)),
+Replicas: cfg.Replicas,
 })
 if err != nil {
 return nil, fmt.Errorf("error when parsing karmada-controller-manager deployment template: %w", err)
@@ -142,18 +144,19 @@ func getKarmadaControllerManagerManifest(name, namespace string, featureGates ma
 
 func getKarmadaSchedulerManifest(name, namespace string, featureGates map[string]bool, cfg *operatorv1alpha1.KarmadaScheduler) (*appsv1.Deployment, error) {
 karmadaSchedulerBytes, err := util.ParseTemplate(KarmadaSchedulerDeployment, struct {
-Replicas *int32
-DeploymentName, Namespace, SystemNamespace string
-Image, ImagePullPolicy, KubeconfigSecret, KarmadaCertsSecret string
+Replicas *int32
+KarmadaInstanceName, DeploymentName, Namespace, SystemNamespace string
+Image, ImagePullPolicy, KubeconfigSecret, KarmadaCertsSecret string
 }{
-DeploymentName: util.KarmadaSchedulerName(name),
-Namespace: namespace,
-SystemNamespace: constants.KarmadaSystemNamespace,
-Image: cfg.Image.Name(),
-ImagePullPolicy: string(cfg.ImagePullPolicy),
-KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaSchedulerName(name)),
-KarmadaCertsSecret: util.KarmadaCertSecretName(name),
-Replicas: cfg.Replicas,
+KarmadaInstanceName: name,
+DeploymentName: util.KarmadaSchedulerName(name),
+Namespace: namespace,
+SystemNamespace: constants.KarmadaSystemNamespace,
+Image: cfg.Image.Name(),
+ImagePullPolicy: string(cfg.ImagePullPolicy),
+KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaSchedulerName(name)),
+KarmadaCertsSecret: util.KarmadaCertSecretName(name),
+Replicas: cfg.Replicas,
 })
 if err != nil {
 return nil, fmt.Errorf("error when parsing karmada-scheduler deployment template: %w", err)
@@ -172,18 +175,19 @@ func getKarmadaSchedulerManifest(name, namespace string, featureGates map[string
 
 func getKarmadaDeschedulerManifest(name, namespace string, featureGates map[string]bool, cfg *operatorv1alpha1.KarmadaDescheduler) (*appsv1.Deployment, error) {
 karmadaDeschedulerBytes, err := util.ParseTemplate(KarmadaDeschedulerDeployment, struct {
-Replicas *int32
-DeploymentName, Namespace, SystemNamespace string
-Image, ImagePullPolicy, KubeconfigSecret, KarmadaCertsSecret string
+Replicas *int32
+KarmadaInstanceName, DeploymentName, Namespace, SystemNamespace string
+Image, ImagePullPolicy, KubeconfigSecret, KarmadaCertsSecret string
 }{
-DeploymentName: util.KarmadaDeschedulerName(name),
-Namespace: namespace,
-SystemNamespace: constants.KarmadaSystemNamespace,
-Image: cfg.Image.Name(),
-ImagePullPolicy: string(cfg.ImagePullPolicy),
-KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaDeschedulerName(name)),
-KarmadaCertsSecret: util.KarmadaCertSecretName(name),
-Replicas: cfg.Replicas,
+KarmadaInstanceName: name,
+DeploymentName: util.KarmadaDeschedulerName(name),
+Namespace: namespace,
+SystemNamespace: constants.KarmadaSystemNamespace,
+Image: cfg.Image.Name(),
+ImagePullPolicy: string(cfg.ImagePullPolicy),
+KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaDeschedulerName(name)),
+KarmadaCertsSecret: util.KarmadaCertSecretName(name),
+Replicas: cfg.Replicas,
 })
 if err != nil {
 return nil, fmt.Errorf("error when parsing karmada-descheduler deployment template: %w", err)
```
```diff
@@ -62,11 +62,12 @@ func installKarmadaEtcd(client clientset.Interface, name, namespace string, cfg
 }
 
 etcdStatefulSetBytes, err := util.ParseTemplate(KarmadaEtcdStatefulSet, struct {
-StatefulSetName, Namespace, Image, ImagePullPolicy, EtcdClientService string
-CertsSecretName, EtcdPeerServiceName string
-InitialCluster, EtcdDataVolumeName, EtcdCipherSuites string
-Replicas, EtcdListenClientPort, EtcdListenPeerPort int32
+KarmadaInstanceName, StatefulSetName, Namespace, Image, ImagePullPolicy, EtcdClientService string
+CertsSecretName, EtcdPeerServiceName string
+InitialCluster, EtcdDataVolumeName, EtcdCipherSuites string
+Replicas, EtcdListenClientPort, EtcdListenPeerPort int32
 }{
+KarmadaInstanceName: name,
 StatefulSetName: util.KarmadaEtcdName(name),
 Namespace: namespace,
 Image: cfg.Image.Name(),
@@ -103,9 +104,10 @@ func installKarmadaEtcd(client clientset.Interface, name, namespace string, cfg
 
 func createEtcdService(client clientset.Interface, name, namespace string) error {
 etcdServicePeerBytes, err := util.ParseTemplate(KarmadaEtcdPeerService, struct {
-ServiceName, Namespace string
-EtcdListenClientPort, EtcdListenPeerPort int32
+KarmadaInstanceName, ServiceName, Namespace string
+EtcdListenClientPort, EtcdListenPeerPort int32
 }{
+KarmadaInstanceName: name,
 ServiceName: util.KarmadaEtcdName(name),
 Namespace: namespace,
 EtcdListenClientPort: constants.EtcdListenClientPort,
@@ -125,9 +127,10 @@ func createEtcdService(client clientset.Interface, name, namespace string) error
 }
 
 etcdClientServiceBytes, err := util.ParseTemplate(KarmadaEtcdClientService, struct {
-ServiceName, Namespace string
-EtcdListenClientPort int32
+KarmadaInstanceName, ServiceName, Namespace string
+EtcdListenClientPort int32
 }{
+KarmadaInstanceName: name,
 ServiceName: util.KarmadaEtcdClientName(name),
 Namespace: namespace,
 EtcdListenClientPort: constants.EtcdListenClientPort,
```
```diff
@@ -23,7 +23,8 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
 labels:
-karmada-app: etcd
+app.kubernetes.io/name: etcd
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 app.kubernetes.io/managed-by: karmada-operator
 namespace: {{ .Namespace }}
 name: {{ .StatefulSetName }}
@@ -33,13 +34,13 @@ spec:
 podManagementPolicy: Parallel
 selector:
 matchLabels:
-karmada-app: etcd
+app.kubernetes.io/name: etcd
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 template:
 metadata:
 labels:
-karmada-app: etcd
-tolerations:
-- operator: Exists
+app.kubernetes.io/name: etcd
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 spec:
 automountServiceAccountToken: false
 containers:
@@ -49,7 +50,7 @@ spec:
 command:
 - /usr/local/bin/etcd
 - --name=$(KARMADA_ETCD_NAME)
-- --listen-client-urls= https://0.0.0.0:{{ .EtcdListenClientPort }}
+- --listen-client-urls=https://0.0.0.0:{{ .EtcdListenClientPort }}
 - --listen-peer-urls=http://0.0.0.0:{{ .EtcdListenPeerPort }}
 - --advertise-client-urls=https://{{ .EtcdClientService }}.{{ .Namespace }}.svc.cluster.local:{{ .EtcdListenClientPort }}
 - --initial-cluster={{ .InitialCluster }}
@@ -103,7 +104,8 @@ apiVersion: v1
 kind: Service
 metadata:
 labels:
-karmada-app: etcd
+app.kubernetes.io/name: etcd
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 app.kubernetes.io/managed-by: karmada-operator
 name: {{ .ServiceName }}
 namespace: {{ .Namespace }}
@@ -114,33 +116,36 @@ spec:
 protocol: TCP
 targetPort: {{ .EtcdListenClientPort }}
 selector:
-karmada-app: etcd
+app.kubernetes.io/name: etcd
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 type: ClusterIP
-`
+`
 
 // KarmadaEtcdPeerService is karmada etcd peer Service manifest
 KarmadaEtcdPeerService = `
-apiVersion: v1
-kind: Service
-metadata:
-labels:
-karmada-app: etcd
-app.kubernetes.io/managed-by: karmada-operator
-name: {{ .ServiceName }}
-namespace: {{ .Namespace }}
-spec:
-clusterIP: None
-ports:
-- name: client
-port: {{ .EtcdListenClientPort }}
-protocol: TCP
-targetPort: {{ .EtcdListenClientPort }}
-- name: server
-port: {{ .EtcdListenPeerPort }}
-protocol: TCP
-targetPort: {{ .EtcdListenPeerPort }}
-selector:
-karmada-app: etcd
-type: ClusterIP
-`
+apiVersion: v1
+kind: Service
+metadata:
+labels:
+app.kubernetes.io/name: etcd
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
+app.kubernetes.io/managed-by: karmada-operator
+name: {{ .ServiceName }}
+namespace: {{ .Namespace }}
+spec:
+clusterIP: None
+ports:
+- name: client
+port: {{ .EtcdListenClientPort }}
+protocol: TCP
+targetPort: {{ .EtcdListenClientPort }}
+- name: server
+port: {{ .EtcdListenPeerPort }}
+protocol: TCP
+targetPort: {{ .EtcdListenPeerPort }}
+selector:
+app.kubernetes.io/name: etcd
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
+type: ClusterIP
+`
 )
```
```diff
@@ -17,7 +17,7 @@ limitations under the License.
 package controlplane
 
 const (
-// KubeControllerManagerDeployment is KubeControllerManage deployment manifest
+// KubeControllerManagerDeployment is KubeControllerManager deployment manifest
 KubeControllerManagerDeployment = `
 apiVersion: apps/v1
 kind: Deployment
@@ -25,68 +25,76 @@ metadata:
 name: {{ .DeploymentName }}
 namespace: {{ .Namespace }}
 labels:
-karmada-app: kube-controller-manager
+app.kubernetes.io/name: kube-controller-manager
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 app.kubernetes.io/managed-by: karmada-operator
 spec:
 replicas: {{ .Replicas }}
 selector:
 matchLabels:
-karmada-app: kube-controller-manager
+app.kubernetes.io/name: kube-controller-manager
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 template:
 metadata:
 labels:
-karmada-app: kube-controller-manager
+app.kubernetes.io/name: kube-controller-manager
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 spec:
 automountServiceAccountToken: false
 affinity:
 podAntiAffinity:
 requiredDuringSchedulingIgnoredDuringExecution:
 - labelSelector:
-matchExpressions:
-- key: karmada-app
-operator: In
-values: ["kube-controller-manager"]
+matchExpressions:
+- key: app.kubernetes.io/name
+operator: In
+values:
+- kube-controller-manager
+- key: app.kubernetes.io/instance
+operator: In
+values:
+- {{ .KarmadaInstanceName }}
 topologyKey: kubernetes.io/hostname
 containers:
-- name: kube-controller-manager
-image: {{ .Image }}
-imagePullPolicy: {{ .ImagePullPolicy }}
-command:
-- kube-controller-manager
-- --allocate-node-cidrs=true
-- --kubeconfig=/etc/karmada/config/karmada.config
-- --authentication-kubeconfig=/etc/karmada/config/karmada.config
-- --authorization-kubeconfig=/etc/karmada/config/karmada.config
-- --bind-address=0.0.0.0
-- --client-ca-file=/etc/karmada/pki/ca.crt
-- --cluster-cidr=10.244.0.0/16
-- --cluster-name=karmada
-- --cluster-signing-cert-file=/etc/karmada/pki/ca.crt
-- --cluster-signing-key-file=/etc/karmada/pki/ca.key
-- --controllers=namespace,garbagecollector,serviceaccount-token,ttl-after-finished,bootstrapsigner,csrcleaner,csrsigning,clusterrole-aggregation
-- --leader-elect=true
-- --node-cidr-mask-size=24
-- --root-ca-file=/etc/karmada/pki/ca.crt
-- --service-account-private-key-file=/etc/karmada/pki/karmada.key
-- --service-cluster-ip-range=10.96.0.0/12
-- --use-service-account-credentials=true
-- --v=4
-livenessProbe:
-failureThreshold: 8
-httpGet:
-path: /healthz
-port: 10257
-scheme: HTTPS
-initialDelaySeconds: 10
-periodSeconds: 10
-successThreshold: 1
-timeoutSeconds: 15
-volumeMounts:
-- name: karmada-certs
-mountPath: /etc/karmada/pki
-readOnly: true
-- name: karmada-config
-mountPath: /etc/karmada/config
+- name: kube-controller-manager
+image: {{ .Image }}
+imagePullPolicy: {{ .ImagePullPolicy }}
+command:
+- kube-controller-manager
+- --allocate-node-cidrs=true
+- --kubeconfig=/etc/karmada/config/karmada.config
+- --authentication-kubeconfig=/etc/karmada/config/karmada.config
+- --authorization-kubeconfig=/etc/karmada/config/karmada.config
+- --bind-address=0.0.0.0
+- --client-ca-file=/etc/karmada/pki/ca.crt
+- --cluster-cidr=10.244.0.0/16
+- --cluster-name=karmada
+- --cluster-signing-cert-file=/etc/karmada/pki/ca.crt
+- --cluster-signing-key-file=/etc/karmada/pki/ca.key
+- --controllers=namespace,garbagecollector,serviceaccount-token,ttl-after-finished,bootstrapsigner,csrcleaner,csrsigning,clusterrole-aggregation
+- --leader-elect=true
+- --node-cidr-mask-size=24
+- --root-ca-file=/etc/karmada/pki/ca.crt
+- --service-account-private-key-file=/etc/karmada/pki/karmada.key
+- --service-cluster-ip-range=10.96.0.0/12
+- --use-service-account-credentials=true
+- --v=4
+livenessProbe:
+failureThreshold: 8
+httpGet:
+path: /healthz
+port: 10257
+scheme: HTTPS
+initialDelaySeconds: 10
+periodSeconds: 10
+successThreshold: 1
+timeoutSeconds: 15
+volumeMounts:
+- name: karmada-certs
+mountPath: /etc/karmada/pki
+readOnly: true
+- name: karmada-config
+mountPath: /etc/karmada/config
 volumes:
 - name: karmada-certs
 secret:
@@ -95,6 +103,7 @@ spec:
 secret:
 secretName: {{ .KubeconfigSecret }}
 `
 
 // KamradaControllerManagerDeployment is karmada controllerManager Deployment manifest
 KamradaControllerManagerDeployment = `
 apiVersion: apps/v1
@@ -103,55 +112,58 @@ metadata:
 name: {{ .DeploymentName }}
 namespace: {{ .Namespace }}
 labels:
-karmada-app: karmada-controller-manager
+app.kubernetes.io/name: karmada-controller-manager
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 app.kubernetes.io/managed-by: karmada-operator
 spec:
 replicas: {{ .Replicas }}
 selector:
 matchLabels:
-karmada-app: karmada-controller-manager
+app.kubernetes.io/name: karmada-controller-manager
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 template:
 metadata:
 labels:
-karmada-app: karmada-controller-manager
+app.kubernetes.io/name: karmada-controller-manager
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 spec:
 automountServiceAccountToken: false
 tolerations:
-- key: node-role.kubernetes.io/master
-operator: Exists
+- key: node-role.kubernetes.io/master
+operator: Exists
 containers:
-- name: karmada-controller-manager
-image: {{ .Image }}
-imagePullPolicy: {{ .ImagePullPolicy }}
-command:
-- /bin/karmada-controller-manager
-- --kubeconfig=/etc/karmada/config/karmada.config
-- --metrics-bind-address=:8080
-- --cluster-status-update-frequency=10s
-- --failover-eviction-timeout=30s
-- --leader-elect-resource-namespace={{ .SystemNamespace }}
-- --health-probe-bind-address=0.0.0.0:10357
-- --v=4
-livenessProbe:
-httpGet:
-path: /healthz
-port: 10357
-scheme: HTTP
-failureThreshold: 3
-initialDelaySeconds: 15
-periodSeconds: 15
-timeoutSeconds: 5
-ports:
-- containerPort: 8080
-name: metrics
-protocol: TCP
-volumeMounts:
-- name: karmada-config
-mountPath: /etc/karmada/config
+- name: karmada-controller-manager
+image: {{ .Image }}
+imagePullPolicy: {{ .ImagePullPolicy }}
+command:
+- /bin/karmada-controller-manager
+- --kubeconfig=/etc/karmada/config/karmada.config
+- --metrics-bind-address=:8080
+- --cluster-status-update-frequency=10s
+- --failover-eviction-timeout=30s
+- --leader-elect-resource-namespace={{ .SystemNamespace }}
+- --health-probe-bind-address=0.0.0.0:10357
+- --v=4
+livenessProbe:
+httpGet:
+path: /healthz
+port: 10357
+scheme: HTTP
+failureThreshold: 3
+initialDelaySeconds: 15
+periodSeconds: 15
+timeoutSeconds: 5
+ports:
+- containerPort: 8080
+name: metrics
+protocol: TCP
+volumeMounts:
+- name: karmada-config
+mountPath: /etc/karmada/config
 volumes:
-- name: karmada-config
-secret:
-secretName: {{ .KubeconfigSecret }}
+- name: karmada-config
+secret:
+secretName: {{ .KubeconfigSecret }}
 `
 
 // KarmadaSchedulerDeployment is KarmadaScheduler Deployment manifest
@@ -162,56 +174,59 @@ metadata:
 name: {{ .DeploymentName }}
 namespace: {{ .Namespace }}
 labels:
-karmada-app: karmada-scheduler
+app.kubernetes.io/name: karmada-scheduler
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 app.kubernetes.io/managed-by: karmada-operator
 spec:
 replicas: {{ .Replicas }}
 selector:
 matchLabels:
-karmada-app: karmada-scheduler
+app.kubernetes.io/name: karmada-scheduler
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 template:
 metadata:
 labels:
-karmada-app: karmada-scheduler
+app.kubernetes.io/name: karmada-scheduler
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 spec:
 automountServiceAccountToken: false
 tolerations:
 - key: node-role.kubernetes.io/master
 operator: Exists
 containers:
-- name: karmada-scheduler
-image: {{ .Image }}
-imagePullPolicy: {{ .ImagePullPolicy }}
-command:
-- /bin/karmada-scheduler
-- --kubeconfig=/etc/karmada/config/karmada.config
-- --metrics-bind-address=0.0.0.0:8080
-- --health-probe-bind-address=0.0.0.0:10351
-- --enable-scheduler-estimator=true
-- --leader-elect-resource-namespace={{ .SystemNamespace }}
-- --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt
-- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt
-- --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key
-- --v=4
-livenessProbe:
-httpGet:
-path: /healthz
-port: 10351
-scheme: HTTP
-failureThreshold: 3
-initialDelaySeconds: 15
-periodSeconds: 15
-timeoutSeconds: 5
-ports:
-- containerPort: 8080
-name: metrics
-protocol: TCP
-volumeMounts:
-- name: karmada-certs
-mountPath: /etc/karmada/pki
-readOnly: true
-- name: karmada-config
-mountPath: /etc/karmada/config
+- name: karmada-scheduler
+image: {{ .Image }}
+imagePullPolicy: {{ .ImagePullPolicy }}
+command:
+- /bin/karmada-scheduler
+- --kubeconfig=/etc/karmada/config/karmada.config
+- --metrics-bind-address=0.0.0.0:8080
+- --health-probe-bind-address=0.0.0.0:10351
+- --enable-scheduler-estimator=true
+- --leader-elect-resource-namespace={{ .SystemNamespace }}
+- --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt
+- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt
+- --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key
+- --v=4
+livenessProbe:
+httpGet:
+path: /healthz
+port: 10351
+scheme: HTTP
+failureThreshold: 3
+initialDelaySeconds: 15
+periodSeconds: 15
+timeoutSeconds: 5
+ports:
+- containerPort: 8080
+name: metrics
+protocol: TCP
+volumeMounts:
+- name: karmada-certs
+mountPath: /etc/karmada/pki
+readOnly: true
+- name: karmada-config
+mountPath: /etc/karmada/config
 volumes:
 - name: karmada-certs
 secret:
@@ -229,55 +244,58 @@ metadata:
 name: {{ .DeploymentName }}
 namespace: {{ .Namespace }}
 labels:
-karmada-app: karmada-descheduler
+app.kubernetes.io/name: karmada-descheduler
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 app.kubernetes.io/managed-by: karmada-operator
 spec:
 replicas: {{ .Replicas }}
 selector:
 matchLabels:
-karmada-app: karmada-descheduler
+app.kubernetes.io/name: karmada-descheduler
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 template:
 metadata:
 labels:
-karmada-app: karmada-descheduler
+app.kubernetes.io/name: karmada-descheduler
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 spec:
 automountServiceAccountToken: false
 tolerations:
 - key: node-role.kubernetes.io/master
 operator: Exists
 containers:
-- name: karmada-descheduler
-image: {{ .Image }}
-imagePullPolicy: {{ .ImagePullPolicy }}
-command:
-- /bin/karmada-descheduler
-- --kubeconfig=/etc/karmada/config/karmada.config
-- --metrics-bind-address=0.0.0.0:8080
-- --health-probe-bind-address=0.0.0.0:10358
-- --leader-elect-resource-namespace={{ .SystemNamespace }}
-- --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt
-- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt
-- --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key
-- --v=4
-livenessProbe:
-httpGet:
-path: /healthz
-port: 10358
-scheme: HTTP
-failureThreshold: 3
-initialDelaySeconds: 15
-periodSeconds: 15
-timeoutSeconds: 5
-ports:
-- containerPort: 8080
-name: metrics
-protocol: TCP
-volumeMounts:
-- name: karmada-certs
-mountPath: /etc/karmada/pki
-readOnly: true
-- name: karmada-config
-mountPath: /etc/karmada/config
+- name: karmada-descheduler
+image: {{ .Image }}
+imagePullPolicy: {{ .ImagePullPolicy }}
+command:
+- /bin/karmada-descheduler
+- --kubeconfig=/etc/karmada/config/karmada.config
+- --metrics-bind-address=0.0.0.0:8080
+- --health-probe-bind-address=0.0.0.0:10358
+- --leader-elect-resource-namespace={{ .SystemNamespace }}
+- --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt
+- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt
+- --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key
+- --v=4
+livenessProbe:
+httpGet:
+path: /healthz
+port: 10358
+scheme: HTTP
+failureThreshold: 3
+initialDelaySeconds: 15
+periodSeconds: 15
+timeoutSeconds: 5
+ports:
+- containerPort: 8080
+name: metrics
+protocol: TCP
+volumeMounts:
+- name: karmada-certs
+mountPath: /etc/karmada/pki
+readOnly: true
+- name: karmada-config
+mountPath: /etc/karmada/config
 volumes:
 - name: karmada-certs
 secret:
```
```diff
@@ -25,73 +25,76 @@ metadata:
 name: {{ .DeploymentName }}
 namespace: {{ .Namespace }}
 labels:
-karmada-app: karmada-metrics-adapter
+app.kubernetes.io/name: karmada-metrics-adapter
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 app.kubernetes.io/managed-by: karmada-operator
 spec:
 replicas: {{ .Replicas }}
 selector:
 matchLabels:
-karmada-app: karmada-metrics-adapter
+app.kubernetes.io/name: karmada-metrics-adapter
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 template:
 metadata:
 labels:
-karmada-app: karmada-metrics-adapter
+app.kubernetes.io/name: karmada-metrics-adapter
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 spec:
 automountServiceAccountToken: false
 tolerations:
 - key: node-role.kubernetes.io/master
 operator: Exists
 containers:
-- name: karmada-metrics-adapter
-image: {{ .Image }}
-imagePullPolicy: {{ .ImagePullPolicy }}
-command:
-- /bin/karmada-metrics-adapter
-- --kubeconfig=/etc/karmada/config/karmada.config
-- --metrics-bind-address=:8080
-- --authentication-kubeconfig=/etc/karmada/config/karmada.config
-- --authorization-kubeconfig=/etc/karmada/config/karmada.config
-- --client-ca-file=/etc/karmada/pki/ca.crt
-- --tls-cert-file=/etc/karmada/pki/karmada.crt
-- --tls-private-key-file=/etc/karmada/pki/karmada.key
-- --tls-min-version=VersionTLS13
-- --audit-log-path=-
-- --audit-log-maxage=0
-- --audit-log-maxbackup=0
-volumeMounts:
-- name: karmada-config
-mountPath: /etc/karmada/config
-- name: karmada-cert
-mountPath: /etc/karmada/pki
-readOnly: true
-readinessProbe:
-httpGet:
-path: /readyz
-port: 443
-scheme: HTTPS
-initialDelaySeconds: 1
-failureThreshold: 3
-periodSeconds: 3
-timeoutSeconds: 15
-livenessProbe:
-httpGet:
-path: /healthz
-port: 443
-scheme: HTTPS
-initialDelaySeconds: 10
-failureThreshold: 3
-periodSeconds: 10
-timeoutSeconds: 15
-resources:
-requests:
-cpu: 100m
+- name: karmada-metrics-adapter
+image: {{ .Image }}
+imagePullPolicy: {{ .ImagePullPolicy }}
+command:
+- /bin/karmada-metrics-adapter
+- --kubeconfig=/etc/karmada/config/karmada.config
+- --metrics-bind-address=:8080
+- --authentication-kubeconfig=/etc/karmada/config/karmada.config
+- --authorization-kubeconfig=/etc/karmada/config/karmada.config
+- --client-ca-file=/etc/karmada/pki/ca.crt
+- --tls-cert-file=/etc/karmada/pki/karmada.crt
+- --tls-private-key-file=/etc/karmada/pki/karmada.key
+- --tls-min-version=VersionTLS13
+- --audit-log-path=-
+- --audit-log-maxage=0
+- --audit-log-maxbackup=0
+volumeMounts:
+- name: karmada-config
+mountPath: /etc/karmada/config
+- name: karmada-cert
+mountPath: /etc/karmada/pki
+readOnly: true
+readinessProbe:
+httpGet:
+path: /readyz
+port: 443
+scheme: HTTPS
+initialDelaySeconds: 1
+failureThreshold: 3
+periodSeconds: 3
+timeoutSeconds: 15
+livenessProbe:
+httpGet:
+path: /healthz
+port: 443
+scheme: HTTPS
+initialDelaySeconds: 10
+failureThreshold: 3
+periodSeconds: 10
+timeoutSeconds: 15
+resources:
+requests:
+cpu: 100m
 volumes:
-- name: karmada-config
-secret:
-secretName: {{ .KubeconfigSecret }}
-- name: karmada-cert
-secret:
-secretName: {{ .KarmadaCertsSecret }}
+- name: karmada-config
+secret:
+secretName: {{ .KubeconfigSecret }}
+- name: karmada-cert
+secret:
+secretName: {{ .KarmadaCertsSecret }}
 `
 
 // KarmadaMetricsAdapterService is karmada-metrics-adapter service manifest
@@ -105,10 +108,11 @@ metadata:
 app.kubernetes.io/managed-by: karmada-operator
 spec:
 selector:
-karmada-app: karmada-metrics-adapter
+app.kubernetes.io/name: karmada-metrics-adapter
+app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
 ports:
-- port: 443
-protocol: TCP
-targetPort: 443
+- port: 443
+protocol: TCP
+targetPort: 443
 `
 )
```
@ -42,17 +42,18 @@ func EnsureKarmadaMetricAdapter(client clientset.Interface, cfg *operatorv1alpha
|
|||
|
||||
func installKarmadaMetricAdapter(client clientset.Interface, cfg *operatorv1alpha1.KarmadaMetricsAdapter, name, namespace string) error {
|
||||
metricAdapterBytes, err := util.ParseTemplate(KarmadaMetricsAdapterDeployment, struct {
|
||||
DeploymentName, Namespace, Image, ImagePullPolicy string
|
||||
KubeconfigSecret, KarmadaCertsSecret string
|
||||
Replicas *int32
|
||||
KarmadaInstanceName, DeploymentName, Namespace, Image, ImagePullPolicy string
|
||||
KubeconfigSecret, KarmadaCertsSecret string
|
||||
Replicas *int32
|
||||
}{
|
||||
DeploymentName: util.KarmadaMetricsAdapterName(name),
|
||||
Namespace: namespace,
|
||||
Image: cfg.Image.Name(),
|
||||
ImagePullPolicy: string(cfg.ImagePullPolicy),
|
||||
Replicas: cfg.Replicas,
|
||||
KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaMetricsAdapterName(name)),
|
||||
KarmadaCertsSecret: util.KarmadaCertSecretName(name),
|
||||
KarmadaInstanceName: name,
|
||||
DeploymentName: util.KarmadaMetricsAdapterName(name),
|
||||
Namespace: namespace,
|
||||
Image: cfg.Image.Name(),
|
||||
ImagePullPolicy: string(cfg.ImagePullPolicy),
|
||||
Replicas: cfg.Replicas,
|
||||
KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaMetricsAdapterName(name)),
|
||||
KarmadaCertsSecret: util.KarmadaCertSecretName(name),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error when parsing KarmadaMetricAdapter Deployment template: %w", err)
|
||||
|
@ -74,10 +75,11 @@ func installKarmadaMetricAdapter(client clientset.Interface, cfg *operatorv1alph
|
|||
|
||||
func createKarmadaMetricAdapterService(client clientset.Interface, name, namespace string) error {
|
||||
metricAdapterServiceBytes, err := util.ParseTemplate(KarmadaMetricsAdapterService, struct {
|
||||
ServiceName, Namespace string
|
||||
KarmadaInstanceName, ServiceName, Namespace string
|
||||
}{
|
||||
ServiceName: util.KarmadaMetricsAdapterName(name),
|
||||
Namespace: namespace,
|
||||
KarmadaInstanceName: name,
|
||||
ServiceName: util.KarmadaMetricsAdapterName(name),
|
||||
Namespace: namespace,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error when parsing KarmadaMetricAdapter Service template: %w", err)
|
||||
|
|
|
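The install functions above all follow the same pattern: a plain struct with the instance name, image, and secret names is rendered into the Deployment/Service manifest through util.ParseTemplate. For readers unfamiliar with that helper, the following is a minimal, hypothetical sketch of how such a helper can be built on Go's text/template; the real implementation in karmada-operator may differ in details such as error wording and template options.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// parseTemplate is a minimal stand-in for util.ParseTemplate: it renders a
// Go text/template against an arbitrary data struct and returns the bytes.
func parseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
	tmpl, err := template.New("manifest").Parse(strtmpl)
	if err != nil {
		return nil, fmt.Errorf("error when parsing template: %w", err)
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, obj); err != nil {
		return nil, fmt.Errorf("error when executing template: %w", err)
	}
	return buf.Bytes(), nil
}

func main() {
	// Field names mirror the structs used in the diff; values are examples.
	manifest, err := parseTemplate(`metadata:
  name: {{ .DeploymentName }}
  labels:
    app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
`, struct {
		KarmadaInstanceName, DeploymentName string
	}{KarmadaInstanceName: "demo", DeploymentName: "demo-karmada-metrics-adapter"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(manifest))
}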
@ -25,19 +25,22 @@ metadata:
name: {{ .DeploymentName }}
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: karmada-search
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
app.kubernetes.io/managed-by: karmada-operator
karmada-app: karmada-search
apiserver: "true"
spec:
selector:
matchLabels:
karmada-app: karmada-search
app.kubernetes.io/name: karmada-search
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
apiserver: "true"
replicas: {{ .Replicas }}
template:
metadata:
labels:
karmada-app: karmada-search
app.kubernetes.io/name: karmada-search
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
apiserver: "true"
spec:
automountServiceAccountToken: false

@ -91,8 +94,9 @@ metadata:
name: {{ .ServiceName }}
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: karmada-search
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
app.kubernetes.io/managed-by: karmada-operator
karmada-app: karmada-search
apiserver: "true"
spec:
ports:

@ -100,6 +104,8 @@ spec:
protocol: TCP
targetPort: 443
selector:
karmada-app: karmada-search
app.kubernetes.io/name: karmada-search
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
apiserver: "true"
`
)

@ -43,17 +43,18 @@ func EnsureKarmadaSearch(client clientset.Interface, cfg *operatorv1alpha1.Karma

func installKarmadaSearch(client clientset.Interface, cfg *operatorv1alpha1.KarmadaSearch, etcdCfg *operatorv1alpha1.Etcd, name, namespace string, _ map[string]bool) error {
searchDeploymentSetBytes, err := util.ParseTemplate(KarmadaSearchDeployment, struct {
DeploymentName, Namespace, Image, ImagePullPolicy, KarmadaCertsSecret string
KubeconfigSecret string
Replicas *int32
KarmadaInstanceName, DeploymentName, Namespace, Image, ImagePullPolicy, KarmadaCertsSecret string
KubeconfigSecret string
Replicas *int32
}{
DeploymentName: util.KarmadaSearchName(name),
Namespace: namespace,
Image: cfg.Image.Name(),
ImagePullPolicy: string(cfg.ImagePullPolicy),
KarmadaCertsSecret: util.KarmadaCertSecretName(name),
Replicas: cfg.Replicas,
KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaSearchName(name)),
KarmadaInstanceName: name,
DeploymentName: util.KarmadaSearchName(name),
Namespace: namespace,
Image: cfg.Image.Name(),
ImagePullPolicy: string(cfg.ImagePullPolicy),
KarmadaCertsSecret: util.KarmadaCertSecretName(name),
Replicas: cfg.Replicas,
KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaSearchName(name)),
})
if err != nil {
return fmt.Errorf("error when parsing KarmadaSearch Deployment template: %w", err)

@ -81,10 +82,11 @@ func installKarmadaSearch(client clientset.Interface, cfg *operatorv1alpha1.Karm

func createKarmadaSearchService(client clientset.Interface, name, namespace string) error {
searchServiceSetBytes, err := util.ParseTemplate(KarmadaSearchService, struct {
ServiceName, Namespace string
KarmadaInstanceName, ServiceName, Namespace string
}{
ServiceName: util.KarmadaSearchName(name),
Namespace: namespace,
KarmadaInstanceName: name,
ServiceName: util.KarmadaSearchName(name),
Namespace: namespace,
})
if err != nil {
return fmt.Errorf("error when parsing KarmadaSearch Service template: %w", err)
@ -25,59 +25,62 @@ metadata:
name: {{ .DeploymentName }}
namespace: {{ .Namespace }}
labels:
karmada-app: karmada-webhook
app.kubernetes.io/name: karmada-webhook
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
app.kubernetes.io/managed-by: karmada-operator
spec:
replicas: {{ .Replicas }}
selector:
matchLabels:
karmada-app: karmada-webhook
app.kubernetes.io/name: karmada-webhook
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
template:
metadata:
labels:
karmada-app: karmada-webhook
app.kubernetes.io/name: karmada-webhook
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
spec:
automountServiceAccountToken: false
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- name: karmada-webhook
image: {{ .Image }}
imagePullPolicy: {{ .ImagePullPolicy }}
command:
- /bin/karmada-webhook
- --kubeconfig=/etc/karmada/config/karmada.config
- --bind-address=0.0.0.0
- --metrics-bind-address=:8080
- --default-not-ready-toleration-seconds=30
- --default-unreachable-toleration-seconds=30
- --secure-port=8443
- --cert-dir=/var/serving-cert
- --v=4
ports:
- containerPort: 8443
- containerPort: 8080
name: metrics
protocol: TCP
volumeMounts:
- name: karmada-config
mountPath: /etc/karmada/config
- name: cert
mountPath: /var/serving-cert
readOnly: true
readinessProbe:
httpGet:
path: /readyz
port: 8443
scheme: HTTPS
- name: karmada-webhook
image: {{ .Image }}
imagePullPolicy: {{ .ImagePullPolicy }}
command:
- /bin/karmada-webhook
- --kubeconfig=/etc/karmada/config/karmada.config
- --bind-address=0.0.0.0
- --metrics-bind-address=:8080
- --default-not-ready-toleration-seconds=30
- --default-unreachable-toleration-seconds=30
- --secure-port=8443
- --cert-dir=/var/serving-cert
- --v=4
ports:
- containerPort: 8443
- containerPort: 8080
name: metrics
protocol: TCP
volumeMounts:
- name: karmada-config
mountPath: /etc/karmada/config
- name: cert
mountPath: /var/serving-cert
readOnly: true
readinessProbe:
httpGet:
path: /readyz
port: 8443
scheme: HTTPS
volumes:
- name: karmada-config
secret:
secretName: {{ .KubeconfigSecret }}
- name: cert
secret:
secretName: {{ .WebhookCertsSecret }}
- name: karmada-config
secret:
secretName: {{ .KubeconfigSecret }}
- name: cert
secret:
secretName: {{ .WebhookCertsSecret }}
`

// KarmadaWebhookService is karmada webhook service manifest

@ -88,12 +91,15 @@ metadata:
name: {{ .ServiceName }}
namespace: {{ .Namespace }}
labels:
app.kubernetes.io/name: karmada-webhook
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
app.kubernetes.io/managed-by: karmada-operator
spec:
selector:
karmada-app: karmada-webhook
app.kubernetes.io/name: karmada-webhook
app.kubernetes.io/instance: {{ .KarmadaInstanceName }}
ports:
- port: 443
targetPort: 8443
- port: 443
targetPort: 8443
`
)

@ -42,17 +42,18 @@ func EnsureKarmadaWebhook(client clientset.Interface, cfg *operatorv1alpha1.Karm

func installKarmadaWebhook(client clientset.Interface, cfg *operatorv1alpha1.KarmadaWebhook, name, namespace string, _ map[string]bool) error {
webhookDeploymentSetBytes, err := util.ParseTemplate(KarmadaWebhookDeployment, struct {
DeploymentName, Namespace, Image, ImagePullPolicy string
KubeconfigSecret, WebhookCertsSecret string
Replicas *int32
KarmadaInstanceName, DeploymentName, Namespace, Image, ImagePullPolicy string
KubeconfigSecret, WebhookCertsSecret string
Replicas *int32
}{
DeploymentName: util.KarmadaWebhookName(name),
Namespace: namespace,
Image: cfg.Image.Name(),
ImagePullPolicy: string(cfg.ImagePullPolicy),
Replicas: cfg.Replicas,
KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaWebhookName(name)),
WebhookCertsSecret: util.WebhookCertSecretName(name),
KarmadaInstanceName: name,
DeploymentName: util.KarmadaWebhookName(name),
Namespace: namespace,
Image: cfg.Image.Name(),
ImagePullPolicy: string(cfg.ImagePullPolicy),
Replicas: cfg.Replicas,
KubeconfigSecret: util.ComponentKarmadaConfigSecretName(util.KarmadaWebhookName(name)),
WebhookCertsSecret: util.WebhookCertSecretName(name),
})
if err != nil {
return fmt.Errorf("error when parsing KarmadaWebhook Deployment template: %w", err)

@ -75,10 +76,11 @@ func installKarmadaWebhook(client clientset.Interface, cfg *operatorv1alpha1.Kar

func createKarmadaWebhookService(client clientset.Interface, name, namespace string) error {
webhookServiceSetBytes, err := util.ParseTemplate(KarmadaWebhookService, struct {
ServiceName, Namespace string
KarmadaInstanceName, ServiceName, Namespace string
}{
ServiceName: util.KarmadaWebhookName(name),
Namespace: namespace,
KarmadaInstanceName: name,
ServiceName: util.KarmadaWebhookName(name),
Namespace: namespace,
})
if err != nil {
return fmt.Errorf("error when parsing KarmadaWebhook Service template: %w", err)
@ -40,14 +40,14 @@ var (
// the process will stop and return an error.
failureThreshold = 3

etcdLabels = labels.Set{"karmada-app": constants.Etcd}
karmadaApiserverLabels = labels.Set{"karmada-app": constants.KarmadaAPIServer}
karmadaAggregatedAPIServerLabels = labels.Set{"karmada-app": names.KarmadaAggregatedAPIServerComponentName}
kubeControllerManagerLabels = labels.Set{"karmada-app": constants.KubeControllerManager}
karmadaControllerManagerLabels = labels.Set{"karmada-app": names.KarmadaControllerManagerComponentName}
karmadaSchedulerLabels = labels.Set{"karmada-app": names.KarmadaSchedulerComponentName}
karmadaWebhookLabels = labels.Set{"karmada-app": names.KarmadaWebhookComponentName}
karmadaMetricAdapterLabels = labels.Set{"karmada-app": names.KarmadaMetricsAdapterComponentName}
etcdLabels = labels.Set{constants.AppNameLabel: constants.Etcd}
karmadaApiserverLabels = labels.Set{constants.AppNameLabel: constants.KarmadaAPIServer}
karmadaAggregatedAPIServerLabels = labels.Set{constants.AppNameLabel: names.KarmadaAggregatedAPIServerComponentName}
kubeControllerManagerLabels = labels.Set{constants.AppNameLabel: constants.KubeControllerManager}
karmadaControllerManagerLabels = labels.Set{constants.AppNameLabel: names.KarmadaControllerManagerComponentName}
karmadaSchedulerLabels = labels.Set{constants.AppNameLabel: names.KarmadaSchedulerComponentName}
karmadaWebhookLabels = labels.Set{constants.AppNameLabel: names.KarmadaWebhookComponentName}
karmadaMetricAdapterLabels = labels.Set{constants.AppNameLabel: names.KarmadaMetricsAdapterComponentName}
)

// NewCheckApiserverHealthTask init wait-apiserver task

@ -114,6 +114,7 @@ func runWaitControlPlaneSubTask(component string, ls labels.Set) func(r workflow
return errors.New("wait-controlPlane task invoked with an invalid data struct")
}

ls[constants.AppInstanceLabel] = data.GetName()
waiter := apiclient.NewKarmadaWaiter(nil, data.RemoteClient(), componentBeReadyTimeout)
if err := waiter.WaitForSomePods(ls.String(), data.GetNamespace(), 1); err != nil {
return fmt.Errorf("waiting for %s to ready timeout, err: %w", component, err)
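The wait-controlPlane subtask above now appends the instance label to the component selector before calling WaitForSomePods. As an illustration of the underlying idea (not Karmada's actual waiter implementation), a readiness wait over a label selector can be built from plain client-go, as sketched below; the function name and polling interval are assumptions.

package waiter

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForSomePods polls until at least `expected` pods matching the label
// selector report the Ready condition, or the timeout elapses.
func waitForSomePods(ctx context.Context, client kubernetes.Interface, namespace, selector string, expected int, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
		pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, nil // treat list errors as transient and keep polling
		}
		ready := 0
		for i := range pods.Items {
			for _, cond := range pods.Items[i].Status.Conditions {
				if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
					ready++
					break
				}
			}
		}
		return ready >= expected, nil
	})
}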
@ -196,7 +196,7 @@ func TestWaitForAPIService(t *testing.T) {

func TestWaitForPods(t *testing.T) {
name, namespace := "karmada-demo-apiserver", "test"
karmadaAPIServerLabels := labels.Set{"karmada-app": constants.KarmadaAPIServer}
karmadaAPIServerLabels := labels.Set{constants.AppNameLabel: constants.KarmadaAPIServer}
var replicas int32 = 2
tests := []struct {
name string

@ -263,7 +263,7 @@ func TestWaitForPods(t *testing.T) {

func TestWaitForSomePods(t *testing.T) {
name, namespace := "karmada-demo-apiserver", "test"
karmadaAPIServerLabels := labels.Set{"karmada-app": constants.KarmadaAPIServer}
karmadaAPIServerLabels := labels.Set{constants.AppNameLabel: constants.KarmadaAPIServer}
var replicas int32 = 2
tests := []struct {
name string
@ -78,7 +78,9 @@ func (c *CronFHPAController) Reconcile(ctx context.Context, req controllerruntim

var err error
startTime := time.Now()
defer metrics.ObserveProcessCronFederatedHPALatency(err, startTime)
defer func() {
metrics.ObserveProcessCronFederatedHPALatency(err, startTime)
}()

origRuleSets := sets.New[string]()
for _, history := range cronFHPA.Status.ExecutionHistories {
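The reason for wrapping the metrics call in defer func() { ... }() above (and in the similar FederatedHPA hunks that follow) is a common Go pitfall: the arguments of a plain deferred call are evaluated at the point of the defer statement, so the latency metric would always record a nil error. A closure defers the call itself and reads err only when the surrounding function returns. A minimal, self-contained illustration of the difference:

package main

import (
	"errors"
	"fmt"
	"time"
)

func observe(err error, start time.Time) {
	fmt.Printf("err=%v elapsed=%s\n", err, time.Since(start).Round(time.Millisecond))
}

func buggy() (err error) {
	start := time.Now()
	defer observe(err, start) // err is evaluated NOW, i.e. always nil
	err = errors.New("boom")
	return err
}

func fixed() (err error) {
	start := time.Now()
	defer func() { observe(err, start) }() // err is read at return time
	err = errors.New("boom")
	return err
}

func main() {
	_ = buggy() // prints err=<nil>
	_ = fixed() // prints err=boom
}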
@ -74,7 +74,7 @@ func CreateOrUpdateWork(ctx context.Context, c client.Client, workMeta metav1.Ob

runtimeObject.Labels = util.DedupeAndMergeLabels(runtimeObject.Labels, work.Labels)
runtimeObject.Annotations = util.DedupeAndMergeAnnotations(runtimeObject.Annotations, work.Annotations)
runtimeObject.Finalizers = work.Finalizers
runtimeObject.Finalizers = util.MergeFinalizers(runtimeObject.Finalizers, work.Finalizers)
runtimeObject.Spec = work.Spec

// Do the same thing as the mutating webhook does, add the permanent ID to workload if not exist,
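Switching from a direct overwrite to util.MergeFinalizers means finalizers already present on the existing Work are preserved when the desired Work carries a different set. The helper's body is not shown in this diff; judging by the expected test output later in this change set (a deduplicated, alphabetically sorted slice), a plausible sketch looks like the following — treat it as inferred, not copied from the source.

package util

import "sort"

// MergeFinalizers merges the desired finalizers into the existing ones,
// dropping duplicates and returning a sorted slice (sketch inferred from the
// test expectations in this change set).
func MergeFinalizers(existing, desired []string) []string {
	if len(existing) == 0 && len(desired) == 0 {
		return nil
	}
	seen := make(map[string]struct{}, len(existing)+len(desired))
	merged := make([]string, 0, len(existing)+len(desired))
	for _, f := range append(append([]string{}, existing...), desired...) {
		if _, ok := seen[f]; ok {
			continue
		}
		seen[f] = struct{}{}
		merged = append(merged, f)
	}
	sort.Strings(merged)
	return merged
}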
@ -176,7 +176,9 @@ func (c *FHPAController) Reconcile(ctx context.Context, req controllerruntime.Re
// observe process FederatedHPA latency
var err error
startTime := time.Now()
defer metrics.ObserveProcessFederatedHPALatency(err, startTime)
defer func() {
metrics.ObserveProcessFederatedHPALatency(err, startTime)
}()

err = c.reconcileAutoscaler(ctx, hpa)
if err != nil {

@ -575,7 +577,6 @@ func (c *FHPAController) buildPodInformerForCluster(clusterScaleClient *util.Clu
return nil
}(); err != nil {
klog.Errorf("Failed to sync cache for cluster: %s, error: %v", clusterScaleClient.ClusterName, err)
c.TypedInformerManager.Stop(clusterScaleClient.ClusterName)
return nil, err
}

@ -70,7 +70,9 @@ func (c *resourceMetricsClient) GetResourceMetric(ctx context.Context, resource
// observe pull ResourceMetric latency
var err error
startTime := time.Now()
defer metrics.ObserveFederatedHPAPullMetricsLatency(err, "ResourceMetric", startTime)
defer func() {
metrics.ObserveFederatedHPAPullMetricsLatency(err, "ResourceMetric", startTime)
}()

podMetrics, err := c.client.PodMetricses(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {

@ -154,7 +156,9 @@ func (c *customMetricsClient) GetRawMetric(metricName string, namespace string,
// observe pull RawMetric latency
var err error
startTime := time.Now()
defer metrics.ObserveFederatedHPAPullMetricsLatency(err, "RawMetric", startTime)
defer func() {
metrics.ObserveFederatedHPAPullMetricsLatency(err, "RawMetric", startTime)
}()

metricList, err := c.client.NamespacedMetrics(namespace).GetForObjects(schema.GroupKind{Kind: "Pod"}, selector, metricName, metricSelector)
if err != nil {

@ -191,7 +195,9 @@ func (c *customMetricsClient) GetObjectMetric(metricName string, namespace strin
// observe pull ObjectMetric latency
var err error
startTime := time.Now()
defer metrics.ObserveFederatedHPAPullMetricsLatency(err, "ObjectMetric", startTime)
defer func() {
metrics.ObserveFederatedHPAPullMetricsLatency(err, "ObjectMetric", startTime)
}()

gvk := schema.FromAPIVersionAndKind(objectRef.APIVersion, objectRef.Kind)
var metricValue *customapi.MetricValue

@ -223,7 +229,9 @@ func (c *externalMetricsClient) GetExternalMetric(metricName, namespace string,
// observe pull ExternalMetric latency
var err error
startTime := time.Now()
defer metrics.ObserveFederatedHPAPullMetricsLatency(err, "ExternalMetric", startTime)
defer func() {
metrics.ObserveFederatedHPAPullMetricsLatency(err, "ExternalMetric", startTime)
}()

externalMetrics, err := c.client.NamespacedMetrics(namespace).List(metricName, selector)
if err != nil {
@ -78,7 +78,12 @@ func (c *CRBGracefulEvictionController) Reconcile(ctx context.Context, req contr
}

func (c *CRBGracefulEvictionController) syncBinding(ctx context.Context, binding *workv1alpha2.ClusterResourceBinding) (time.Duration, error) {
keptTask, evictedClusters := assessEvictionTasks(binding.Spec, binding.Status.AggregatedStatus, c.GracefulEvictionTimeout, metav1.Now())
keptTask, evictedClusters := assessEvictionTasks(binding.Spec.GracefulEvictionTasks, metav1.Now(), assessmentOption{
timeout: c.GracefulEvictionTimeout,
scheduleResult: binding.Spec.Clusters,
observedStatus: binding.Status.AggregatedStatus,
hasScheduled: binding.Status.SchedulerObservedGeneration == binding.Generation,
})
if reflect.DeepEqual(binding.Spec.GracefulEvictionTasks, keptTask) {
return nextRetry(keptTask, c.GracefulEvictionTimeout, metav1.Now().Time), nil
}

@ -104,21 +109,13 @@ func (c *CRBGracefulEvictionController) SetupWithManager(mgr controllerruntime.M
clusterResourceBindingPredicateFn := predicate.Funcs{
CreateFunc: func(createEvent event.CreateEvent) bool {
newObj := createEvent.Object.(*workv1alpha2.ClusterResourceBinding)
if len(newObj.Spec.GracefulEvictionTasks) == 0 {
return false
}
// When the current component is restarted and there are still tasks in the
// GracefulEvictionTasks queue, we need to continue the procession.
return newObj.Status.SchedulerObservedGeneration == newObj.Generation
return len(newObj.Spec.GracefulEvictionTasks) != 0
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
newObj := updateEvent.ObjectNew.(*workv1alpha2.ClusterResourceBinding)

if len(newObj.Spec.GracefulEvictionTasks) == 0 {
return false
}

return newObj.Status.SchedulerObservedGeneration == newObj.Generation
return len(newObj.Spec.GracefulEvictionTasks) != 0
},
DeleteFunc: func(event.DeleteEvent) bool { return false },
GenericFunc: func(event.GenericEvent) bool { return false },

@ -67,7 +67,8 @@ func TestCRBGracefulEvictionController_Reconcile(t *testing.T) {
name: "binding with active graceful eviction tasks",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "test-binding",
Name: "test-binding",
Generation: 1,
},
Spec: workv1alpha2.ResourceBindingSpec{
GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{
@ -29,19 +29,17 @@ type assessmentOption struct {
timeout time.Duration
scheduleResult []workv1alpha2.TargetCluster
observedStatus []workv1alpha2.AggregatedStatusItem
hasScheduled bool
}

// assessEvictionTasks assesses each task according to graceful eviction rules and
// returns the tasks that should be kept.
func assessEvictionTasks(bindingSpec workv1alpha2.ResourceBindingSpec,
observedStatus []workv1alpha2.AggregatedStatusItem,
timeout time.Duration,
now metav1.Time,
) ([]workv1alpha2.GracefulEvictionTask, []string) {
// The now time is used as the input parameter to facilitate the unit test.
func assessEvictionTasks(tasks []workv1alpha2.GracefulEvictionTask, now metav1.Time, opt assessmentOption) ([]workv1alpha2.GracefulEvictionTask, []string) {
var keptTasks []workv1alpha2.GracefulEvictionTask
var evictedClusters []string

for _, task := range bindingSpec.GracefulEvictionTasks {
for _, task := range tasks {
// set creation timestamp for new task
if task.CreationTimestamp.IsZero() {
task.CreationTimestamp = &now

@ -49,12 +47,12 @@ func assessEvictionTasks(bindingSpec workv1alpha2.ResourceBindingSpec,
continue
}

if task.GracePeriodSeconds != nil {
opt.timeout = time.Duration(*task.GracePeriodSeconds) * time.Second
}

// assess task according to observed status
kt := assessSingleTask(task, assessmentOption{
scheduleResult: bindingSpec.Clusters,
timeout: timeout,
observedStatus: observedStatus,
})
kt := assessSingleTask(task, opt)
if kt != nil {
keptTasks = append(keptTasks, *kt)
} else {

@ -75,16 +73,14 @@ func assessSingleTask(task workv1alpha2.GracefulEvictionTask, opt assessmentOpti
return nil
}

timeout := opt.timeout
if task.GracePeriodSeconds != nil {
timeout = time.Duration(*task.GracePeriodSeconds) * time.Second
}
// task exceeds timeout
if metav1.Now().After(task.CreationTimestamp.Add(timeout)) {
if metav1.Now().After(task.CreationTimestamp.Add(opt.timeout)) {
return nil
}

if allScheduledResourceInHealthyState(opt) {
// Only when the binding object has been scheduled can further judgment be made.
// Otherwise, the binding status may be the old, which will affect the correctness of the judgment.
if opt.hasScheduled && allScheduledResourceInHealthyState(opt) {
return nil
}

@ -117,7 +113,7 @@ func allScheduledResourceInHealthyState(opt assessmentOption) bool {
return true
}

func nextRetry(tasks []workv1alpha2.GracefulEvictionTask, timeout time.Duration, timeNow time.Time) time.Duration {
func nextRetry(tasks []workv1alpha2.GracefulEvictionTask, gracefulTimeout time.Duration, timeNow time.Time) time.Duration {
if len(tasks) == 0 {
return 0
}

@ -132,6 +128,7 @@ func nextRetry(tasks []workv1alpha2.GracefulEvictionTask, timeout time.Duration,
if tasks[i].SuppressDeletion != nil {
continue
}
timeout := gracefulTimeout
if tasks[i].GracePeriodSeconds != nil {
timeout = time.Duration(*tasks[i].GracePeriodSeconds) * time.Second
}
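To summarize the refactor above: per-task GracePeriodSeconds now overrides opt.timeout in the caller rather than inside assessSingleTask, and a task is only released on the "all healthy" path when the scheduler has already observed the current generation (hasScheduled). The following is a condensed sketch of the resulting per-task decision, reconstructed from the hunks and test expectations in this change set; it is not a verbatim copy of the source.

// assessSingleTask decides whether a graceful eviction task should be kept
// (non-nil return) or released (nil return).
func assessSingleTask(task workv1alpha2.GracefulEvictionTask, opt assessmentOption) *workv1alpha2.GracefulEvictionTask {
	if task.SuppressDeletion != nil {
		if *task.SuppressDeletion {
			return &task // user explicitly keeps the task
		}
		return nil // user explicitly releases it
	}

	// opt.timeout already reflects task.GracePeriodSeconds (set by the caller).
	if metav1.Now().After(task.CreationTimestamp.Add(opt.timeout)) {
		return nil // task exceeded its timeout
	}

	// Only when the binding has been scheduled (the scheduler observed the
	// current generation) is the reported health fresh enough to finish the
	// eviction early.
	if opt.hasScheduled && allScheduledResourceInHealthyState(opt) {
		return nil
	}
	return &task
}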
@ -41,17 +41,15 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
want *workv1alpha2.GracefulEvictionTask
|
||||
}{
|
||||
{
|
||||
name: "task that doesn't exceed the timeout",
|
||||
name: "task doesn't exceed the timeout, hasScheduled is false, task has no change",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -1)},
|
||||
},
|
||||
opt: assessmentOption{
|
||||
timeout: timeout,
|
||||
scheduleResult: []workv1alpha2.TargetCluster{
|
||||
{Name: "memberA"},
|
||||
},
|
||||
timeout: timeout,
|
||||
hasScheduled: false,
|
||||
},
|
||||
},
|
||||
want: &workv1alpha2.GracefulEvictionTask{
|
||||
|
@ -60,7 +58,7 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "task that exceeds the timeout",
|
||||
name: "task exceeds the timeout, task will be nil",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
|
@ -68,15 +66,12 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
},
|
||||
opt: assessmentOption{
|
||||
timeout: timeout,
|
||||
scheduleResult: []workv1alpha2.TargetCluster{
|
||||
{Name: "memberA"},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "binding scheduled result is healthy, task should be nil",
|
||||
name: "task doesn't exceed the timeout, hasScheduled is true, scheduled result is healthy, task will be nil",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
|
@ -90,12 +85,13 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
observedStatus: []workv1alpha2.AggregatedStatusItem{
|
||||
{ClusterName: "memberA", Health: workv1alpha2.ResourceHealthy},
|
||||
},
|
||||
hasScheduled: true,
|
||||
},
|
||||
},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "binding scheduled result is unhealthy, task has no effect",
|
||||
name: "task doesn't exceed the timeout, hasScheduled is true, scheduled result is unhealthy, task has no change",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
|
@ -109,6 +105,7 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
observedStatus: []workv1alpha2.AggregatedStatusItem{
|
||||
{ClusterName: "memberA", Health: workv1alpha2.ResourceUnhealthy},
|
||||
},
|
||||
hasScheduled: true,
|
||||
},
|
||||
},
|
||||
want: &workv1alpha2.GracefulEvictionTask{
|
||||
|
@ -117,7 +114,7 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "binding scheduled result is unknown, task has no effect",
|
||||
name: "task doesn't exceed the timeout, hasScheduled is true, scheduled result is unknown, task has no change",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
|
@ -131,6 +128,7 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
observedStatus: []workv1alpha2.AggregatedStatusItem{
|
||||
{ClusterName: "memberA", Health: workv1alpha2.ResourceUnknown},
|
||||
},
|
||||
hasScheduled: true,
|
||||
},
|
||||
},
|
||||
want: &workv1alpha2.GracefulEvictionTask{
|
||||
|
@ -139,66 +137,13 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "gracePeriodSeconds is declared in gracefulEvictionTask and timeout is not reached",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
GracePeriodSeconds: ptr.To[int32](30),
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -1)},
|
||||
},
|
||||
opt: assessmentOption{
|
||||
timeout: timeout,
|
||||
scheduleResult: []workv1alpha2.TargetCluster{
|
||||
{Name: "memberA"},
|
||||
},
|
||||
observedStatus: []workv1alpha2.AggregatedStatusItem{
|
||||
{ClusterName: "memberA", Health: workv1alpha2.ResourceUnknown},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "gracePeriodSeconds is declared in gracefulEvictionTask and timeout is reached",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
GracePeriodSeconds: ptr.To[int32](120),
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -1)},
|
||||
},
|
||||
opt: assessmentOption{
|
||||
timeout: timeout,
|
||||
scheduleResult: []workv1alpha2.TargetCluster{
|
||||
{Name: "memberA"},
|
||||
},
|
||||
observedStatus: []workv1alpha2.AggregatedStatusItem{
|
||||
{ClusterName: "memberA", Health: workv1alpha2.ResourceUnknown},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
GracePeriodSeconds: ptr.To[int32](120),
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -1)},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "suppressDeletion is declared in gracefulEvictionTask and is true",
|
||||
name: "suppressDeletion is declared, value is true",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
SuppressDeletion: ptr.To[bool](true),
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -1)},
|
||||
},
|
||||
opt: assessmentOption{
|
||||
timeout: timeout,
|
||||
scheduleResult: []workv1alpha2.TargetCluster{
|
||||
{Name: "memberA"},
|
||||
},
|
||||
observedStatus: []workv1alpha2.AggregatedStatusItem{
|
||||
{ClusterName: "memberA", Health: workv1alpha2.ResourceHealthy},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
|
@ -207,22 +152,13 @@ func Test_assessSingleTask(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "suppressDeletion is declared in gracefulEvictionTask and is false",
|
||||
name: "suppressDeletion is declared, value is false",
|
||||
args: args{
|
||||
task: workv1alpha2.GracefulEvictionTask{
|
||||
FromCluster: "member1",
|
||||
SuppressDeletion: ptr.To[bool](false),
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -1)},
|
||||
},
|
||||
opt: assessmentOption{
|
||||
timeout: timeout,
|
||||
scheduleResult: []workv1alpha2.TargetCluster{
|
||||
{Name: "memberA"},
|
||||
},
|
||||
observedStatus: []workv1alpha2.AggregatedStatusItem{
|
||||
{ClusterName: "memberA", Health: workv1alpha2.ResourceHealthy},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: nil,
|
||||
},
|
||||
|
@ -245,6 +181,7 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
observedStatus []workv1alpha2.AggregatedStatusItem
|
||||
timeout time.Duration
|
||||
now metav1.Time
|
||||
hasScheduled bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
|
@ -256,17 +193,13 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
name: "tasks without creation timestamp",
|
||||
args: args{
|
||||
bindingSpec: workv1alpha2.ResourceBindingSpec{
|
||||
Clusters: []workv1alpha2.TargetCluster{
|
||||
{Name: "memberA"},
|
||||
},
|
||||
GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{
|
||||
{FromCluster: "member1"},
|
||||
{FromCluster: "member2"},
|
||||
},
|
||||
},
|
||||
observedStatus: []workv1alpha2.AggregatedStatusItem{},
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
},
|
||||
wantTask: []workv1alpha2.GracefulEvictionTask{
|
||||
{
|
||||
|
@ -281,7 +214,7 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
wantCluster: nil,
|
||||
},
|
||||
{
|
||||
name: "tasks that do not exceed the timeout should do nothing",
|
||||
name: "all tasks do not exceed the timeout, but hasScheduled is false, all tasks should do nothing",
|
||||
args: args{
|
||||
bindingSpec: workv1alpha2.ResourceBindingSpec{
|
||||
Clusters: []workv1alpha2.TargetCluster{
|
||||
|
@ -301,6 +234,7 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
observedStatus: []workv1alpha2.AggregatedStatusItem{},
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
hasScheduled: false,
|
||||
},
|
||||
wantTask: []workv1alpha2.GracefulEvictionTask{
|
||||
{
|
||||
|
@ -315,7 +249,7 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
wantCluster: nil,
|
||||
},
|
||||
{
|
||||
name: "tasks that exceed the timeout should be removed",
|
||||
name: "task that exceed the timeout should be removed, task do not exceed the timeout should do nothing",
|
||||
args: args{
|
||||
bindingSpec: workv1alpha2.ResourceBindingSpec{
|
||||
Clusters: []workv1alpha2.TargetCluster{
|
||||
|
@ -328,16 +262,22 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
},
|
||||
{
|
||||
FromCluster: "member2",
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -5)},
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -1)},
|
||||
},
|
||||
},
|
||||
},
|
||||
observedStatus: []workv1alpha2.AggregatedStatusItem{},
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
hasScheduled: true,
|
||||
},
|
||||
wantTask: nil,
|
||||
wantCluster: []string{"member1", "member2"},
|
||||
wantTask: []workv1alpha2.GracefulEvictionTask{
|
||||
{
|
||||
FromCluster: "member2",
|
||||
CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -1)},
|
||||
},
|
||||
},
|
||||
wantCluster: []string{"member1"},
|
||||
},
|
||||
{
|
||||
name: "mixed tasks",
|
||||
|
@ -399,8 +339,9 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
observedStatus: []workv1alpha2.AggregatedStatusItem{
|
||||
{ClusterName: "memberA", Health: workv1alpha2.ResourceHealthy},
|
||||
},
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
hasScheduled: true,
|
||||
},
|
||||
wantTask: []workv1alpha2.GracefulEvictionTask{
|
||||
{
|
||||
|
@ -437,8 +378,9 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
{ClusterName: "memberA", Health: workv1alpha2.ResourceHealthy},
|
||||
{ClusterName: "memberB", Health: workv1alpha2.ResourceHealthy},
|
||||
},
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
hasScheduled: true,
|
||||
},
|
||||
wantTask: nil,
|
||||
wantCluster: []string{"member1", "member2"},
|
||||
|
@ -466,8 +408,9 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
{ClusterName: "memberA", Health: workv1alpha2.ResourceHealthy},
|
||||
{ClusterName: "memberB", Health: workv1alpha2.ResourceUnhealthy},
|
||||
},
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
hasScheduled: true,
|
||||
},
|
||||
wantTask: []workv1alpha2.GracefulEvictionTask{
|
||||
{
|
||||
|
@ -504,8 +447,9 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
{ClusterName: "memberA", Health: workv1alpha2.ResourceHealthy},
|
||||
{ClusterName: "memberB", Health: workv1alpha2.ResourceUnknown},
|
||||
},
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
timeout: timeout,
|
||||
now: timeNow,
|
||||
hasScheduled: true,
|
||||
},
|
||||
wantTask: []workv1alpha2.GracefulEvictionTask{
|
||||
{
|
||||
|
@ -522,7 +466,12 @@ func Test_assessEvictionTasks(t *testing.T) {
|
|||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if gotTask, gotCluster := assessEvictionTasks(tt.args.bindingSpec, tt.args.observedStatus, tt.args.timeout, tt.args.now); !reflect.DeepEqual(gotTask, tt.wantTask) || !reflect.DeepEqual(gotCluster, tt.wantCluster) {
|
||||
if gotTask, gotCluster := assessEvictionTasks(tt.args.bindingSpec.GracefulEvictionTasks, tt.args.now, assessmentOption{
|
||||
timeout: tt.args.timeout,
|
||||
scheduleResult: tt.args.bindingSpec.Clusters,
|
||||
observedStatus: tt.args.observedStatus,
|
||||
hasScheduled: true,
|
||||
}); !reflect.DeepEqual(gotTask, tt.wantTask) || !reflect.DeepEqual(gotCluster, tt.wantCluster) {
|
||||
t.Errorf("assessEvictionTasks() = (%v, %v), want (%v, %v)", gotTask, gotCluster, tt.wantTask, tt.wantCluster)
|
||||
}
|
||||
})
|
||||
|
|
|
@ -78,7 +78,12 @@ func (c *RBGracefulEvictionController) Reconcile(ctx context.Context, req contro
}

func (c *RBGracefulEvictionController) syncBinding(ctx context.Context, binding *workv1alpha2.ResourceBinding) (time.Duration, error) {
keptTask, evictedCluster := assessEvictionTasks(binding.Spec, binding.Status.AggregatedStatus, c.GracefulEvictionTimeout, metav1.Now())
keptTask, evictedCluster := assessEvictionTasks(binding.Spec.GracefulEvictionTasks, metav1.Now(), assessmentOption{
timeout: c.GracefulEvictionTimeout,
scheduleResult: binding.Spec.Clusters,
observedStatus: binding.Status.AggregatedStatus,
hasScheduled: binding.Status.SchedulerObservedGeneration == binding.Generation,
})
if reflect.DeepEqual(binding.Spec.GracefulEvictionTasks, keptTask) {
return nextRetry(keptTask, c.GracefulEvictionTimeout, metav1.Now().Time), nil
}

@ -104,21 +109,13 @@ func (c *RBGracefulEvictionController) SetupWithManager(mgr controllerruntime.Ma
resourceBindingPredicateFn := predicate.Funcs{
CreateFunc: func(createEvent event.CreateEvent) bool {
newObj := createEvent.Object.(*workv1alpha2.ResourceBinding)
if len(newObj.Spec.GracefulEvictionTasks) == 0 {
return false
}
// When the current component is restarted and there are still tasks in the
// GracefulEvictionTasks queue, we need to continue the procession.
return newObj.Status.SchedulerObservedGeneration == newObj.Generation
return len(newObj.Spec.GracefulEvictionTasks) != 0
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
newObj := updateEvent.ObjectNew.(*workv1alpha2.ResourceBinding)

if len(newObj.Spec.GracefulEvictionTasks) == 0 {
return false
}

return newObj.Status.SchedulerObservedGeneration == newObj.Generation
return len(newObj.Spec.GracefulEvictionTasks) != 0
},
DeleteFunc: func(event.DeleteEvent) bool { return false },
GenericFunc: func(event.GenericEvent) bool { return false },

@ -69,8 +69,9 @@ func TestRBGracefulEvictionController_Reconcile(t *testing.T) {
name: "binding with active graceful eviction tasks",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "test-binding",
Namespace: "default",
Name: "test-binding",
Namespace: "default",
Generation: 1,
},
Spec: workv1alpha2.ResourceBindingSpec{
GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{

@ -146,6 +146,7 @@ func (c *EndpointSliceController) collectEndpointSliceFromWork(ctx context.Conte
desiredEndpointSlice.Labels = util.DedupeAndMergeLabels(desiredEndpointSlice.Labels, map[string]string{
workv1alpha2.WorkPermanentIDLabel: work.Labels[workv1alpha2.WorkPermanentIDLabel],
discoveryv1.LabelServiceName: names.GenerateDerivedServiceName(work.Labels[util.ServiceNameLabel]),
discoveryv1.LabelManagedBy: util.EndpointSliceControllerLabelValue,
})
desiredEndpointSlice.Annotations = util.DedupeAndMergeAnnotations(desiredEndpointSlice.Annotations, map[string]string{
workv1alpha2.WorkNamespaceAnnotation: work.Namespace,
@ -273,7 +273,6 @@ func (c *ServiceExportController) registerInformersAndStart(cluster *clusterv1al
|
|||
return nil
|
||||
}(); err != nil {
|
||||
klog.Errorf("Failed to sync cache for cluster: %s, error: %v", cluster.Name, err)
|
||||
c.InformerManager.Stop(cluster.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -374,6 +373,11 @@ func (c *ServiceExportController) handleEndpointSliceEvent(ctx context.Context,
|
|||
return err
|
||||
}
|
||||
|
||||
// Exclude EndpointSlice resources that are managed by Karmada system to avoid duplicate reporting.
|
||||
if helper.IsEndpointSliceManagedByKarmada(endpointSliceObj.GetLabels()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = c.reportEndpointSliceWithEndpointSliceCreateOrUpdate(ctx, endpointSliceKey.Cluster, endpointSliceObj); err != nil {
|
||||
klog.Errorf("Failed to handle endpointSlice(%s) event, Error: %v",
|
||||
endpointSliceKey.NamespaceKey(), err)
|
||||
|
@ -396,6 +400,14 @@ func (c *ServiceExportController) reportEndpointSliceWithServiceExportCreate(ctx
return nil
}

// Before retrieving EndpointSlice objects from the informer, ensure the informer cache is synced.
// This is necessary because the informer for EndpointSlice is created dynamically in the Reconcile() routine
// when a Work resource containing a ServiceExport is detected for the cluster. If the informer is not yet synced,
// return an error and wait for a retry at the next time.
if !singleClusterManager.IsInformerSynced(endpointSliceGVR) {
return fmt.Errorf("the informer for cluster %s has not been synced, wait a retry at the next time", serviceExportKey.Cluster)
}

endpointSliceLister := singleClusterManager.Lister(endpointSliceGVR)
if endpointSliceObjects, err = endpointSliceLister.ByNamespace(serviceExportKey.Namespace).List(labels.SelectorFromSet(labels.Set{
discoveryv1.LabelServiceName: serviceExportKey.Name,
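The guard added above prevents listing from an informer that was created lazily and has not finished its initial sync, which would otherwise return an incomplete view. The IsInformerSynced/Lister calls belong to Karmada's single-cluster informer manager; the same pattern with plain client-go shared informers looks roughly like the sketch below (function name and retry wording are assumptions).

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// listSyncedEndpointSlices lists EndpointSlices only after the shared informer
// cache has synced; otherwise it returns an error so the caller can retry later.
func listSyncedEndpointSlices(client kubernetes.Interface, stopCh <-chan struct{}, namespace string) error {
	factory := informers.NewSharedInformerFactory(client, 0)
	informer := factory.Discovery().V1().EndpointSlices()
	factory.Start(stopCh)

	if !informer.Informer().HasSynced() {
		return fmt.Errorf("endpointslice informer has not been synced, retry later")
	}
	slices, err := informer.Lister().EndpointSlices(namespace).List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("found %d endpointslices\n", len(slices))
	return nil
}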
@ -409,7 +421,13 @@ func (c *ServiceExportController) reportEndpointSliceWithServiceExportCreate(ctx
|
|||
}
|
||||
|
||||
for index := range endpointSliceObjects {
|
||||
if err = reportEndpointSlice(ctx, c.Client, endpointSliceObjects[index].(*unstructured.Unstructured), serviceExportKey.Cluster); err != nil {
|
||||
endpointSlice := endpointSliceObjects[index].(*unstructured.Unstructured)
|
||||
// Exclude EndpointSlice resources that are managed by Karmada system to avoid duplicate reporting.
|
||||
if helper.IsEndpointSliceManagedByKarmada(endpointSlice.GetLabels()) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err = reportEndpointSlice(ctx, c.Client, endpointSlice, serviceExportKey.Cluster); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
|
@ -471,6 +489,14 @@ func (c *ServiceExportController) reportEndpointSliceWithEndpointSliceCreateOrUp
|
|||
return nil
|
||||
}
|
||||
|
||||
// Before retrieving ServiceExport objects from the informer, ensure the informer cache is synced.
|
||||
// This is necessary because the informer for ServiceExport is created dynamically in the Reconcile() routine
|
||||
// when a Work resource containing an ServiceExport is detected for the cluster. If the informer is not yet synced,
|
||||
// return an error and wait a retry at the next time.
|
||||
if !singleClusterManager.IsInformerSynced(serviceExportGVR) {
|
||||
return fmt.Errorf("the informer for cluster %s has not been synced, wait a retry at the next time", clusterName)
|
||||
}
|
||||
|
||||
serviceExportLister := singleClusterManager.Lister(serviceExportGVR)
|
||||
_, err := serviceExportLister.ByNamespace(endpointSlice.GetNamespace()).Get(relatedServiceName)
|
||||
if err != nil {
|
||||
|
@ -513,10 +539,14 @@ func getEndpointSliceWorkMeta(ctx context.Context, c client.Client, ns string, w
|
|||
return metav1.ObjectMeta{}, err
|
||||
}
|
||||
|
||||
existFinalizers := existWork.GetFinalizers()
|
||||
finalizersToAdd := []string{util.EndpointSliceControllerFinalizer}
|
||||
newFinalizers := util.MergeFinalizers(existFinalizers, finalizersToAdd)
|
||||
|
||||
workMeta := metav1.ObjectMeta{
|
||||
Name: workName,
|
||||
Namespace: ns,
|
||||
Finalizers: []string{util.EndpointSliceControllerFinalizer},
|
||||
Finalizers: newFinalizers,
|
||||
Labels: map[string]string{
|
||||
util.ServiceNamespaceLabel: endpointSlice.GetNamespace(),
|
||||
util.ServiceNameLabel: endpointSlice.GetLabels()[discoveryv1.LabelServiceName],
|
||||
|
@ -603,6 +633,7 @@ func cleanEndpointSliceWork(ctx context.Context, c client.Client, work *workv1al
|
|||
klog.Errorf("Failed to update work(%s/%s): %v", work.Namespace, work.Name, err)
|
||||
return err
|
||||
}
|
||||
klog.Infof("Successfully updated work(%s/%s)", work.Namespace, work.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -610,6 +641,7 @@ func cleanEndpointSliceWork(ctx context.Context, c client.Client, work *workv1al
|
|||
klog.Errorf("Failed to delete work(%s/%s), Error: %v", work.Namespace, work.Name, err)
|
||||
return err
|
||||
}
|
||||
klog.Infof("Successfully deleted work(%s/%s)", work.Namespace, work.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -215,7 +215,6 @@ func (c *EndpointSliceCollectController) registerInformersAndStart(cluster *clus
|
|||
return nil
|
||||
}(); err != nil {
|
||||
klog.Errorf("Failed to sync cache for cluster: %s, error: %v", cluster.Name, err)
|
||||
c.InformerManager.Stop(cluster.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -291,7 +290,8 @@ func (c *EndpointSliceCollectController) handleEndpointSliceEvent(ctx context.Co
return err
}

if util.GetLabelValue(endpointSliceObj.GetLabels(), discoveryv1.LabelManagedBy) == util.EndpointSliceDispatchControllerLabelValue {
// Exclude EndpointSlice resources that are managed by Karmada system to avoid duplicate reporting.
if helper.IsEndpointSliceManagedByKarmada(endpointSliceObj.GetLabels()) {
return nil
}

@ -350,7 +350,8 @@ func (c *EndpointSliceCollectController) collectTargetEndpointSlice(ctx context.
klog.Errorf("Failed to convert object to EndpointSlice, error: %v", err)
return err
}
if util.GetLabelValue(eps.GetLabels(), discoveryv1.LabelManagedBy) == util.EndpointSliceDispatchControllerLabelValue {
// Exclude EndpointSlice resources that are managed by Karmada system to avoid duplicate reporting.
if helper.IsEndpointSliceManagedByKarmada(eps.GetLabels()) {
continue
}
epsUnstructured, err := helper.ToUnstructured(eps)
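Both hunks above replace a comparison against a single controller value with helper.IsEndpointSliceManagedByKarmada, so every EndpointSlice written by a Karmada controller (not only the dispatch controller) is excluded from re-collection. The helper's body is not shown in this section; a plausible sketch follows, where the concrete set of managed-by label values is an assumption for illustration rather than the authoritative list.

package helper

import discoveryv1 "k8s.io/api/discovery/v1"

// karmadaEndpointSliceControllers lists label values that Karmada controllers
// are assumed to write into "endpointslice.kubernetes.io/managed-by".
// Consult the util package in the Karmada tree for the real constants.
var karmadaEndpointSliceControllers = map[string]struct{}{
	"endpointslice-controller.karmada.io":          {},
	"endpointslice-dispatch-controller.karmada.io": {},
}

// IsEndpointSliceManagedByKarmada reports whether the EndpointSlice labels
// mark it as created by a Karmada controller, in which case collectors skip
// it to avoid reporting it back to the control plane a second time.
func IsEndpointSliceManagedByKarmada(lbs map[string]string) bool {
	_, ok := karmadaEndpointSliceControllers[lbs[discoveryv1.LabelManagedBy]]
	return ok
}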
@ -405,6 +406,10 @@ func getEndpointSliceWorkMeta(ctx context.Context, c client.Client, ns string, w
|
|||
return metav1.ObjectMeta{}, err
|
||||
}
|
||||
|
||||
existFinalizers := existWork.GetFinalizers()
|
||||
finalizersToAdd := []string{util.MCSEndpointSliceDispatchControllerFinalizer}
|
||||
newFinalizers := util.MergeFinalizers(existFinalizers, finalizersToAdd)
|
||||
|
||||
ls := map[string]string{
|
||||
util.MultiClusterServiceNamespaceLabel: endpointSlice.GetNamespace(),
|
||||
util.MultiClusterServiceNameLabel: endpointSlice.GetLabels()[discoveryv1.LabelServiceName],
|
||||
|
@ -413,7 +418,12 @@ func getEndpointSliceWorkMeta(ctx context.Context, c client.Client, ns string, w
|
|||
util.EndpointSliceWorkManagedByLabel: util.MultiClusterServiceKind,
|
||||
}
|
||||
if existWork.Labels == nil || (err != nil && apierrors.IsNotFound(err)) {
|
||||
workMeta := metav1.ObjectMeta{Name: workName, Namespace: ns, Labels: ls}
|
||||
workMeta := metav1.ObjectMeta{
|
||||
Name: workName,
|
||||
Namespace: ns,
|
||||
Labels: ls,
|
||||
Finalizers: newFinalizers,
|
||||
}
|
||||
return workMeta, nil
|
||||
}
|
||||
|
||||
|
@ -428,7 +438,7 @@ func getEndpointSliceWorkMeta(ctx context.Context, c client.Client, ns string, w
|
|||
Name: workName,
|
||||
Namespace: ns,
|
||||
Labels: ls,
|
||||
Finalizers: []string{util.MCSEndpointSliceDispatchControllerFinalizer},
|
||||
Finalizers: newFinalizers,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -147,10 +147,11 @@ func TestGetEndpointSliceWorkMeta(t *testing.T) {
|
|||
util.PropagationInstruction: util.PropagationInstructionSuppressed,
|
||||
util.EndpointSliceWorkManagedByLabel: util.MultiClusterServiceKind,
|
||||
},
|
||||
Finalizers: []string{util.MCSEndpointSliceDispatchControllerFinalizer},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Existing work for EndpointSlice",
|
||||
name: "Existing work for EndpointSlice without finalizers",
|
||||
existingWork: createExistingWork("endpointslice-test-eps-default", "test-cluster", "ExistingController"),
|
||||
endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false),
|
||||
expectedMeta: metav1.ObjectMeta{
|
||||
|
@ -165,6 +166,54 @@ func TestGetEndpointSliceWorkMeta(t *testing.T) {
|
|||
Finalizers: []string{util.MCSEndpointSliceDispatchControllerFinalizer},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Existing work with existing finalizers",
|
||||
existingWork: createExistingWorkWithFinalizers("endpointslice-test-eps-default", "test-cluster", "ExistingController", []string{"existing.finalizer", "another.finalizer"}),
|
||||
endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false),
|
||||
expectedMeta: metav1.ObjectMeta{
|
||||
Name: "endpointslice-test-eps-default",
|
||||
Namespace: "test-cluster",
|
||||
Labels: map[string]string{
|
||||
util.MultiClusterServiceNamespaceLabel: "default",
|
||||
util.MultiClusterServiceNameLabel: "test-service",
|
||||
util.PropagationInstruction: util.PropagationInstructionSuppressed,
|
||||
util.EndpointSliceWorkManagedByLabel: "ExistingController.MultiClusterService",
|
||||
},
|
||||
Finalizers: []string{"another.finalizer", "existing.finalizer", util.MCSEndpointSliceDispatchControllerFinalizer},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Existing work with duplicate finalizer",
|
||||
existingWork: createExistingWorkWithFinalizers("endpointslice-test-eps-default", "test-cluster", "ExistingController", []string{util.MCSEndpointSliceDispatchControllerFinalizer, "another.finalizer"}),
|
||||
endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false),
|
||||
expectedMeta: metav1.ObjectMeta{
|
||||
Name: "endpointslice-test-eps-default",
|
||||
Namespace: "test-cluster",
|
||||
Labels: map[string]string{
|
||||
util.MultiClusterServiceNamespaceLabel: "default",
|
||||
util.MultiClusterServiceNameLabel: "test-service",
|
||||
util.PropagationInstruction: util.PropagationInstructionSuppressed,
|
||||
util.EndpointSliceWorkManagedByLabel: "ExistingController.MultiClusterService",
|
||||
},
|
||||
Finalizers: []string{"another.finalizer", util.MCSEndpointSliceDispatchControllerFinalizer},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Existing work without labels",
|
||||
existingWork: createExistingWorkWithoutLabels("endpointslice-test-eps-default", "test-cluster", []string{"existing.finalizer"}),
|
||||
endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false),
|
||||
expectedMeta: metav1.ObjectMeta{
|
||||
Name: "endpointslice-test-eps-default",
|
||||
Namespace: "test-cluster",
|
||||
Labels: map[string]string{
|
||||
util.MultiClusterServiceNamespaceLabel: "default",
|
||||
util.MultiClusterServiceNameLabel: "test-service",
|
||||
util.PropagationInstruction: util.PropagationInstructionSuppressed,
|
||||
util.EndpointSliceWorkManagedByLabel: util.MultiClusterServiceKind,
|
||||
},
|
||||
Finalizers: []string{"existing.finalizer", util.MCSEndpointSliceDispatchControllerFinalizer},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Nil EndpointSlice",
|
||||
endpointSlice: nil,
|
||||
|
@ -188,7 +237,10 @@ func TestGetEndpointSliceWorkMeta(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedMeta.Name, meta.Name)
|
||||
assert.Equal(t, tc.expectedMeta.Namespace, meta.Namespace)
|
||||
assert.Equal(t, tc.expectedMeta.Finalizers, meta.Finalizers)
|
||||
|
||||
assert.Equal(t, tc.expectedMeta.Finalizers, meta.Finalizers,
|
||||
"Finalizers do not match. Expected: %v, Got: %v", tc.expectedMeta.Finalizers, meta.Finalizers)
|
||||
|
||||
assert.True(t, compareLabels(meta.Labels, tc.expectedMeta.Labels),
|
||||
"Labels do not match. Expected: %v, Got: %v", tc.expectedMeta.Labels, meta.Labels)
|
||||
}
|
||||
|
@ -327,6 +379,31 @@ func createExistingWork(name, namespace, managedBy string) *workv1alpha1.Work {
|
|||
}
|
||||
}
|
||||
|
||||
// Helper function to create an existing Work resource for testing with specific finalizers
|
||||
func createExistingWorkWithFinalizers(name, namespace, managedBy string, finalizers []string) *workv1alpha1.Work {
|
||||
return &workv1alpha1.Work{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Labels: map[string]string{
|
||||
util.EndpointSliceWorkManagedByLabel: managedBy,
|
||||
},
|
||||
Finalizers: finalizers,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to create an existing Work resource for testing without labels
|
||||
func createExistingWorkWithoutLabels(name, namespace string, finalizers []string) *workv1alpha1.Work {
|
||||
return &workv1alpha1.Work{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Finalizers: finalizers,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to create a fake client with an optional existing Work
|
||||
func createFakeClient(existingWork *workv1alpha1.Work) client.Client {
|
||||
scheme := setupSchemeEndpointCollect()
|
||||
|
|
|
@ -173,7 +173,7 @@ func (c *ClusterStatusController) SetupWithManager(mgr controllerruntime.Manager
}
return controllerruntime.NewControllerManagedBy(mgr).
Named(ControllerName).
For(&clusterv1alpha1.Cluster{}, builder.WithPredicates(c.PredicateFunc)).
For(&clusterv1alpha1.Cluster{}, builder.WithPredicates(c.PredicateFunc, predicate.GenerationChangedPredicate{})).
WithOptions(controller.Options{
RateLimiter: ratelimiterflag.DefaultControllerRateLimiter[controllerruntime.Request](c.RateLimiterOptions),
}).Complete(c)

@ -391,7 +391,6 @@ func (c *ClusterStatusController) buildInformerForCluster(clusterClient *util.Cl
return nil
}(); err != nil {
klog.Errorf("Failed to sync cache for cluster: %s, error: %v", clusterClient.ClusterName, err)
c.TypedInformerManager.Stop(clusterClient.ClusterName)
return nil, err
}
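Adding predicate.GenerationChangedPredicate{} above filters out reconciles triggered purely by status updates, because metadata.generation is only bumped when the spec changes. A minimal controller-runtime setup showing how such predicates compose is sketched below; the reconciler type and watched object are placeholders, only the controller-runtime calls are real.

package controllers

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// deploymentReconciler is a placeholder reconciler used only to show how
// predicates are attached; it is not part of Karmada.
type deploymentReconciler struct{}

func (r *deploymentReconciler) Reconcile(_ context.Context, _ ctrl.Request) (ctrl.Result, error) {
	// Real reconciliation logic would go here.
	return ctrl.Result{}, nil
}

// SetupWithManager registers the reconciler so it is only triggered when the
// watched object's spec changes (metadata.generation increases).
func (r *deploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1.Deployment{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
		Complete(r)
}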
@ -294,6 +294,11 @@ func (c *WorkStatusController) handleDeleteEvent(ctx context.Context, key keys.F
return nil
}

// skip processing as the work object is suspended for dispatching.
if helper.IsWorkSuspendDispatching(work) {
return nil
}

if util.GetLabelValue(work.Labels, util.PropagationInstruction) == util.PropagationInstructionSuppressed {
return nil
}

@ -499,7 +504,6 @@ func (c *WorkStatusController) registerInformersAndStart(cluster *clusterv1alpha
return nil
}(); err != nil {
klog.Errorf("Failed to sync cache for cluster: %s, error: %v", cluster.Name, err)
c.InformerManager.Stop(cluster.Name)
return err
}
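The early return added above keys off helper.IsWorkSuspendDispatching, so a resource deleted from a member cluster is not recreated while its Work is suspended for dispatching. The Work API carries spec.suspendDispatching as a *bool (the test later in this section sets it with ptr.To(true)), so the helper presumably reduces to a nil-safe check along these lines; treat this sketch as inferred rather than the actual implementation.

package helper

import workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"

// IsWorkSuspendDispatching reports whether dispatching of the Work has been
// suspended (nil-safe sketch; the real helper may differ in detail).
func IsWorkSuspendDispatching(work *workv1alpha1.Work) bool {
	return work != nil &&
		work.Spec.SuspendDispatching != nil &&
		*work.Spec.SuspendDispatching
}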
@ -26,6 +26,7 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
@ -578,6 +579,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
|
|||
expectedError bool
|
||||
wrongWorkNS bool
|
||||
workApplyFunc func(work *workv1alpha1.Work)
|
||||
assertFunc func(t *testing.T, dynamicClientSets *dynamicfake.FakeDynamicClient)
|
||||
}{
|
||||
{
|
||||
name: "failed to exec NeedUpdate",
|
||||
|
@ -668,6 +670,23 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
work.SetDeletionTimestamp(ptr.To(metav1.Now()))
},
},
{
name: "resource not found, work suspendDispatching true, should not recreate resource",
obj: newPodObj("karmada-es-cluster"),
pod: nil, // Simulate the resource does not exist in the member cluster
raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
controllerWithoutInformer: true,
expectedError: false,
workApplyFunc: func(work *workv1alpha1.Work) {
work.Spec.SuspendDispatching = ptr.To(true)
},
assertFunc: func(t *testing.T, dynamicClientSets *dynamicfake.FakeDynamicClient) {
gvr := corev1.SchemeGroupVersion.WithResource("pods")
obj, err := dynamicClientSets.Resource(gvr).Namespace("default").Get(context.Background(), "pod", metav1.GetOptions{})
assert.True(t, apierrors.IsNotFound(err), "expected a NotFound error but got: %s", err)
assert.Nil(t, obj)
},
},
}

for _, tt := range tests {

@ -708,6 +727,10 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
} else {
assert.NoError(t, err)
}

if tt.assertFunc != nil {
tt.assertFunc(t, dynamicClientSet)
}
})
}
}

@ -337,13 +337,26 @@ func (d *ResourceDetector) OnUpdate(oldObj, newObj interface{}) {
return
}

resourceChangeByKarmada := eventfilter.ResourceChangeByKarmada(unstructuredOldObj, unstructuredNewObj)

resourceItem := ResourceItem{
Obj: newRuntimeObj,
ResourceChangeByKarmada: resourceChangeByKarmada,
isLazyActivation, err := d.isClaimedByLazyPolicy(unstructuredNewObj)
if err != nil {
// should never come here
klog.Errorf("Failed to check if the object (kind=%s, %s/%s) is bound by lazy policy. err: %v", unstructuredNewObj.GetKind(), unstructuredNewObj.GetNamespace(), unstructuredNewObj.GetName(), err)
}

if isLazyActivation {
resourceItem := ResourceItem{
Obj: newRuntimeObj,
ResourceChangeByKarmada: eventfilter.ResourceChangeByKarmada(unstructuredOldObj, unstructuredNewObj),
}

d.Processor.Enqueue(resourceItem)
return
}

// For non-lazy policies, there is no need to distinguish whether the change is from Karmada or not.
resourceItem := ResourceItem{
Obj: newRuntimeObj,
}
d.Processor.Enqueue(resourceItem)
}

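To make the new branching easier to follow, here is a reduced model of the decision OnUpdate now makes: the ResourceChangeByKarmada flag is attached only when the object is claimed by a lazy-activation policy, so non-lazy objects always map to a single queue item shape. The types below are simplified stand-ins, not the detector's real ones.

package main

import "fmt"

// ResourceItem mirrors the shape enqueued above, with the object reduced to a string.
type ResourceItem struct {
	Obj                     string
	ResourceChangeByKarmada bool
}

// enqueueOnUpdate models the new OnUpdate flow: only lazy-activation objects carry
// the "change by Karmada" flag; everything else is enqueued without it.
func enqueueOnUpdate(obj string, changeByKarmada, isLazyActivation bool, enqueue func(ResourceItem)) {
	if isLazyActivation {
		enqueue(ResourceItem{Obj: obj, ResourceChangeByKarmada: changeByKarmada})
		return
	}
	enqueue(ResourceItem{Obj: obj})
}

func main() {
	show := func(it ResourceItem) { fmt.Printf("%+v\n", it) }
	enqueueOnUpdate("default/nginx", true, true, show)  // {Obj:default/nginx ResourceChangeByKarmada:true}
	enqueueOnUpdate("default/nginx", true, false, show) // {Obj:default/nginx ResourceChangeByKarmada:false}
}
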
@ -478,12 +491,7 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object
bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution
bindingCopy.Spec.PreserveResourcesOnDeletion = binding.Spec.PreserveResourcesOnDeletion
bindingCopy.Spec.SchedulePriority = binding.Spec.SchedulePriority
if binding.Spec.Suspension != nil {
if bindingCopy.Spec.Suspension == nil {
bindingCopy.Spec.Suspension = &workv1alpha2.Suspension{}
}
bindingCopy.Spec.Suspension.Suspension = binding.Spec.Suspension.Suspension
}
bindingCopy.Spec.Suspension = util.MergePolicySuspension(bindingCopy.Spec.Suspension, policy.Spec.Suspension)
excludeClusterPolicy(bindingCopy)
return nil
})

@ -573,12 +581,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution
bindingCopy.Spec.PreserveResourcesOnDeletion = binding.Spec.PreserveResourcesOnDeletion
bindingCopy.Spec.SchedulePriority = binding.Spec.SchedulePriority
if binding.Spec.Suspension != nil {
if bindingCopy.Spec.Suspension == nil {
bindingCopy.Spec.Suspension = &workv1alpha2.Suspension{}
}
bindingCopy.Spec.Suspension.Suspension = binding.Spec.Suspension.Suspension
}
bindingCopy.Spec.Suspension = util.MergePolicySuspension(bindingCopy.Spec.Suspension, policy.Spec.Suspension)
return nil
})
return err

@ -625,12 +628,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
bindingCopy.Spec.Failover = binding.Spec.Failover
bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution
bindingCopy.Spec.PreserveResourcesOnDeletion = binding.Spec.PreserveResourcesOnDeletion
if binding.Spec.Suspension != nil {
if bindingCopy.Spec.Suspension == nil {
bindingCopy.Spec.Suspension = &workv1alpha2.Suspension{}
}
bindingCopy.Spec.Suspension.Suspension = binding.Spec.Suspension.Suspension
}
bindingCopy.Spec.Suspension = util.MergePolicySuspension(bindingCopy.Spec.Suspension, policy.Spec.Suspension)
return nil
})
return err

@ -654,9 +652,6 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
}

// GetUnstructuredObject retrieves the object by key and returns it in unstructured form.
// Any updates to this resource template are not recommended as it may come from the informer cache.
// We should abide by the principle of making a deep copy first and then modifying it.
// See issue: https://github.com/karmada-io/karmada/issues/3878.
func (d *ResourceDetector) GetUnstructuredObject(objectKey keys.ClusterWideKey) (*unstructured.Unstructured, error) {
objectGVR, err := restmapper.GetGroupVersionResource(d.RESTMapper, objectKey.GroupVersionKind())
if err != nil {

@ -686,7 +681,66 @@ func (d *ResourceDetector) GetUnstructuredObject(objectKey keys.ClusterWideKey)
return nil, err
}

return unstructuredObj, nil
// perform a deep copy to avoid modifying the cached object from informer
return unstructuredObj.DeepCopy(), nil
}

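The DeepCopy added above matters because objects returned from an informer lister are shared with the cache; mutating them in place corrupts what every other reader sees. A small, self-contained illustration of the safe pattern:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Pretend this object came straight out of an informer cache.
	cached := &unstructured.Unstructured{Object: map[string]interface{}{
		"metadata": map[string]interface{}{"labels": map[string]interface{}{"claimed": "old"}},
	}}

	// Callers get an independent copy, so labeling or annotating it leaves the cache untouched.
	copied := cached.DeepCopy()
	copied.SetLabels(map[string]string{"claimed": "new"})

	fmt.Println(cached.GetLabels()["claimed"], copied.GetLabels()["claimed"]) // old new
}
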
// fetchResourceBinding fetches a ResourceBinding from the client or dynamic client.
func (d *ResourceDetector) fetchResourceBinding(ctx context.Context, rbNamespace, rbName string) (*workv1alpha2.ResourceBinding, error) {
// First try to get ResourceBinding using cached client
rb := &workv1alpha2.ResourceBinding{}
err := d.Client.Get(ctx, client.ObjectKey{Namespace: rbNamespace, Name: rbName}, rb)
if err != nil {
if apierrors.IsNotFound(err) {
// If not found with client, try using dynamic client
gvr := workv1alpha2.SchemeGroupVersion.WithResource(workv1alpha2.ResourcePluralResourceBinding)
unstructuredRB, dynamicErr := d.DynamicClient.Resource(gvr).Namespace(rbNamespace).Get(ctx, rbName, metav1.GetOptions{})
if dynamicErr != nil {
return nil, dynamicErr
}

// Convert unstructured to ResourceBinding
if err = helper.ConvertToTypedObject(unstructuredRB, rb); err != nil {
klog.Errorf("Failed to convert unstructured to ResourceBinding(%s/%s): %v", rbNamespace, rbName, err)
return nil, err
}

return rb, nil
}
klog.Errorf("Failed to get ResourceBinding(%s/%s): %v", rbNamespace, rbName, err)
return nil, err
}

return rb, nil
}

// fetchClusterResourceBinding fetches a ClusterResourceBinding from the client or dynamic client.
func (d *ResourceDetector) fetchClusterResourceBinding(ctx context.Context, crbName string) (*workv1alpha2.ClusterResourceBinding, error) {
// First try to get ClusterResourceBinding using cached client
crb := &workv1alpha2.ClusterResourceBinding{}
err := d.Client.Get(ctx, client.ObjectKey{Name: crbName}, crb)
if err != nil {
if apierrors.IsNotFound(err) {
// If not found with client, try using dynamic client
gvr := workv1alpha2.SchemeGroupVersion.WithResource(workv1alpha2.ResourcePluralClusterResourceBinding)
unstructuredRB, dynamicErr := d.DynamicClient.Resource(gvr).Get(ctx, crbName, metav1.GetOptions{})
if dynamicErr != nil {
return nil, dynamicErr
}

// Convert unstructured to ClusterResourceBinding
if err = helper.ConvertToTypedObject(unstructuredRB, crb); err != nil {
klog.Errorf("Failed to convert unstructured to ClusterResourceBinding(%s): %v", crbName, err)
return nil, err
}

return crb, nil
}
klog.Errorf("Failed to get ClusterResourceBinding(%s): %v", crbName, err)
return nil, err
}

return crb, nil
}

// ClaimPolicyForObject set policy identifier which the object associated with.

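The two fetch helpers above follow the same pattern: prefer the (possibly lagging) cached client, and fall back to a live read through the dynamic client when the cache reports NotFound. Below is a generic sketch of that pattern, using a ConfigMap so the example stays self-contained; the helper name and GVR are illustrative, not part of Karmada's API.

package fallback

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

var configMapGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}

// getConfigMap prefers the cached client and falls back to a live dynamic-client
// read when the cache says NotFound, then converts back to the typed object.
func getConfigMap(ctx context.Context, c client.Client, dyn dynamic.Interface, ns, name string) (*corev1.ConfigMap, error) {
	cm := &corev1.ConfigMap{}
	err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: name}, cm)
	if err == nil {
		return cm, nil
	}
	if !apierrors.IsNotFound(err) {
		return nil, err
	}

	// Cache miss: read through the dynamic client, which goes straight to the API server.
	u, err := dyn.Resource(configMapGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, cm); err != nil {
		return nil, err
	}
	return cm, nil
}
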
@ -982,7 +1036,7 @@ func (d *ResourceDetector) ReconcilePropagationPolicy(key util.QueueKey) error {

if !propagationObject.DeletionTimestamp.IsZero() {
klog.Infof("PropagationPolicy(%s) is being deleted.", ckey.NamespaceKey())
if err = d.HandlePropagationPolicyDeletion(propagationObject.Labels[policyv1alpha1.PropagationPolicyPermanentIDLabel]); err != nil {
if err = d.HandlePropagationPolicyDeletion(propagationObject.Labels[policyv1alpha1.PropagationPolicyPermanentIDLabel], propagationObject.Spec.ResourceSelectors); err != nil {
return err
}
if controllerutil.RemoveFinalizer(propagationObject, util.PropagationPolicyControllerFinalizer) {

@ -1084,7 +1138,7 @@ func (d *ResourceDetector) ReconcileClusterPropagationPolicy(key util.QueueKey)

if !propagationObject.DeletionTimestamp.IsZero() {
klog.Infof("ClusterPropagationPolicy(%s) is being deleted.", ckey.NamespaceKey())
if err = d.HandleClusterPropagationPolicyDeletion(propagationObject.Labels[policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel]); err != nil {
if err = d.HandleClusterPropagationPolicyDeletion(propagationObject.Labels[policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel], propagationObject.Spec.ResourceSelectors); err != nil {
return err
}
if controllerutil.RemoveFinalizer(propagationObject, util.ClusterPropagationPolicyControllerFinalizer) {

@ -1105,32 +1159,26 @@ func (d *ResourceDetector) ReconcileClusterPropagationPolicy(key util.QueueKey)
// the resource template a change to match another policy).
//
// Note: The relevant ResourceBinding will continue to exist until the resource template is gone.
func (d *ResourceDetector) HandlePropagationPolicyDeletion(policyID string) error {
func (d *ResourceDetector) HandlePropagationPolicyDeletion(policyID string, resources []policyv1alpha1.ResourceSelector) error {
claimMetadata := labels.Set{policyv1alpha1.PropagationPolicyPermanentIDLabel: policyID}
rbs, err := helper.GetResourceBindings(d.Client, claimMetadata)
if err != nil {
klog.Errorf("Failed to list propagation bindings with policy permanentID(%s): %v", policyID, err)
return err
}

var errs []error
for index, binding := range rbs.Items {
// Must remove the claim metadata, such as labels and annotations, from the resource template ahead of ResourceBinding,
// otherwise might lose the chance to do that in a retry loop (in particular, the claim metadata was successfully removed
// from ResourceBinding, but resource template not), since the ResourceBinding will not be listed again.
if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, claimMetadata, CleanupPPClaimMetadata); err != nil {
klog.Errorf("Failed to clean up claim metadata from resource(%s-%s/%s) when propagationPolicy removed, error: %v",
binding.Spec.Resource.Kind, binding.Spec.Resource.Namespace, binding.Spec.Resource.Name, err)
errs = append(errs, err)
// Skip cleaning up policy labels and annotations from ResourceBinding, give a chance to do that in a retry loop.
continue
for _, resource := range util.ExtractUniqueNamespacedSelectors(resources) {
objRef := workv1alpha2.ObjectReference{
APIVersion: resource.APIVersion,
Kind: resource.Kind,
Namespace: resource.Namespace,
}

// Clean up the claim metadata from the reference binding so that the karmada scheduler won't reschedule the binding.
if err := d.CleanupResourceBindingClaimMetadata(&rbs.Items[index], claimMetadata, CleanupPPClaimMetadata); err != nil {
klog.Errorf("Failed to clean up claim metadata from resource binding(%s/%s) when propagationPolicy removed, error: %v",
binding.Namespace, binding.Name, err)
rawObjects, err := helper.FetchResourceTemplatesByLabelSelector(d.DynamicClient, d.InformerManager, d.RESTMapper, objRef, labels.SelectorFromSet(claimMetadata))
if err != nil {
errs = append(errs, err)
continue
}
for _, rawObject := range rawObjects {
err := d.handleResourceTemplateAndBindingCleanup(rawObject, objRef, claimMetadata, CleanupPPClaimMetadata)
if err != nil {
errs = append(errs, err)
}
}
}
return errors.NewAggregate(errs)

@ -1141,68 +1189,66 @@ func (d *ResourceDetector) HandlePropagationPolicyDeletion(policyID string) erro
// the resource template a change to match another policy).
//
// Note: The relevant ClusterResourceBinding or ResourceBinding will continue to exist until the resource template is gone.
func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID string) error {
func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID string, resources []policyv1alpha1.ResourceSelector) error {
var errs []error
labelSet := labels.Set{
policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID,
}
claimMetadata := labels.Set{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID}

// load the ClusterResourceBindings which labeled with current policy
crbs, err := helper.GetClusterResourceBindings(d.Client, labelSet)
if err != nil {
klog.Errorf("Failed to list clusterResourceBindings with clusterPropagationPolicy permanentID(%s), error: %v", policyID, err)
errs = append(errs, err)
} else if len(crbs.Items) > 0 {
for index, binding := range crbs.Items {
// Must remove the claim metadata, such as labels and annotations, from the resource template ahead of
// ClusterResourceBinding, otherwise might lose the chance to do that in a retry loop (in particular, the
// claim metadata was successfully removed from ClusterResourceBinding, but resource template not), since the
// ClusterResourceBinding will not be listed again.
if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, labelSet, CleanupCPPClaimMetadata); err != nil {
klog.Errorf("Failed to clean up claim metadata from resource(%s-%s) when clusterPropagationPolicy removed, error: %v",
binding.Spec.Resource.Kind, binding.Spec.Resource.Name, err)
// Skip cleaning up policy labels and annotations from ClusterResourceBinding, give a chance to do that in a retry loop.
continue
}
for _, resource := range util.ExtractUniqueNamespacedSelectors(resources) {
objRef := workv1alpha2.ObjectReference{
APIVersion: resource.APIVersion,
Kind: resource.Kind,
Namespace: resource.Namespace,
}

// Clean up the claim metadata from the reference binding so that the Karmada scheduler won't reschedule the binding.
if err := d.CleanupClusterResourceBindingClaimMetadata(&crbs.Items[index], labelSet); err != nil {
klog.Errorf("Failed to clean up claim metadata from clusterResourceBinding(%s) when clusterPropagationPolicy removed, error: %v",
binding.Name, err)
rawObjects, err := helper.FetchResourceTemplatesByLabelSelector(d.DynamicClient, d.InformerManager, d.RESTMapper, objRef, labels.SelectorFromSet(claimMetadata))
if err != nil {
errs = append(errs, err)
continue
}
for _, rawObject := range rawObjects {
err := d.handleResourceTemplateAndBindingCleanup(rawObject, objRef, claimMetadata, CleanupCPPClaimMetadata)
if err != nil {
errs = append(errs, err)
}
}
}

// load the ResourceBindings which labeled with current policy
rbs, err := helper.GetResourceBindings(d.Client, labelSet)
if err != nil {
klog.Errorf("Failed to list resourceBindings with clusterPropagationPolicy permanentID(%s), error: %v", policyID, err)
errs = append(errs, err)
} else if len(rbs.Items) > 0 {
for index, binding := range rbs.Items {
// Must remove the claim metadata, such as labels and annotations, from the resource template ahead of ResourceBinding,
// otherwise might lose the chance to do that in a retry loop (in particular, the label was successfully
// removed from ResourceBinding, but resource template not), since the ResourceBinding will not be listed again.
if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, labelSet, CleanupCPPClaimMetadata); err != nil {
klog.Errorf("Failed to clean up claim metadata from resource(%s-%s/%s) when clusterPropagationPolicy removed, error: %v",
binding.Spec.Resource.Kind, binding.Spec.Resource.Namespace, binding.Spec.Resource.Name, err)
errs = append(errs, err)
// Skip cleaning up policy labels and annotations from ResourceBinding, give a chance to do that in a retry loop.
continue
}

// Clean up the claim metadata from the reference binding so that the Karmada scheduler won't reschedule the binding.
if err := d.CleanupResourceBindingClaimMetadata(&rbs.Items[index], labelSet, CleanupCPPClaimMetadata); err != nil {
klog.Errorf("Failed to clean up claim metadata from resourceBinding(%s/%s) when clusterPropagationPolicy removed, error: %v",
binding.Namespace, binding.Name, err)
errs = append(errs, err)
}
}
}
return errors.NewAggregate(errs)
}

func (d *ResourceDetector) handleResourceTemplateAndBindingCleanup(template *unstructured.Unstructured, objRef workv1alpha2.ObjectReference, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error {
bindingName := names.GenerateBindingName(template.GetKind(), template.GetName())
if template.GetNamespace() != "" {
// Clean up the claim metadata from the reference binding so that the karmada scheduler won't reschedule the binding.
// Must remove the claim metadata, such as labels and annotations, from the ResourceBinding ahead of resource template,
// otherwise might lose the chance to do that in a retry loop (in particular, the claim metadata was successfully removed
// from resource template, but ResourceBinding not), since the resource template will not be listed again.
if err := d.CleanupResourceBindingClaimMetadata(template.GetNamespace(), bindingName, targetClaimMetadata, cleanupFunc); err != nil {
klog.Errorf("Failed to clean up claim metadata from ResourceBinding(%s/%s), error: %v",
objRef.Namespace, bindingName, err)
return err
}
} else {
// Clean up the claim metadata from the reference binding so that the Karmada scheduler won't reschedule the binding.
// Must remove the claim metadata, such as labels and annotations, from the ClusterResourceBinding ahead of resource template,
// otherwise might lose the chance to do that in a retry loop (in particular, the claim metadata was successfully removed
// from resource template, but ClusterResourceBinding not), since the resource template will not be listed again.
if err := d.CleanupClusterResourceBindingClaimMetadata(bindingName, targetClaimMetadata, cleanupFunc); err != nil {
klog.Errorf("Failed to clean up claim metadata from ClusterResourceBinding(%s), error: %v",
bindingName, err)
return err
}
}

if err := d.CleanupResourceTemplateClaimMetadata(template, objRef, targetClaimMetadata, cleanupFunc); err != nil {
klog.Errorf("Failed to clean up claim metadata from resource(%s-%s/%s) when propagationPolicy removed, error: %v",
template.GetKind(), template.GetNamespace(), template.GetName(), err)
return err
}

return nil
}

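handleResourceTemplateAndBindingCleanup encodes an ordering contract: the claim metadata must come off the (Cluster)ResourceBinding before it comes off the resource template, and a failure at either step aborts so a later retry can redo both. A minimal sketch of that contract with the actual steps abstracted away (all names here are illustrative):

package cleanup

import "fmt"

type step func() error

// cleanupTemplateAndBinding runs the binding cleanup before the template cleanup;
// if the binding step fails it returns early instead of touching the template,
// mirroring the "ahead of resource template" comments in the diff above.
func cleanupTemplateAndBinding(cleanBinding, cleanTemplate step) error {
	if err := cleanBinding(); err != nil {
		return fmt.Errorf("clean binding: %w", err)
	}
	if err := cleanTemplate(); err != nil {
		return fmt.Errorf("clean resource template: %w", err)
	}
	return nil
}
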
// HandlePropagationPolicyCreationOrUpdate handles PropagationPolicy add and update event.
// When a new policy arrives, should check whether existing objects are no longer matched by the current policy,
// if yes, clean the labels on the object.

@ -1230,7 +1276,7 @@ func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(policy *polic
if err != nil {
return err
}
d.Processor.Add(keys.ClusterWideKeyWithConfig{ClusterWideKey: resourceKey, ResourceChangeByKarmada: true})
d.enqueueResourceTemplateForPolicyChange(resourceKey, policy.Spec.ActivationPreference)
}

// check whether there are matched RT in waiting list, if so, add it to the processor

@ -1248,7 +1294,7 @@ func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(polic

for _, key := range matchedKeys {
d.RemoveWaiting(key)
d.Processor.Add(keys.ClusterWideKeyWithConfig{ClusterWideKey: key, ResourceChangeByKarmada: true})
d.enqueueResourceTemplateForPolicyChange(key, policy.Spec.ActivationPreference)
}

// If preemption is enabled, handle the preemption process.

@ -1297,14 +1343,14 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy
if err != nil {
return err
}
d.Processor.Add(keys.ClusterWideKeyWithConfig{ClusterWideKey: resourceKey, ResourceChangeByKarmada: true})
d.enqueueResourceTemplateForPolicyChange(resourceKey, policy.Spec.ActivationPreference)
}
for _, crb := range clusterResourceBindings.Items {
resourceKey, err := helper.ConstructClusterWideKey(crb.Spec.Resource)
if err != nil {
return err
}
d.Processor.Add(keys.ClusterWideKeyWithConfig{ClusterWideKey: resourceKey, ResourceChangeByKarmada: true})
d.enqueueResourceTemplateForPolicyChange(resourceKey, policy.Spec.ActivationPreference)
}

matchedKeys := d.GetMatching(policy.Spec.ResourceSelectors)

@ -1321,7 +1367,7 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy

for _, key := range matchedKeys {
d.RemoveWaiting(key)
d.Processor.Add(keys.ClusterWideKeyWithConfig{ClusterWideKey: key, ResourceChangeByKarmada: true})
d.enqueueResourceTemplateForPolicyChange(key, policy.Spec.ActivationPreference)
}

// If preemption is enabled, handle the preemption process.

@ -1335,24 +1381,14 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy
}

// CleanupResourceTemplateClaimMetadata removes claim metadata, such as labels and annotations, from object referencing by objRef.
func (d *ResourceDetector) CleanupResourceTemplateClaimMetadata(objRef workv1alpha2.ObjectReference, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error {
func (d *ResourceDetector) CleanupResourceTemplateClaimMetadata(obj *unstructured.Unstructured, objRef workv1alpha2.ObjectReference, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error {
gvr, err := restmapper.GetGroupVersionResource(d.RESTMapper, schema.FromAPIVersionAndKind(objRef.APIVersion, objRef.Kind))
if err != nil {
klog.Errorf("Failed to convert GVR from GVK(%s/%s), err: %v", objRef.APIVersion, objRef.Kind, err)
return err
}

workload := obj.DeepCopy()
return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
workload, err := d.DynamicClient.Resource(gvr).Namespace(objRef.Namespace).Get(context.TODO(), objRef.Name, metav1.GetOptions{})
if err != nil {
// do nothing if resource template not exist, it might have been removed.
if apierrors.IsNotFound(err) {
return nil
}
klog.Errorf("Failed to fetch resource(kind=%s, %s/%s): err is %v", objRef.Kind, objRef.Namespace, objRef.Name, err)
return err
}

if !NeedCleanupClaimMetadata(workload, targetClaimMetadata) {
klog.Infof("No need to clean up the claim metadata on resource(kind=%s, %s/%s) since they have changed", workload.GetKind(), workload.GetNamespace(), workload.GetName())
return nil

@ -1363,15 +1399,37 @@ func (d *ResourceDetector) CleanupResourceTemplateClaimMetadata(objRef workv1alp
_, err = d.DynamicClient.Resource(gvr).Namespace(workload.GetNamespace()).Update(context.TODO(), workload, metav1.UpdateOptions{})
if err != nil {
klog.Errorf("Failed to update resource(kind=%s, %s/%s): err is %v", workload.GetKind(), workload.GetNamespace(), workload.GetName(), err)
if apierrors.IsConflict(err) {
newWorkload, getErr := d.DynamicClient.Resource(gvr).Namespace(workload.GetNamespace()).Get(context.TODO(), workload.GetName(), metav1.GetOptions{})
if getErr != nil {
// do nothing if resource template not exist, it might have been removed.
if apierrors.IsNotFound(getErr) {
return nil
}
return getErr
}
workload = newWorkload
}
return err
}
klog.V(2).Infof("Updated resource template(kind=%s, %s/%s) successfully", workload.GetKind(), workload.GetNamespace(), workload.GetName())
klog.V(2).Infof("Clean claimed label for resource template(kind=%s, %s/%s) successfully", workload.GetKind(), workload.GetNamespace(), workload.GetName())
return nil
})
}

// CleanupResourceBindingClaimMetadata removes claim metadata, such as labels and annotations, from resource binding.
func (d *ResourceDetector) CleanupResourceBindingClaimMetadata(rb *workv1alpha2.ResourceBinding, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error {
func (d *ResourceDetector) CleanupResourceBindingClaimMetadata(rbNamespace, rbName string, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error {
var rb *workv1alpha2.ResourceBinding
var err error

rb, err = d.fetchResourceBinding(context.TODO(), rbNamespace, rbName)
if err != nil {
if apierrors.IsNotFound(err) {
return nil // do nothing if resource binding not exist, it might have been removed.
}
return err
}

return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
if !NeedCleanupClaimMetadata(rb, targetClaimMetadata) {
klog.Infof("No need to clean up the claim metadata on ResourceBinding(%s/%s) since they have changed", rb.GetNamespace(), rb.GetName())

@ -1383,35 +1441,90 @@ func (d *ResourceDetector) CleanupResourceBindingClaimMetadata(rb *workv1alpha2.
return nil
}

updated := &workv1alpha2.ResourceBinding{}
if err = d.Client.Get(context.TODO(), client.ObjectKey{Namespace: rb.GetNamespace(), Name: rb.GetName()}, updated); err == nil {
rb = updated.DeepCopy()
} else {
klog.Errorf("Failed to get updated ResourceBinding(%s/%s): %v", rb.GetNamespace(), rb.GetName(), err)
if apierrors.IsConflict(updateErr) {
updated := &workv1alpha2.ResourceBinding{}
gvr := workv1alpha2.SchemeGroupVersion.WithResource(workv1alpha2.ResourcePluralResourceBinding)
if unstructuredRB, dynamicErr := d.DynamicClient.Resource(gvr).Namespace(rbNamespace).Get(context.TODO(), rbName, metav1.GetOptions{}); dynamicErr == nil {
// Convert unstructured to ResourceBinding
if convertErr := helper.ConvertToTypedObject(unstructuredRB, updated); convertErr != nil {
klog.Errorf("Failed to convert unstructured to ResourceBinding(%s/%s): %v", rbNamespace, rbName, convertErr)
return convertErr
}
rb = updated
} else {
if apierrors.IsNotFound(dynamicErr) {
return nil // do nothing if resource binding not exist, it might have been removed.
}
klog.Errorf("Failed to get updated ResourceBinding(%s/%s): %v", rbNamespace, rbName, dynamicErr)
return dynamicErr
}
}

return updateErr
})
}

// CleanupClusterResourceBindingClaimMetadata removes claim metadata, such as labels and annotations, from cluster resource binding.
func (d *ResourceDetector) CleanupClusterResourceBindingClaimMetadata(crb *workv1alpha2.ClusterResourceBinding, targetClaimMetadata map[string]string) error {
func (d *ResourceDetector) CleanupClusterResourceBindingClaimMetadata(crbName string, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error {
var crb *workv1alpha2.ClusterResourceBinding
var err error

crb, err = d.fetchClusterResourceBinding(context.TODO(), crbName)
if err != nil {
if apierrors.IsNotFound(err) {
return nil // do nothing if resource binding not exist, it might have been removed.
}
return err
}

return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
if !NeedCleanupClaimMetadata(crb, targetClaimMetadata) {
klog.Infof("No need to clean up the claim metadata on ClusterResourceBinding(%s) since they have changed", crb.GetName())
return nil
}
CleanupCPPClaimMetadata(crb)
cleanupFunc(crb)
updateErr := d.Client.Update(context.TODO(), crb)
if updateErr == nil {
return nil
}

updated := &workv1alpha2.ClusterResourceBinding{}
if err = d.Client.Get(context.TODO(), client.ObjectKey{Name: crb.GetName()}, updated); err == nil {
crb = updated.DeepCopy()
} else {
klog.Errorf("Failed to get updated ClusterResourceBinding(%s):: %v", crb.GetName(), err)
if apierrors.IsConflict(updateErr) {
updated := &workv1alpha2.ClusterResourceBinding{}
gvr := workv1alpha2.SchemeGroupVersion.WithResource(workv1alpha2.ResourcePluralClusterResourceBinding)
if unstructuredRB, dynamicErr := d.DynamicClient.Resource(gvr).Get(context.TODO(), crbName, metav1.GetOptions{}); dynamicErr == nil {
// Convert unstructured to ClusterResourceBinding
if convertErr := helper.ConvertToTypedObject(unstructuredRB, updated); convertErr != nil {
klog.Errorf("Failed to convert unstructured to ClusterResourceBinding(%s): %v", crbName, convertErr)
return convertErr
}
crb = updated
} else {
if apierrors.IsNotFound(dynamicErr) {
return nil // do nothing if resource binding not exist, it might have been removed.
}
klog.Errorf("Failed to get updated ClusterResourceBinding(%s): %v", crbName, dynamicErr)
return dynamicErr
}
}

return updateErr
})
}

// enqueueResourceTemplateForPolicyChange enqueues a resource template key for reconciliation in response to a
// PropagationPolicy or ClusterPropagationPolicy change. If the policy's ActivationPreference is set to Lazy,
// the ResourceChangeByKarmada flag is set to true, indicating that the resource template is being enqueued
// due to a policy change and should not be propagated to member clusters. For non-lazy policies, this flag
// is omitted as the distinction is unnecessary.
//
// Note: Setting ResourceChangeByKarmada changes the effective queue key. Mixing both true/false for the same
// resource may result in two different queue keys being processed concurrently, which can cause race conditions.
// Therefore, only set ResourceChangeByKarmada in lazy activation mode.
// For more details, see: https://github.com/karmada-io/karmada/issues/5996.
func (d *ResourceDetector) enqueueResourceTemplateForPolicyChange(key keys.ClusterWideKey, pref policyv1alpha1.ActivationPreference) {
if util.IsLazyActivationEnabled(pref) {
d.Processor.Add(keys.ClusterWideKeyWithConfig{ClusterWideKey: key, ResourceChangeByKarmada: true})
return
}
d.Processor.Add(keys.ClusterWideKeyWithConfig{ClusterWideKey: key})
}

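The note above about the "effective queue key" can be seen directly: a struct key that differs only in ResourceChangeByKarmada deduplicates into two separate work-queue entries, which is exactly the concurrent-processing hazard the comment warns about. A tiny self-contained illustration:

package main

import "fmt"

// clusterWideKeyWithConfig is a simplified stand-in for keys.ClusterWideKeyWithConfig.
type clusterWideKeyWithConfig struct {
	Name                    string
	ResourceChangeByKarmada bool
}

func main() {
	// A set keyed by the struct models a de-duplicating work queue.
	queue := map[clusterWideKeyWithConfig]struct{}{}
	queue[clusterWideKeyWithConfig{Name: "default/nginx"}] = struct{}{}
	queue[clusterWideKeyWithConfig{Name: "default/nginx", ResourceChangeByKarmada: true}] = struct{}{}
	fmt.Println(len(queue)) // 2: the same template would be reconciled under two distinct keys
}
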
@ -431,7 +431,6 @@ func TestOnUpdate(t *testing.T) {
oldObj interface{}
newObj interface{}
expectedEnqueue bool
expectedChangeByKarmada bool
expectToUnstructuredError bool
}{
{

@ -462,8 +461,7 @@ func TestOnUpdate(t *testing.T) {
},
},
},
expectedEnqueue: true,
expectedChangeByKarmada: false,
expectedEnqueue: true,
},
{
name: "update without changes",

@ -526,8 +524,7 @@ func TestOnUpdate(t *testing.T) {
},
},
},
expectedEnqueue: true,
expectedChangeByKarmada: true,
expectedEnqueue: true,
},
{
name: "core v1 object",

@ -575,7 +572,6 @@ func TestOnUpdate(t *testing.T) {
assert.IsType(t, ResourceItem{}, mockProcessor.lastEnqueued, "Enqueued item should be of type ResourceItem")
enqueued := mockProcessor.lastEnqueued.(ResourceItem)
assert.Equal(t, tt.newObj, enqueued.Obj, "Enqueued object should match the new object")
assert.Equal(t, tt.expectedChangeByKarmada, enqueued.ResourceChangeByKarmada, "ResourceChangeByKarmada flag should match expected value")
} else {
assert.Equal(t, 0, mockProcessor.enqueueCount, "Object should not be enqueued")
}

@ -973,6 +969,71 @@ func TestApplyClusterPolicy(t *testing.T) {
}
}

func TestEnqueueResourceKeyWithActivationPref(t *testing.T) {
testClusterWideKey := keys.ClusterWideKey{
Group: "foo",
Version: "foo",
Kind: "foo",
Namespace: "foo",
Name: "foo",
}
tests := []struct {
name string
key keys.ClusterWideKey
pref policyv1alpha1.ActivationPreference
want keys.ClusterWideKeyWithConfig
}{
{
name: "lazy pp and resourceChangeByKarmada is true",
key: testClusterWideKey,
pref: policyv1alpha1.LazyActivation,
want: keys.ClusterWideKeyWithConfig{
ClusterWideKey: testClusterWideKey,
ResourceChangeByKarmada: true,
},
},
{
name: "non-lazy ignores ResourceChangeByKarmada",
key: testClusterWideKey,
pref: "",
want: keys.ClusterWideKeyWithConfig{
ClusterWideKey: testClusterWideKey,
ResourceChangeByKarmada: false,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
detector := ResourceDetector{
Processor: util.NewAsyncWorker(util.Options{
Name: "resource detector",
KeyFunc: ResourceItemKeyFunc,
ReconcileFunc: func(key util.QueueKey) (err error) {
defer cancel()
defer func() {
assert.NoError(t, err)
}()
clusterWideKeyWithConfig, ok := key.(keys.ClusterWideKeyWithConfig)
if !ok {
err = fmt.Errorf("invalid key")
return err
}
if clusterWideKeyWithConfig != tt.want {
err = fmt.Errorf("unexpected key. want:%+v, got:%+v", tt.want, clusterWideKeyWithConfig)
return err
}
return nil
},
}),
}
detector.Processor.Run(1, ctx.Done())
detector.enqueueResourceTemplateForPolicyChange(tt.key, tt.pref)
<-ctx.Done()
})
}
}

// Helper Functions

// setupTestScheme creates a runtime scheme with necessary types for testing

@ -108,11 +108,24 @@ func (d *ResourceDetector) propagateResource(object *unstructured.Unstructured,
func (d *ResourceDetector) getAndApplyPolicy(object *unstructured.Unstructured, objectKey keys.ClusterWideKey,
resourceChangeByKarmada bool, policyNamespace, policyName, claimedID string) error {
policyObject, err := d.propagationPolicyLister.ByNamespace(policyNamespace).Get(policyName)
if err != nil {
if apierrors.IsNotFound(err) {
klog.V(4).Infof("PropagationPolicy(%s/%s) has been removed.", policyNamespace, policyName)
return d.HandlePropagationPolicyDeletion(claimedID)
if apierrors.IsNotFound(err) {
// The expected PropagationPolicy has been removed, indicating the cleanup process has completed.
// However, if the claimed data still exists in the resource template, it likely means the cleanup
// for the resource template was missed. This can happen if the claimed data was added to the resource
// template and then the PropagationPolicy was removed very shortly after.
// When cleaning up resource templates based on PropagationPolicy deletion, the cache may not have received
// the resource template update in time, resulting in a missed cleanup.
klog.V(4).Infof("PropagationPolicy(%s/%s) has been removed.", policyNamespace, policyName)
claimMetadata := labels.Set{policyv1alpha1.PropagationPolicyPermanentIDLabel: claimedID}
objRef := workv1alpha2.ObjectReference{
APIVersion: object.GetAPIVersion(),
Kind: object.GetKind(),
Namespace: object.GetNamespace(),
Name: object.GetName(),
}
return d.handleResourceTemplateAndBindingCleanup(object, objRef, claimMetadata, CleanupPPClaimMetadata)
}
if err != nil {
klog.Errorf("Failed to get claimed policy(%s/%s),: %v", policyNamespace, policyName, err)
return err
}

@ -123,6 +136,11 @@ func (d *ResourceDetector) getAndApplyPolicy(object *unstructured.Unstructured,
return err
}

// If the policy is being deleted, we should not apply it. Instead, waiting for the next reconcile to clean up metadata.
if !matchedPropagationPolicy.DeletionTimestamp.IsZero() {
return fmt.Errorf("policy(%s/%s) is being deleted", policyNamespace, policyName)
}

// Some resources are available in more than one group in the same kubernetes version.
// Therefore, the following scenarios occurs:
// In v1.21 kubernetes cluster, Ingress are available in both networking.k8s.io and extensions groups.

@ -146,12 +164,24 @@ func (d *ResourceDetector) getAndApplyPolicy(object *unstructured.Unstructured,
func (d *ResourceDetector) getAndApplyClusterPolicy(object *unstructured.Unstructured, objectKey keys.ClusterWideKey,
resourceChangeByKarmada bool, policyName, policyID string) error {
policyObject, err := d.clusterPropagationPolicyLister.Get(policyName)
if err != nil {
if apierrors.IsNotFound(err) {
klog.V(4).Infof("ClusterPropagationPolicy(%s) has been removed.", policyName)
return d.HandleClusterPropagationPolicyDeletion(policyID)
if apierrors.IsNotFound(err) {
// The expected ClusterPropagationPolicy has been removed, indicating the cleanup process has completed.
// However, if the claimed data still exists in the resource template, it likely means the cleanup
// for the resource template was missed. This can happen if the claimed data was added to the resource
// template and then the ClusterPropagationPolicy was removed very shortly after.
// When cleaning up resource templates based on ClusterPropagationPolicy deletion, the cache may not have received
// the resource template update in time, resulting in a missed cleanup.
klog.V(4).Infof("ClusterPropagationPolicy(%s) has been removed.", policyName)
claimMetadata := labels.Set{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID}
objRef := workv1alpha2.ObjectReference{
APIVersion: object.GetAPIVersion(),
Kind: object.GetKind(),
Namespace: object.GetNamespace(),
Name: object.GetName(),
}

return d.handleResourceTemplateAndBindingCleanup(object, objRef, claimMetadata, CleanupCPPClaimMetadata)
}
if err != nil {
klog.Errorf("Failed to get claimed policy(%s),: %v", policyName, err)
return err
}

@ -162,6 +192,11 @@ func (d *ResourceDetector) getAndApplyClusterPolicy(object *unstructured.Unstruc
return err
}

if !matchedClusterPropagationPolicy.DeletionTimestamp.IsZero() {
// If the cluster policy is being deleted, we should not apply it. Instead, waiting for the next reconcile to clean up metadata.
return fmt.Errorf("cluster policy(%s) is being deleted", policyName)
}

// Some resources are available in more than one group in the same kubernetes version.
// Therefore, the following scenarios occurs:
// In v1.21 kubernetes cluster, Ingress are available in both networking.k8s.io and extensions groups.

@ -281,7 +316,6 @@ func (d *ResourceDetector) removeResourceClaimMetadataIfNotMatched(objectReferen
return false, nil
}

object = object.DeepCopy()
util.RemoveLabels(object, labels...)
util.RemoveAnnotations(object, annotations...)

@ -339,6 +373,51 @@ func (d *ResourceDetector) listCPPDerivedCRBs(policyID, policyName string) (*wor
return bindings, nil
}

func (d *ResourceDetector) isClaimedByLazyPolicy(obj *unstructured.Unstructured) (bool, error) {
policyAnnotations := obj.GetAnnotations()
policyLabels := obj.GetLabels()
policyNamespace := util.GetAnnotationValue(policyAnnotations, policyv1alpha1.PropagationPolicyNamespaceAnnotation)
policyName := util.GetAnnotationValue(policyAnnotations, policyv1alpha1.PropagationPolicyNameAnnotation)
claimedID := util.GetLabelValue(policyLabels, policyv1alpha1.PropagationPolicyPermanentIDLabel)
if policyNamespace != "" && policyName != "" && claimedID != "" {
policyObject, err := d.propagationPolicyLister.ByNamespace(policyNamespace).Get(policyName)
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}

return false, err
}
matchedPropagationPolicy := &policyv1alpha1.PropagationPolicy{}
if err = helper.ConvertToTypedObject(policyObject, matchedPropagationPolicy); err != nil {
return false, err
}

return util.IsLazyActivationEnabled(matchedPropagationPolicy.Spec.ActivationPreference), nil
}

policyName = util.GetAnnotationValue(policyAnnotations, policyv1alpha1.ClusterPropagationPolicyAnnotation)
claimedID = util.GetLabelValue(policyLabels, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel)
if policyName != "" && claimedID != "" {
policyObject, err := d.clusterPropagationPolicyLister.Get(policyName)
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}

return false, err
}
matchedClusterPropagationPolicy := &policyv1alpha1.ClusterPropagationPolicy{}
if err = helper.ConvertToTypedObject(policyObject, matchedClusterPropagationPolicy); err != nil {
return false, err
}

return util.IsLazyActivationEnabled(matchedClusterPropagationPolicy.Spec.ActivationPreference), nil
}

return false, nil
}

// excludeClusterPolicy excludes cluster propagation policy.
// If propagation policy was claimed, cluster propagation policy should not exist.
func excludeClusterPolicy(obj metav1.Object) (hasClaimedClusterPolicy bool) {

@ -19,6 +19,7 @@ package utils
import (
"context"
"fmt"
"time"

appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@ -35,7 +36,7 @@ var (
// This blocks until the Deployment's observed generation and ready replicas match the desired state,
// ensuring it is fully rolled out.
WaitForDeploymentRollout = func(c clientset.Interface, dep *appsv1.Deployment, timeoutSeconds int) error {
return cmdutil.WaitForDeploymentRollout(c, dep, timeoutSeconds)
return cmdutil.WaitForDeploymentRollout(c, dep, time.Duration(timeoutSeconds)*time.Second)
}
)

@ -18,6 +18,7 @@ package utils

import (
"context"
"time"

appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@ -32,5 +33,5 @@ func CreateDeployAndWait(kubeClientSet kubernetes.Interface, deployment *appsv1.
if _, err := kubeClientSet.AppsV1().Deployments(deployment.GetNamespace()).Create(context.TODO(), deployment, metav1.CreateOptions{}); err != nil {
klog.Warning(err)
}
return util.WaitForDeploymentRollout(kubeClientSet, deployment, waitComponentReadyTimeout)
return util.WaitForDeploymentRollout(kubeClientSet, deployment, time.Duration(waitComponentReadyTimeout)*time.Second)
}

@ -228,22 +228,15 @@ func (j *CommandJoinOption) RunJoinCluster(controlPlaneRestConfig, clusterConfig
ClusterConfig: clusterConfig,
}

id, err := util.ObtainClusterID(clusterKubeClient)
registerOption.ClusterID, err = util.ObtainClusterID(clusterKubeClient)
if err != nil {
return err
}

ok, name, err := util.IsClusterIdentifyUnique(karmadaClient, id)
if err != nil {
if err = registerOption.Validate(karmadaClient, false); err != nil {
return err
}

if !ok {
return fmt.Errorf("the same cluster has been registered with name %s", name)
}

registerOption.ClusterID = id

clusterSecret, impersonatorSecret, err := util.ObtainCredentialsFromMemberCluster(clusterKubeClient, registerOption)
if err != nil {
return err

@ -105,7 +105,7 @@ func TestRunJoinCluster(t *testing.T) {
prep func(karmadaClient karmadaclientset.Interface, controlKubeClient kubeclient.Interface, clusterKubeClient kubeclient.Interface, opts *CommandJoinOption, clusterID types.UID, clusterName string) error
verify func(karmadaClient karmadaclientset.Interface, controlKubeClient kubeclient.Interface, clusterKubeClint kubeclient.Interface, opts *CommandJoinOption, clusterID types.UID) error
wantErr bool
errMsg string
errMsg func(opts *CommandJoinOption, clusterID types.UID) string
}{
{
name: "RunJoinCluster_RegisterTheSameClusterWithSameID_TheSameClusterHasBeenRegistered",

@ -136,7 +136,9 @@ func TestRunJoinCluster(t *testing.T) {
return nil
},
wantErr: true,
errMsg: "the same cluster has been registered with name member1",
errMsg: func(opts *CommandJoinOption, clusterID types.UID) string {
return fmt.Sprintf("the cluster ID %s or the cluster name %s has been registered", clusterID, opts.ClusterName)
},
},
{
name: "RunJoinCluster_RegisterClusterInControllerPlane_ClusterRegisteredInControllerPlane",

@ -170,8 +172,8 @@ func TestRunJoinCluster(t *testing.T) {
if err != nil && !test.wantErr {
t.Errorf("unexpected error, got: %v", err)
}
if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error())
if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg(test.joinOpts, test.clusterID)) {
t.Errorf("expected error message %s to be in %s", test.errMsg(test.joinOpts, test.clusterID), err.Error())
}
if err := test.verify(test.karmadaClient, test.controlKubeClient, test.clusterKubeClient, test.joinOpts, test.clusterID); err != nil {
t.Errorf("failed to verify joining the cluster, got error: %v", err)

@ -460,12 +460,9 @@ func (o *CommandPromoteOption) promoteDeps(memberClusterFactory cmdutil.Factory,
controlPlaneKubeClientSet := kubeClientBuilder(config)
sharedFactory := informers.NewSharedInformerFactory(controlPlaneKubeClientSet, 0)
serviceLister := sharedFactory.Core().V1().Services().Lister()

sharedFactory.Start(stopCh)
sharedFactory.WaitForCacheSync(stopCh)
controlPlaneInformerManager.Start()
if sync := controlPlaneInformerManager.WaitForCacheSync(); sync == nil {
return errors.New("informer factory for cluster does not exist")
}

defaultInterpreter := native.NewDefaultInterpreter()
thirdpartyInterpreter := thirdparty.NewConfigurableInterpreter()

@ -475,6 +472,11 @@ func (o *CommandPromoteOption) promoteDeps(memberClusterFactory cmdutil.Factory,
return fmt.Errorf("failed to create customized interpreter: %v", err)
}

controlPlaneInformerManager.Start()
if syncs := controlPlaneInformerManager.WaitForCacheSync(); len(syncs) == 0 {
return errors.New("no informers registered in the informer factory")
}

// check if the resource interpreter supports to interpret dependencies
if !defaultInterpreter.HookEnabled(obj.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretDependency) &&
!thirdpartyInterpreter.HookEnabled(obj.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretDependency) &&

@ -444,7 +444,7 @@ func (o *CommandRegisterOption) EnsureNecessaryResourcesExistInMemberCluster(boo
return err
}

if err = cmdutil.WaitForDeploymentRollout(o.memberClusterClient, KarmadaAgentDeployment, int(o.Timeout)); err != nil {
if err = cmdutil.WaitForDeploymentRollout(o.memberClusterClient, KarmadaAgentDeployment, o.Timeout); err != nil {
return err
}

@ -59,10 +59,10 @@ func WaitForStatefulSetRollout(c kubernetes.Interface, sts *appsv1.StatefulSet,
return nil
}

// WaitForDeploymentRollout wait for Deployment reaches the ready state or timeout.
func WaitForDeploymentRollout(c kubernetes.Interface, dep *appsv1.Deployment, timeoutSeconds int) error {
// WaitForDeploymentRollout wait for Deployment reaches the ready state or timeout.
func WaitForDeploymentRollout(c kubernetes.Interface, dep *appsv1.Deployment, timeout time.Duration) error {
var lastErr error
pollError := wait.PollUntilContextTimeout(context.TODO(), time.Second, time.Duration(timeoutSeconds)*time.Second, true, func(ctx context.Context) (bool, error) {
pollError := wait.PollUntilContextTimeout(context.TODO(), time.Second, timeout, true, func(ctx context.Context) (bool, error) {
d, err := c.AppsV1().Deployments(dep.GetNamespace()).Get(ctx, dep.GetName(), metav1.GetOptions{})
if err != nil {
lastErr = err

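With the signature change above, WaitForDeploymentRollout takes a time.Duration instead of an int of seconds, so call sites convert explicitly (as the karmadactl changes earlier in this diff do). A hypothetical caller, with the rollout wait reduced to a stub so the snippet stays self-contained:

package main

import (
	"fmt"
	"time"
)

// waitForRollout is a stand-in for cmdutil.WaitForDeploymentRollout(c, dep, timeout);
// only the timeout handling is of interest here.
func waitForRollout(timeout time.Duration) error {
	fmt.Printf("polling every second for up to %s\n", timeout)
	return nil
}

func main() {
	timeoutSeconds := 120 // e.g. a value parsed from a flag as an int
	_ = waitForRollout(time.Duration(timeoutSeconds) * time.Second)
}
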
@ -17,38 +17,38 @@ limitations under the License.
package configmanager

import (
"fmt"
"errors"
"sort"
"sync/atomic"

"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"

configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/fedinformer"
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
"github.com/karmada-io/karmada/pkg/util/helper"
)

var resourceInterpreterCustomizationsGVR = schema.GroupVersionResource{
Group: configv1alpha1.GroupVersion.Group,
Version: configv1alpha1.GroupVersion.Version,
Resource: "resourceinterpretercustomizations",
}

// ConfigManager can list custom resource interpreter.
type ConfigManager interface {
CustomAccessors() map[schema.GroupVersionKind]CustomAccessor
HasSynced() bool
// LoadConfig is used to load ResourceInterpreterCustomization into the cache,
// it requires the provided customizations to be a full list of objects.
// It is recommended to be called during startup. After called, HasSynced() will always
// return true, and CustomAccessors() will return a map of CustomAccessor containing
// all ResourceInterpreterCustomization configurations.
LoadConfig(customizations []*configv1alpha1.ResourceInterpreterCustomization)
}

// interpreterConfigManager collects the resource interpreter customization.
type interpreterConfigManager struct {
initialSynced atomic.Bool
informer genericmanager.SingleClusterInformerManager
lister cache.GenericLister
configuration atomic.Value
}

@ -64,16 +64,12 @@ func (configManager *interpreterConfigManager) HasSynced() bool {
return true
}

if configuration, err := configManager.lister.List(labels.Everything()); err == nil && len(configuration) == 0 {
// the empty list we initially stored is valid to use.
// Setting initialSynced to true, so subsequent checks
// would be able to take the fast path on the atomic boolean in a
// cluster without any customization configured.
configManager.initialSynced.Store(true)
// the informer has synced, and we don't have any items
return true
err := configManager.updateConfiguration()
if err != nil {
klog.ErrorS(err, "error updating configuration")
return false
}
return false
return true
}

// NewInterpreterConfigManager watches ResourceInterpreterCustomization and organizes

@ -84,35 +80,48 @@ func NewInterpreterConfigManager(informer genericmanager.SingleClusterInformerMa

// In interpret command, rules are not loaded from server, so we don't start informer for it.
if informer != nil {
manager.lister = informer.Lister(resourceInterpreterCustomizationsGVR)
manager.informer = informer
manager.lister = informer.Lister(util.ResourceInterpreterCustomizationsGVR)
configHandlers := fedinformer.NewHandlerOnEvents(
func(_ interface{}) { manager.updateConfiguration() },
func(_, _ interface{}) { manager.updateConfiguration() },
func(_ interface{}) { manager.updateConfiguration() })
informer.ForResource(resourceInterpreterCustomizationsGVR, configHandlers)
func(_ interface{}) { _ = manager.updateConfiguration() },
func(_, _ interface{}) { _ = manager.updateConfiguration() },
func(_ interface{}) { _ = manager.updateConfiguration() })
informer.ForResource(util.ResourceInterpreterCustomizationsGVR, configHandlers)
}

return manager
}

func (configManager *interpreterConfigManager) updateConfiguration() {
// updateConfiguration is used as the event handler for the ResourceInterpreterCustomization resource.
// Any changes (add, update, delete) to these resources will trigger this method, which loads all
// ResourceInterpreterCustomization resources and refreshes the internal cache accordingly.
// Note: During startup, some events may be missed if the informer has not yet synced. If all events
// are missed during startup, updateConfiguration will be called when HasSynced() is invoked for the
// first time, ensuring the cache is updated on first use.
func (configManager *interpreterConfigManager) updateConfiguration() error {
if configManager.informer == nil {
return errors.New("informer manager is not configured")
}
if !configManager.informer.IsInformerSynced(util.ResourceInterpreterCustomizationsGVR) {
return errors.New("informer of ResourceInterpreterCustomization not synced")
}

configurations, err := configManager.lister.List(labels.Everything())
if err != nil {
utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err))
return
return err
}

configs := make([]*configv1alpha1.ResourceInterpreterCustomization, len(configurations))
for index, c := range configurations {
config := &configv1alpha1.ResourceInterpreterCustomization{}
if err = helper.ConvertToTypedObject(c, config); err != nil {
klog.Errorf("Failed to transform ResourceInterpreterCustomization: %v", err)
return
return err
}
configs[index] = config
}

configManager.LoadConfig(configs)
return nil
}

func (configManager *interpreterConfigManager) LoadConfig(configs []*configv1alpha1.ResourceInterpreterCustomization) {

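The reworked HasSynced/updateConfiguration pair amounts to: retry a full refresh on every call until one succeeds, then take an atomic fast path forever after. A trimmed-down model of that behavior follows; the field and method names are simplified stand-ins, and the flag flip is placed here for brevity while the real manager records the successful load as part of LoadConfig.

package main

import (
	"fmt"
	"sync/atomic"
)

type configManager struct {
	initialSynced atomic.Bool
	refresh       func() error // stand-in for updateConfiguration
}

// HasSynced returns true once an initial refresh has succeeded; until then it
// retries the refresh on every call, which is what keeps a late-starting informer
// from permanently reporting "not synced".
func (m *configManager) HasSynced() bool {
	if m.initialSynced.Load() {
		return true
	}
	if err := m.refresh(); err != nil {
		return false
	}
	m.initialSynced.Store(true) // simplification: done inside LoadConfig in the real code
	return true
}

func main() {
	calls := 0
	m := &configManager{refresh: func() error { calls++; return nil }}
	fmt.Println(m.HasSynced(), m.HasSynced(), calls) // true true 1: second call takes the fast path
}
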
@ -18,14 +18,17 @@ package configmanager

import (
"context"
"errors"
"reflect"
"testing"
"time"

appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/tools/cache"

@ -151,3 +154,366 @@ func Test_interpreterConfigManager_LuaScriptAccessors(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_interpreterConfigManager_LoadConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
configs []*configv1alpha1.ResourceInterpreterCustomization
|
||||
want map[schema.GroupVersionKind]CustomAccessor
|
||||
}{
|
||||
{
|
||||
name: "empty configs",
|
||||
configs: []*configv1alpha1.ResourceInterpreterCustomization{},
|
||||
want: make(map[schema.GroupVersionKind]CustomAccessor),
|
||||
},
|
||||
{
|
||||
name: "single config",
|
||||
configs: []*configv1alpha1.ResourceInterpreterCustomization{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization01"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
Retention: &configv1alpha1.LocalValueRetention{LuaScript: "retention-script"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: map[schema.GroupVersionKind]CustomAccessor{
|
||||
{Group: "apps", Version: "v1", Kind: "Deployment"}: &resourceCustomAccessor{
|
||||
retention: &configv1alpha1.LocalValueRetention{LuaScript: "retention-script"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple configs for same GVK",
|
||||
configs: []*configv1alpha1.ResourceInterpreterCustomization{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization01"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
Retention: &configv1alpha1.LocalValueRetention{LuaScript: "retention-script"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization02"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
ReplicaResource: &configv1alpha1.ReplicaResourceRequirement{LuaScript: "replica-script"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: map[schema.GroupVersionKind]CustomAccessor{
|
||||
{Group: "apps", Version: "v1", Kind: "Deployment"}: &resourceCustomAccessor{
|
||||
retention: &configv1alpha1.LocalValueRetention{LuaScript: "retention-script"},
|
||||
replicaResource: &configv1alpha1.ReplicaResourceRequirement{LuaScript: "replica-script"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple configs for different GVKs",
|
||||
configs: []*configv1alpha1.ResourceInterpreterCustomization{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization01"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
Retention: &configv1alpha1.LocalValueRetention{LuaScript: "deployment-retention"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization02"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "StatefulSet",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
ReplicaResource: &configv1alpha1.ReplicaResourceRequirement{LuaScript: "statefulset-replica"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: map[schema.GroupVersionKind]CustomAccessor{
|
||||
{Group: "apps", Version: "v1", Kind: "Deployment"}: &resourceCustomAccessor{
|
||||
retention: &configv1alpha1.LocalValueRetention{LuaScript: "deployment-retention"},
|
||||
},
|
||||
{Group: "apps", Version: "v1", Kind: "StatefulSet"}: &resourceCustomAccessor{
|
||||
replicaResource: &configv1alpha1.ReplicaResourceRequirement{LuaScript: "statefulset-replica"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "configs sorted by name",
|
||||
configs: []*configv1alpha1.ResourceInterpreterCustomization{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization02"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
Retention: &configv1alpha1.LocalValueRetention{LuaScript: "second"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization01"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
Retention: &configv1alpha1.LocalValueRetention{LuaScript: "first"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: map[schema.GroupVersionKind]CustomAccessor{
|
||||
{Group: "apps", Version: "v1", Kind: "Deployment"}: &resourceCustomAccessor{
|
||||
retention: &configv1alpha1.LocalValueRetention{LuaScript: "first"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "overlapping customizations for same GVK",
|
||||
configs: []*configv1alpha1.ResourceInterpreterCustomization{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization01"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
Retention: &configv1alpha1.LocalValueRetention{LuaScript: "first-retention"},
|
||||
ReplicaResource: &configv1alpha1.ReplicaResourceRequirement{LuaScript: "first-replica"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "customization02"},
|
||||
Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
|
||||
Target: configv1alpha1.CustomizationTarget{
|
||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Customizations: configv1alpha1.CustomizationRules{
|
||||
Retention: &configv1alpha1.LocalValueRetention{LuaScript: "second-retention"},
|
||||
StatusReflection: &configv1alpha1.StatusReflection{LuaScript: "second-status"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: map[schema.GroupVersionKind]CustomAccessor{
|
||||
{Group: "apps", Version: "v1", Kind: "Deployment"}: &resourceCustomAccessor{
|
||||
retention: &configv1alpha1.LocalValueRetention{LuaScript: "first-retention"},
|
||||
replicaResource: &configv1alpha1.ReplicaResourceRequirement{LuaScript: "first-replica"},
|
||||
statusReflection: &configv1alpha1.StatusReflection{LuaScript: "second-status"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
configManager := &interpreterConfigManager{}
|
||||
configManager.configuration.Store(make(map[schema.GroupVersionKind]CustomAccessor))
|
||||
|
||||
configManager.LoadConfig(tt.configs)
|
||||
|
||||
got := configManager.CustomAccessors()
|
||||
|
||||
if len(got) != len(tt.want) {
|
||||
t.Errorf("LoadConfig() got %d accessors, want %d", len(got), len(tt.want))
|
||||
}
|
||||
|
||||
for gvk, wantAccessor := range tt.want {
|
||||
gotAccessor, exists := got[gvk]
|
||||
if !exists {
|
||||
t.Errorf("LoadConfig() missing accessor for GVK %v", gvk)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(gotAccessor, wantAccessor) {
|
||||
t.Errorf("LoadConfig() accessor for GVK %v = %v, want %v", gvk, gotAccessor, wantAccessor)
|
||||
}
|
||||
}
|
||||
|
||||
if !configManager.initialSynced.Load() {
|
||||
t.Errorf("LoadConfig() should set initialSynced to true")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_interpreterConfigManager_updateConfiguration(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupManager func() *interpreterConfigManager
|
||||
wantErr bool
|
||||
expectedErrMsg string
|
||||
}{
|
||||
{
|
||||
name: "informer not initialized",
|
||||
setupManager: func() *interpreterConfigManager {
|
||||
return &interpreterConfigManager{
|
||||
informer: nil,
|
||||
}
|
||||
},
|
||||
wantErr: true,
|
||||
expectedErrMsg: "informer manager is not configured",
|
||||
},
|
||||
{
|
||||
name: "informer not synced",
|
||||
setupManager: func() *interpreterConfigManager {
|
||||
mockInformer := &mockSingleClusterInformerManager{
|
||||
isSynced: false,
|
||||
}
|
||||
return &interpreterConfigManager{
|
||||
informer: mockInformer,
|
||||
}
|
||||
},
|
||||
wantErr: true,
|
||||
expectedErrMsg: "informer of ResourceInterpreterCustomization not synced",
|
||||
},
|
||||
{
|
||||
name: "lister list error",
|
||||
setupManager: func() *interpreterConfigManager {
|
||||
mockInformer := &mockSingleClusterInformerManager{
|
||||
isSynced: true,
|
||||
}
|
||||
mockLister := &mockGenericLister{
|
||||
listErr: errors.New("list error"),
|
||||
}
|
||||
return &interpreterConfigManager{
|
||||
informer: mockInformer,
|
||||
lister: mockLister,
|
||||
}
|
||||
},
|
||||
wantErr: true,
|
||||
expectedErrMsg: "list error",
|
||||
},
|
||||
{
|
||||
name: "successful update with empty list",
|
||||
setupManager: func() *interpreterConfigManager {
|
||||
mockInformer := &mockSingleClusterInformerManager{
|
||||
isSynced: true,
|
||||
}
|
||||
mockLister := &mockGenericLister{
|
||||
items: []runtime.Object{},
|
||||
}
|
||||
manager := &interpreterConfigManager{
|
||||
informer: mockInformer,
|
||||
lister: mockLister,
|
||||
}
|
||||
manager.configuration.Store(make(map[schema.GroupVersionKind]CustomAccessor))
|
||||
return manager
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
configManager := tt.setupManager()
|
||||
|
||||
err := configManager.updateConfiguration()
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("updateConfiguration() expected error but got nil")
|
||||
return
|
||||
}
|
||||
if tt.expectedErrMsg != "" && err.Error() != tt.expectedErrMsg {
|
||||
t.Errorf("updateConfiguration() error = %v, want %v", err.Error(), tt.expectedErrMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("updateConfiguration() unexpected error = %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Mock implementations for testing
|
||||
type mockSingleClusterInformerManager struct {
|
||||
isSynced bool
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) IsInformerSynced(_ schema.GroupVersionResource) bool {
|
||||
return m.isSynced
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) Lister(_ schema.GroupVersionResource) cache.GenericLister {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) ForResource(_ schema.GroupVersionResource, _ cache.ResourceEventHandler) {
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) Start() {
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) Stop() {
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) WaitForCacheSync() map[schema.GroupVersionResource]bool {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) WaitForCacheSyncWithTimeout(_ time.Duration) map[schema.GroupVersionResource]bool {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) GetClient() dynamic.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) IsHandlerExist(_ schema.GroupVersionResource, _ cache.ResourceEventHandler) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type mockGenericLister struct {
|
||||
items []runtime.Object
|
||||
listErr error
|
||||
}
|
||||
|
||||
func (m *mockGenericLister) List(_ labels.Selector) ([]runtime.Object, error) {
|
||||
if m.listErr != nil {
|
||||
return nil, m.listErr
|
||||
}
|
||||
return m.items, nil
|
||||
}
|
||||
|
||||
func (m *mockGenericLister) Get(_ string) (runtime.Object, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockGenericLister) ByNamespace(_ string) cache.GenericNamespaceLister {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -17,37 +17,38 @@ limitations under the License.
|
|||
package configmanager
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
|
||||
"github.com/karmada-io/karmada/pkg/util"
|
||||
"github.com/karmada-io/karmada/pkg/util/fedinformer"
|
||||
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
|
||||
"github.com/karmada-io/karmada/pkg/util/helper"
|
||||
)
|
||||
|
||||
var resourceExploringWebhookConfigurationsGVR = schema.GroupVersionResource{
|
||||
Group: configv1alpha1.GroupVersion.Group,
|
||||
Version: configv1alpha1.GroupVersion.Version,
|
||||
Resource: "resourceinterpreterwebhookconfigurations",
|
||||
}
|
||||
|
||||
// ConfigManager can list dynamic webhooks.
|
||||
type ConfigManager interface {
|
||||
HookAccessors() []WebhookAccessor
|
||||
HasSynced() bool
|
||||
// LoadConfig is used to load ResourceInterpreterWebhookConfiguration into the cache,
|
||||
// it requires the provided webhookConfigurations to be a full list of objects.
|
||||
// It is recommended to be called during startup. Once called, HasSynced() will always
|
||||
// return true, and HookAccessors() will return a list of WebhookAccessor containing
|
||||
// all ResourceInterpreterWebhookConfiguration configurations.
|
||||
LoadConfig(webhookConfigurations []*configv1alpha1.ResourceInterpreterWebhookConfiguration)
|
||||
}
|
||||
|
||||
// interpreterConfigManager collects the resource interpreter webhook configurations.
|
||||
type interpreterConfigManager struct {
|
||||
configuration atomic.Value
|
||||
informer genericmanager.SingleClusterInformerManager
|
||||
lister cache.GenericLister
|
||||
initialSynced atomic.Bool
|
||||
}
|
||||
|
@ -63,61 +64,68 @@ func (m *interpreterConfigManager) HasSynced() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
if configuration, err := m.lister.List(labels.Everything()); err == nil && len(configuration) == 0 {
|
||||
// the empty list we initially stored is valid to use.
|
||||
// Setting initialSynced to true, so subsequent checks
|
||||
// would be able to take the fast path on the atomic boolean in a
|
||||
// cluster without any webhooks configured.
|
||||
m.initialSynced.Store(true)
|
||||
// the informer has synced, and we don't have any items
|
||||
return true
|
||||
err := m.updateConfiguration()
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "error updating configuration")
|
||||
return false
|
||||
}
|
||||
return false
|
||||
return true
|
||||
}
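Below is a minimal caller-side sketch (not part of this change set) of how the lazy sync above might be consumed before serving interpretation requests; the polling helper, the one-second interval, and the imports of context, time, and k8s.io/apimachinery/pkg/util/wait are assumptions, not code from this repository.

// waitForWebhookConfig blocks until the webhook configuration cache reports
// synced; the first HasSynced call after the informer syncs triggers
// updateConfiguration, so the cache is populated on first use.
func waitForWebhookConfig(ctx context.Context, m ConfigManager) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true,
		func(context.Context) (bool, error) { return m.HasSynced(), nil })
}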
|
||||
|
||||
// NewExploreConfigManager returns a new interpreterConfigManager with resourceinterpreterwebhookconfigurations handlers.
|
||||
func NewExploreConfigManager(inform genericmanager.SingleClusterInformerManager) ConfigManager {
|
||||
manager := &interpreterConfigManager{
|
||||
lister: inform.Lister(resourceExploringWebhookConfigurationsGVR),
|
||||
lister: inform.Lister(util.ResourceInterpreterWebhookConfigurationsGVR),
|
||||
}
|
||||
|
||||
manager.configuration.Store([]WebhookAccessor{})
|
||||
|
||||
manager.informer = inform
|
||||
configHandlers := fedinformer.NewHandlerOnEvents(
|
||||
func(_ interface{}) { manager.updateConfiguration() },
|
||||
func(_, _ interface{}) { manager.updateConfiguration() },
|
||||
func(_ interface{}) { manager.updateConfiguration() })
|
||||
inform.ForResource(resourceExploringWebhookConfigurationsGVR, configHandlers)
|
||||
func(_ interface{}) { _ = manager.updateConfiguration() },
|
||||
func(_, _ interface{}) { _ = manager.updateConfiguration() },
|
||||
func(_ interface{}) { _ = manager.updateConfiguration() })
|
||||
inform.ForResource(util.ResourceInterpreterWebhookConfigurationsGVR, configHandlers)
|
||||
|
||||
return manager
|
||||
}
|
||||
|
||||
func (m *interpreterConfigManager) updateConfiguration() {
|
||||
// updateConfiguration is used as the event handler for the ResourceInterpreterWebhookConfiguration resource.
|
||||
// Any changes (add, update, delete) to these resources will trigger this method, which loads all
|
||||
// ResourceInterpreterWebhookConfiguration resources and refreshes the internal cache accordingly.
|
||||
// Note: During startup, some events may be missed if the informer has not yet synced. If all events
|
||||
// are missed during startup, updateConfiguration will be called when HasSynced() is invoked for the
|
||||
// first time, ensuring the cache is updated on first use.
|
||||
func (m *interpreterConfigManager) updateConfiguration() error {
|
||||
if m.informer == nil {
|
||||
return errors.New("informer manager is not configured")
|
||||
}
|
||||
if !m.informer.IsInformerSynced(util.ResourceInterpreterWebhookConfigurationsGVR) {
|
||||
return errors.New("informer of ResourceInterpreterWebhookConfiguration not synced")
|
||||
}
|
||||
|
||||
configurations, err := m.lister.List(labels.Everything())
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("error updating configuration: %v", err))
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
configs := make([]*configv1alpha1.ResourceInterpreterWebhookConfiguration, 0)
|
||||
for _, c := range configurations {
|
||||
unstructuredConfig, err := helper.ToUnstructured(c)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to transform ResourceInterpreterWebhookConfiguration: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
configs := make([]*configv1alpha1.ResourceInterpreterWebhookConfiguration, len(configurations))
|
||||
for index, c := range configurations {
|
||||
config := &configv1alpha1.ResourceInterpreterWebhookConfiguration{}
|
||||
err = helper.ConvertToTypedObject(unstructuredConfig, config)
|
||||
err = helper.ConvertToTypedObject(c, config)
|
||||
if err != nil {
|
||||
gvk := unstructuredConfig.GroupVersionKind().String()
|
||||
klog.Errorf("Failed to convert object(%s), err: %v", gvk, err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
configs = append(configs, config)
|
||||
configs[index] = config
|
||||
}
|
||||
|
||||
m.configuration.Store(mergeResourceExploreWebhookConfigurations(configs))
|
||||
m.LoadConfig(configs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadConfig loads the webhook configurations and updates the initialSynced flag to true.
|
||||
func (m *interpreterConfigManager) LoadConfig(webhookConfigurations []*configv1alpha1.ResourceInterpreterWebhookConfiguration) {
|
||||
m.configuration.Store(mergeResourceExploreWebhookConfigurations(webhookConfigurations))
|
||||
m.initialSynced.Store(true)
|
||||
}
|
||||
|
||||
|
|
|
@ -17,18 +17,22 @@ limitations under the License.
|
|||
package configmanager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
|
||||
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
|
||||
"github.com/karmada-io/karmada/pkg/util/helper"
|
||||
)
|
||||
|
||||
func TestNewExploreConfigManager(t *testing.T) {
|
||||
|
@ -62,6 +66,10 @@ func TestNewExploreConfigManager(t *testing.T) {
|
|||
|
||||
assert.NotNil(t, manager, "Manager should not be nil")
|
||||
assert.NotNil(t, manager.HookAccessors(), "Accessors should be initialized")
|
||||
|
||||
internalManager, ok := manager.(*interpreterConfigManager)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, informerManager, internalManager.informer)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -70,6 +78,7 @@ func TestHasSynced(t *testing.T) {
|
|||
tests := []struct {
|
||||
name string
|
||||
initialSynced bool
|
||||
informer genericmanager.SingleClusterInformerManager
|
||||
listErr error
|
||||
listResult []runtime.Object
|
||||
expectedSynced bool
|
||||
|
@ -80,24 +89,47 @@ func TestHasSynced(t *testing.T) {
|
|||
expectedSynced: true,
|
||||
},
|
||||
{
|
||||
name: "not synced but empty list",
|
||||
name: "informer not configured",
|
||||
initialSynced: false,
|
||||
informer: nil,
|
||||
expectedSynced: false,
|
||||
},
|
||||
{
|
||||
name: "informer not synced",
|
||||
initialSynced: false,
|
||||
informer: &mockSingleClusterInformerManager{
|
||||
isSynced: false,
|
||||
},
|
||||
expectedSynced: false,
|
||||
},
|
||||
{
|
||||
name: "sync with empty list",
|
||||
initialSynced: false,
|
||||
informer: &mockSingleClusterInformerManager{
|
||||
isSynced: true,
|
||||
},
|
||||
listResult: []runtime.Object{},
|
||||
expectedSynced: true,
|
||||
},
|
||||
{
|
||||
name: "not synced with items",
|
||||
name: "sync with items",
|
||||
initialSynced: false,
|
||||
informer: &mockSingleClusterInformerManager{
|
||||
isSynced: true,
|
||||
},
|
||||
listResult: []runtime.Object{
|
||||
&configv1alpha1.ResourceInterpreterWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test"},
|
||||
},
|
||||
},
|
||||
expectedSynced: false,
|
||||
expectedSynced: true,
|
||||
},
|
||||
{
|
||||
name: "list error",
|
||||
initialSynced: false,
|
||||
name: "list error",
|
||||
initialSynced: false,
|
||||
informer: &mockSingleClusterInformerManager{
|
||||
isSynced: true,
|
||||
},
|
||||
listErr: fmt.Errorf("test error"),
|
||||
expectedSynced: false,
|
||||
},
|
||||
|
@ -105,10 +137,23 @@ func TestHasSynced(t *testing.T) {
|
|||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Convert typed objects to unstructured objects for proper testing
|
||||
unstructuredItems := make([]runtime.Object, len(tt.listResult))
|
||||
for i, config := range tt.listResult {
|
||||
if config != nil {
|
||||
unstructuredObj, err := helper.ToUnstructured(config)
|
||||
assert.NoError(t, err)
|
||||
unstructuredItems[i] = unstructuredObj
|
||||
} else {
|
||||
unstructuredItems[i] = config
|
||||
}
|
||||
}
|
||||
|
||||
manager := &interpreterConfigManager{
|
||||
informer: tt.informer,
|
||||
lister: &mockLister{
|
||||
err: tt.listErr,
|
||||
items: tt.listResult,
|
||||
items: unstructuredItems,
|
||||
},
|
||||
}
|
||||
manager.initialSynced.Store(tt.initialSynced)
|
||||
|
@ -181,12 +226,30 @@ func TestUpdateConfiguration(t *testing.T) {
|
|||
name string
|
||||
configs []runtime.Object
|
||||
listErr error
|
||||
informer genericmanager.SingleClusterInformerManager
|
||||
expectedCount int
|
||||
wantSynced bool
|
||||
}{
|
||||
{
|
||||
name: "empty configuration",
|
||||
configs: []runtime.Object{},
|
||||
name: "informer not configured",
|
||||
informer: nil,
|
||||
expectedCount: 0,
|
||||
wantSynced: false,
|
||||
},
|
||||
{
|
||||
name: "informer not synced",
|
||||
informer: &mockSingleClusterInformerManager{
|
||||
isSynced: false,
|
||||
},
|
||||
expectedCount: 0,
|
||||
wantSynced: false,
|
||||
},
|
||||
{
|
||||
name: "empty configuration",
|
||||
configs: []runtime.Object{},
|
||||
informer: &mockSingleClusterInformerManager{
|
||||
isSynced: true,
|
||||
},
|
||||
expectedCount: 0,
|
||||
wantSynced: true,
|
||||
},
|
||||
|
@ -204,13 +267,19 @@ func TestUpdateConfiguration(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
informer: &mockSingleClusterInformerManager{
|
||||
isSynced: true,
|
||||
},
|
||||
expectedCount: 1,
|
||||
wantSynced: true,
|
||||
},
|
||||
{
|
||||
name: "list error",
|
||||
configs: []runtime.Object{},
|
||||
listErr: fmt.Errorf("test error"),
|
||||
name: "list error",
|
||||
configs: []runtime.Object{},
|
||||
listErr: fmt.Errorf("test error"),
|
||||
informer: &mockSingleClusterInformerManager{
|
||||
isSynced: true,
|
||||
},
|
||||
expectedCount: 0,
|
||||
wantSynced: false,
|
||||
},
|
||||
|
@ -218,26 +287,80 @@ func TestUpdateConfiguration(t *testing.T) {
|
|||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Convert typed objects to unstructured objects for proper testing
|
||||
unstructuredItems := make([]runtime.Object, len(tt.configs))
|
||||
for i, config := range tt.configs {
|
||||
if config != nil {
|
||||
unstructuredObj, err := helper.ToUnstructured(config)
|
||||
assert.NoError(t, err)
|
||||
unstructuredItems[i] = unstructuredObj
|
||||
} else {
|
||||
unstructuredItems[i] = config
|
||||
}
|
||||
}
|
||||
|
||||
manager := &interpreterConfigManager{
|
||||
lister: &mockLister{
|
||||
items: tt.configs,
|
||||
items: unstructuredItems,
|
||||
err: tt.listErr,
|
||||
},
|
||||
informer: tt.informer,
|
||||
}
|
||||
manager.configuration.Store([]WebhookAccessor{})
|
||||
manager.initialSynced.Store(false)
|
||||
|
||||
manager.updateConfiguration()
|
||||
synced := manager.HasSynced()
|
||||
assert.Equal(t, tt.wantSynced, synced)
|
||||
|
||||
accessors := manager.HookAccessors()
|
||||
assert.Equal(t, tt.expectedCount, len(accessors))
|
||||
assert.Equal(t, tt.wantSynced, manager.HasSynced())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Mock Implementations
|
||||
|
||||
type mockSingleClusterInformerManager struct {
|
||||
isSynced bool
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) IsInformerSynced(_ schema.GroupVersionResource) bool {
|
||||
return m.isSynced
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) Lister(_ schema.GroupVersionResource) cache.GenericLister {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) ForResource(_ schema.GroupVersionResource, _ cache.ResourceEventHandler) {
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) Start() {
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) Stop() {
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) WaitForCacheSync() map[schema.GroupVersionResource]bool {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) WaitForCacheSyncWithTimeout(_ time.Duration) map[schema.GroupVersionResource]bool {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) GetClient() dynamic.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSingleClusterInformerManager) IsHandlerExist(_ schema.GroupVersionResource, _ cache.ResourceEventHandler) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type mockLister struct {
|
||||
items []runtime.Object
|
||||
err error
|
||||
|
|
|
@ -355,3 +355,8 @@ func (e *CustomizedInterpreter) InterpretHealth(ctx context.Context, attributes
|
|||
|
||||
return response.Healthy, matched, nil
|
||||
}
|
||||
|
||||
// LoadConfig loads the webhook configurations.
|
||||
func (e *CustomizedInterpreter) LoadConfig(webhookConfigurations []*configv1alpha1.ResourceInterpreterWebhookConfiguration) {
|
||||
e.hookManager.LoadConfig(webhookConfigurations)
|
||||
}
|
||||
|
|
|
@ -1085,6 +1085,12 @@ func (m *mockConfigManager) HookAccessors() []configmanager.WebhookAccessor {
|
|||
return m.hooks
|
||||
}
|
||||
|
||||
func (m *mockConfigManager) LoadConfig(_ []*configv1alpha1.ResourceInterpreterWebhookConfiguration) {
|
||||
// Mock implementation: in a real test, we might want to process the configurations
|
||||
// and update the hooks accordingly. For now, this is a no-op implementation.
|
||||
// This allows the mock to satisfy the ConfigManager interface.
|
||||
}
|
||||
|
||||
// mockWebhookAccessor implements configmanager.WebhookAccessor interface for testing
|
||||
type mockWebhookAccessor struct {
|
||||
uid string
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
corev1 "k8s.io/client-go/listers/core/v1"
|
||||
|
@ -32,12 +33,14 @@ import (
|
|||
"github.com/karmada-io/karmada/pkg/resourceinterpreter/customized/webhook/request"
|
||||
"github.com/karmada-io/karmada/pkg/resourceinterpreter/default/native"
|
||||
"github.com/karmada-io/karmada/pkg/resourceinterpreter/default/thirdparty"
|
||||
"github.com/karmada-io/karmada/pkg/util"
|
||||
"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
|
||||
"github.com/karmada-io/karmada/pkg/util/helper"
|
||||
)
|
||||
|
||||
// ResourceInterpreter manages both default and customized webhooks to interpret custom resource structure.
|
||||
type ResourceInterpreter interface {
|
||||
// Start starts running the component and will never stop running until the context is closed or an error occurs.
|
||||
// Start initializes the resource interpreter and performs cache synchronization.
|
||||
Start(ctx context.Context) (err error)
|
||||
|
||||
// HookEnabled tells if any hook exist for specific resource type and operation.
|
||||
|
@ -85,13 +88,16 @@ type customResourceInterpreterImpl struct {
|
|||
defaultInterpreter *native.DefaultInterpreter
|
||||
}
|
||||
|
||||
// Start starts running the component and will never stop running until the context is closed or an error occurs.
|
||||
func (i *customResourceInterpreterImpl) Start(ctx context.Context) (err error) {
|
||||
klog.Infof("Starting custom resource interpreter.")
|
||||
// Start initializes all interpreters and load all ResourceInterpreterCustomization and
|
||||
// ResourceInterpreterWebhookConfiguration configurations into the cache.
|
||||
// It is recommended to be called before all controllers start. Once called, the resource interpreter
|
||||
// will be ready to interpret custom resources.
|
||||
func (i *customResourceInterpreterImpl) Start(_ context.Context) (err error) {
|
||||
klog.Infoln("Starting resource interpreter.")
|
||||
|
||||
i.customizedInterpreter, err = webhook.NewCustomizedInterpreter(i.informer, i.serviceLister)
|
||||
if err != nil {
|
||||
return
|
||||
return err
|
||||
}
|
||||
i.configurableInterpreter = declarative.NewConfigurableInterpreter(i.informer)
|
||||
|
||||
|
@ -100,8 +106,12 @@ func (i *customResourceInterpreterImpl) Start(ctx context.Context) (err error) {
|
|||
|
||||
i.informer.Start()
|
||||
i.informer.WaitForCacheSync()
|
||||
<-ctx.Done()
|
||||
klog.Infof("Stopped as stopCh closed.")
|
||||
|
||||
if err = i.loadConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
klog.Infoln("Resource interpreter started.")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -339,3 +349,46 @@ func (i *customResourceInterpreterImpl) InterpretHealth(object *unstructured.Uns
|
|||
healthy, err = i.defaultInterpreter.InterpretHealth(object)
|
||||
return
|
||||
}
|
||||
|
||||
// loadConfig loads the full set of ResourceInterpreterCustomization and
|
||||
// ResourceInterpreterWebhookConfiguration configurations into the cache. This prevents resource interpreter
|
||||
// parsing errors that can occur when the resource interpreter starts before the cache has synchronized.
|
||||
func (i *customResourceInterpreterImpl) loadConfig() error {
|
||||
customizations, err := i.informer.Lister(util.ResourceInterpreterCustomizationsGVR).List(labels.Everything())
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list resourceinterpretercustomizations: %v", err)
|
||||
return err
|
||||
}
|
||||
klog.V(5).Infof("Found %d resourceinterpretercustomizations", len(customizations))
|
||||
|
||||
declareConfigs := make([]*configv1alpha1.ResourceInterpreterCustomization, len(customizations))
|
||||
for index, c := range customizations {
|
||||
config := &configv1alpha1.ResourceInterpreterCustomization{}
|
||||
if err = helper.ConvertToTypedObject(c, config); err != nil {
|
||||
klog.Errorf("Failed to convert resourceinterpretercustomization: %v", err)
|
||||
return err
|
||||
}
|
||||
declareConfigs[index] = config
|
||||
}
|
||||
i.configurableInterpreter.LoadConfig(declareConfigs)
|
||||
|
||||
webhooks, err := i.informer.Lister(util.ResourceInterpreterWebhookConfigurationsGVR).List(labels.Everything())
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list resourceinterpreterwebhookconfigurations: %v", err)
|
||||
return err
|
||||
}
|
||||
klog.V(5).Infof("Found %d resourceinterpreterwebhookconfigurations", len(webhooks))
|
||||
|
||||
webhookConfigs := make([]*configv1alpha1.ResourceInterpreterWebhookConfiguration, len(webhooks))
|
||||
for index, c := range webhooks {
|
||||
config := &configv1alpha1.ResourceInterpreterWebhookConfiguration{}
|
||||
if err = helper.ConvertToTypedObject(c, config); err != nil {
|
||||
klog.Errorf("Failed to convert resourceinterpreterwebhookconfiguration: %v", err)
|
||||
return err
|
||||
}
|
||||
webhookConfigs[index] = config
|
||||
}
|
||||
i.customizedInterpreter.LoadConfig(webhookConfigs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -332,6 +332,12 @@ func (s *Scheduler) enqueueAffectedBindings(cluster *clusterv1alpha1.Cluster) er
|
|||
// never reach here
|
||||
continue
|
||||
}
|
||||
if !schedulerNameFilter(s.schedulerName, binding.Spec.SchedulerName) {
|
||||
continue
|
||||
}
|
||||
if binding.Spec.SchedulingSuspended() {
|
||||
continue
|
||||
}
|
||||
|
||||
var affinity *policyv1alpha1.ClusterAffinity
|
||||
if placementPtr.ClusterAffinities != nil {
|
||||
|
@ -340,9 +346,7 @@ func (s *Scheduler) enqueueAffectedBindings(cluster *clusterv1alpha1.Cluster) er
|
|||
// for scheduling or its status has not been synced to the
|
||||
// cache. Just enqueue the binding to avoid missing the cluster
|
||||
// update event.
|
||||
if schedulerNameFilter(s.schedulerName, binding.Spec.SchedulerName) {
|
||||
s.onResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
}
|
||||
s.onResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
continue
|
||||
}
|
||||
affinityIndex := getAffinityIndex(placementPtr.ClusterAffinities, binding.Status.SchedulerObservedAffinityName)
|
||||
|
@ -357,9 +361,7 @@ func (s *Scheduler) enqueueAffectedBindings(cluster *clusterv1alpha1.Cluster) er
|
|||
fallthrough
|
||||
case util.ClusterMatches(cluster, *affinity):
|
||||
// If the cluster manifest matches the affinity, add the binding to the queue to trigger rescheduling
|
||||
if schedulerNameFilter(s.schedulerName, binding.Spec.SchedulerName) {
|
||||
s.onResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
}
|
||||
s.onResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -377,6 +379,12 @@ func (s *Scheduler) enqueueAffectedCRBs(cluster *clusterv1alpha1.Cluster) error
|
|||
// never reach here
|
||||
continue
|
||||
}
|
||||
if !schedulerNameFilter(s.schedulerName, binding.Spec.SchedulerName) {
|
||||
continue
|
||||
}
|
||||
if binding.Spec.SchedulingSuspended() {
|
||||
continue
|
||||
}
|
||||
|
||||
var affinity *policyv1alpha1.ClusterAffinity
|
||||
if placementPtr.ClusterAffinities != nil {
|
||||
|
@ -385,9 +393,7 @@ func (s *Scheduler) enqueueAffectedCRBs(cluster *clusterv1alpha1.Cluster) error
|
|||
// for scheduling or its status has not been synced to the
|
||||
// cache. Just enqueue the binding to avoid missing the cluster
|
||||
// update event.
|
||||
if schedulerNameFilter(s.schedulerName, binding.Spec.SchedulerName) {
|
||||
s.onClusterResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
}
|
||||
s.onClusterResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
continue
|
||||
}
|
||||
affinityIndex := getAffinityIndex(placementPtr.ClusterAffinities, binding.Status.SchedulerObservedAffinityName)
|
||||
|
@ -402,9 +408,7 @@ func (s *Scheduler) enqueueAffectedCRBs(cluster *clusterv1alpha1.Cluster) error
|
|||
fallthrough
|
||||
case util.ClusterMatches(cluster, *affinity):
|
||||
// If the cluster manifest matches the affinity, add the binding to the queue to trigger rescheduling
|
||||
if schedulerNameFilter(s.schedulerName, binding.Spec.SchedulerName) {
|
||||
s.onClusterResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
}
|
||||
s.onClusterResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -796,7 +796,7 @@ func (s *Scheduler) handleErr(err error, bindingInfo *internalqueue.QueuedBindin
|
|||
}
|
||||
|
||||
var unschedulableErr *framework.UnschedulableError
|
||||
if !errors.As(err, &unschedulableErr) {
|
||||
if errors.As(err, &unschedulableErr) {
|
||||
s.priorityQueue.PushUnschedulableIfNotPresent(bindingInfo)
|
||||
} else {
|
||||
s.priorityQueue.PushBackoffIfNotPresent(bindingInfo)
|
||||
|
|
|
@ -110,3 +110,23 @@ func RescheduleRequired(rescheduleTriggeredAt, lastScheduledTime *metav1.Time) b
|
|||
}
|
||||
return rescheduleTriggeredAt.After(lastScheduledTime.Time)
|
||||
}
|
||||
|
||||
// MergePolicySuspension merges the policy's suspension configuration into the binding's suspension.
|
||||
func MergePolicySuspension(bindingSuspension *workv1alpha2.Suspension, policySuspension *policyv1alpha1.Suspension) *workv1alpha2.Suspension {
|
||||
if policySuspension != nil {
|
||||
if bindingSuspension == nil {
|
||||
bindingSuspension = &workv1alpha2.Suspension{}
|
||||
}
|
||||
bindingSuspension.Suspension = *policySuspension
|
||||
return bindingSuspension
|
||||
}
|
||||
// policySuspension is nil; clean up the policy-derived part of the binding's suspension.
|
||||
if bindingSuspension == nil {
|
||||
return nil
|
||||
}
|
||||
bindingSuspension.Suspension = policyv1alpha1.Suspension{}
|
||||
if bindingSuspension.Scheduling == nil {
|
||||
return nil
|
||||
}
|
||||
return bindingSuspension
|
||||
}
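A short usage sketch, mirroring the tests that follow; ExampleMergeFinalizers-style example functions are not part of this change, and the fmt import is assumed.

func ExampleMergePolicySuspension() {
	binding := &workv1alpha2.Suspension{Scheduling: ptr.To(true)}
	policy := &policyv1alpha1.Suspension{Dispatching: ptr.To(true)}

	// The policy-level suspension replaces the binding's embedded suspension,
	// while the binding-owned Scheduling flag is preserved.
	merged := MergePolicySuspension(binding, policy)
	fmt.Println(*merged.Dispatching, *merged.Scheduling)
	// Output: true true
}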
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
|
||||
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
|
||||
|
@ -419,3 +420,87 @@ func TestRescheduleRequired(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergePolicySuspension(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bindingSuspension *workv1alpha2.Suspension
|
||||
policySuspension *policyv1alpha1.Suspension
|
||||
want *workv1alpha2.Suspension
|
||||
}{
|
||||
{
|
||||
name: "both nil returns nil",
|
||||
bindingSuspension: nil,
|
||||
policySuspension: nil,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "binding suspension only preserves scheduling when policy suspension nil",
|
||||
bindingSuspension: &workv1alpha2.Suspension{
|
||||
Scheduling: ptr.To(true),
|
||||
},
|
||||
policySuspension: nil,
|
||||
want: &workv1alpha2.Suspension{
|
||||
Scheduling: ptr.To(true),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cleanup of binding suspension preserves scheduling field",
|
||||
bindingSuspension: &workv1alpha2.Suspension{
|
||||
Suspension: policyv1alpha1.Suspension{
|
||||
Dispatching: ptr.To(true),
|
||||
},
|
||||
Scheduling: ptr.To(true),
|
||||
},
|
||||
policySuspension: nil,
|
||||
want: &workv1alpha2.Suspension{
|
||||
Scheduling: ptr.To(true),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if the scheduling not set and policy suspension nil, will return nil",
|
||||
bindingSuspension: &workv1alpha2.Suspension{
|
||||
Suspension: policyv1alpha1.Suspension{
|
||||
Dispatching: ptr.To(true),
|
||||
},
|
||||
},
|
||||
policySuspension: nil,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "policy suspension set and no existing binding creates new suspension from policy",
|
||||
bindingSuspension: nil,
|
||||
policySuspension: &policyv1alpha1.Suspension{
|
||||
Dispatching: ptr.To(true),
|
||||
},
|
||||
want: &workv1alpha2.Suspension{
|
||||
Suspension: policyv1alpha1.Suspension{
|
||||
Dispatching: ptr.To(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "should merge policy suspension and binding suspension",
|
||||
bindingSuspension: &workv1alpha2.Suspension{
|
||||
Scheduling: ptr.To(true),
|
||||
},
|
||||
policySuspension: &policyv1alpha1.Suspension{
|
||||
Dispatching: ptr.To(true),
|
||||
},
|
||||
want: &workv1alpha2.Suspension{
|
||||
Suspension: policyv1alpha1.Suspension{
|
||||
Dispatching: ptr.To(true),
|
||||
},
|
||||
Scheduling: ptr.To(true),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := MergePolicySuspension(tt.bindingSuspension, tt.policySuspension)
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("MergePolicySuspension() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,6 +52,7 @@ const (
|
|||
type ClusterRegisterOption struct {
|
||||
ClusterNamespace string
|
||||
ClusterName string
|
||||
ClusterID string
|
||||
ReportSecrets []string
|
||||
ClusterAPIEndpoint string
|
||||
ProxyServerAddress string
|
||||
|
@ -64,11 +65,10 @@ type ClusterRegisterOption struct {
|
|||
ClusterConfig *rest.Config
|
||||
Secret corev1.Secret
|
||||
ImpersonatorSecret corev1.Secret
|
||||
ClusterID string
|
||||
}
|
||||
|
||||
// IsKubeCredentialsEnabled reports whether the kube credentials secret should be reported.
|
||||
func (r ClusterRegisterOption) IsKubeCredentialsEnabled() bool {
|
||||
func (r *ClusterRegisterOption) IsKubeCredentialsEnabled() bool {
|
||||
for _, sct := range r.ReportSecrets {
|
||||
if sct == KubeCredentials {
|
||||
return true
|
||||
|
@ -78,7 +78,7 @@ func (r ClusterRegisterOption) IsKubeCredentialsEnabled() bool {
|
|||
}
|
||||
|
||||
// IsKubeImpersonatorEnabled reports whether the kube impersonator secret should be reported.
|
||||
func (r ClusterRegisterOption) IsKubeImpersonatorEnabled() bool {
|
||||
func (r *ClusterRegisterOption) IsKubeImpersonatorEnabled() bool {
|
||||
for _, sct := range r.ReportSecrets {
|
||||
if sct == KubeImpersonator {
|
||||
return true
|
||||
|
@ -87,6 +87,45 @@ func (r ClusterRegisterOption) IsKubeImpersonatorEnabled() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// Validate validates the cluster register option, including the cluster ID and cluster name.
|
||||
func (r *ClusterRegisterOption) Validate(karmadaClient karmadaclientset.Interface, isAgent bool) error {
|
||||
clusterList, err := karmadaClient.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterIDUsed, clusterNameUsed, sameCluster := r.validateCluster(clusterList)
|
||||
if isAgent && sameCluster {
|
||||
return nil
|
||||
}
|
||||
|
||||
if clusterIDUsed || clusterNameUsed {
|
||||
return fmt.Errorf("the cluster ID %s or the cluster name %s has been registered", r.ClusterID, r.ClusterName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateCluster checks whether the cluster name and cluster ID in the register option are unique.
|
||||
// 1. When registering a cluster for the first time, the return values `clusterIDUsed` and `clusterNameUsed` can be used
|
||||
// to check if the cluster ID and the cluster name have already been used, which can avoid duplicate registrations.
|
||||
// 2. In cases where the agent is restarted, the return value `sameCluster` can be used to determine if the cluster
|
||||
// specified in the `ClusterRegisterOption` has already been registered, allowing the registration to be re-entered and the cluster to be updated.
|
||||
func (r *ClusterRegisterOption) validateCluster(clusterList *clusterv1alpha1.ClusterList) (clusterIDUsed, clusterNameUsed, sameCluster bool) {
|
||||
for _, cluster := range clusterList.Items {
|
||||
if cluster.Spec.ID == r.ClusterID && cluster.GetName() == r.ClusterName {
|
||||
return true, true, true
|
||||
}
|
||||
if cluster.Spec.ID == r.ClusterID {
|
||||
clusterIDUsed = true
|
||||
}
|
||||
if cluster.GetName() == r.ClusterName {
|
||||
clusterNameUsed = true
|
||||
}
|
||||
}
|
||||
|
||||
return clusterIDUsed, clusterNameUsed, false
|
||||
}
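A hypothetical registration-flow sketch (not in this change) showing where Validate fits; registerIfValid and the omitted creation step are assumptions for illustration only.

// registerIfValid rejects registrations that would reuse an existing cluster
// ID or name; an agent re-registering the exact same cluster is allowed.
func registerIfValid(client karmadaclientset.Interface, opt *ClusterRegisterOption, isAgent bool) error {
	if err := opt.Validate(client, isAgent); err != nil {
		return err
	}
	// ... proceed to create or update the Cluster object (omitted in this sketch).
	return nil
}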
|
||||
|
||||
// IsClusterReady tells whether the cluster status is in the 'Ready' condition.
|
||||
func IsClusterReady(clusterStatus *clusterv1alpha1.ClusterStatus) bool {
|
||||
return meta.IsStatusConditionTrue(clusterStatus.Conditions, clusterv1alpha1.ClusterConditionReady)
|
||||
|
@ -208,21 +247,6 @@ func ObtainClusterID(clusterKubeClient kubernetes.Interface) (string, error) {
|
|||
return string(ns.UID), nil
|
||||
}
|
||||
|
||||
// IsClusterIdentifyUnique checks whether the ClusterID exists in the karmada control plane.
|
||||
func IsClusterIdentifyUnique(controlPlaneClient karmadaclientset.Interface, id string) (bool, string, error) {
|
||||
clusterList, err := controlPlaneClient.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
for _, cluster := range clusterList.Items {
|
||||
if cluster.Spec.ID == id {
|
||||
return false, cluster.Name, nil
|
||||
}
|
||||
}
|
||||
return true, "", nil
|
||||
}
|
||||
|
||||
// ClusterAccessCredentialChanged checks whether the cluster access credential changed
|
||||
func ClusterAccessCredentialChanged(newSpec, oldSpec clusterv1alpha1.ClusterSpec) bool {
|
||||
if oldSpec.APIEndpoint == newSpec.APIEndpoint &&
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
@ -54,11 +53,6 @@ func withSyncMode(cluster *clusterv1alpha1.Cluster, syncMode clusterv1alpha1.Clu
|
|||
return cluster
|
||||
}
|
||||
|
||||
func withID(cluster *clusterv1alpha1.Cluster, id string) *clusterv1alpha1.Cluster {
|
||||
cluster.Spec.ID = id
|
||||
return cluster
|
||||
}
|
||||
|
||||
func TestCreateOrUpdateClusterObject(t *testing.T) {
|
||||
type args struct {
|
||||
controlPlaneClient karmadaclientset.Interface
|
||||
|
@ -167,48 +161,6 @@ func TestCreateOrUpdateClusterObject(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIsClusterIDUnique(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
existedCluster []runtime.Object
|
||||
id string
|
||||
want bool
|
||||
clustername string
|
||||
}{
|
||||
{
|
||||
name: "no cluster", id: "1", want: true,
|
||||
existedCluster: []runtime.Object{},
|
||||
},
|
||||
{
|
||||
name: "existed id", id: "1", want: false, clustername: "cluster-1",
|
||||
existedCluster: []runtime.Object{withID(newCluster("cluster-1"), "1")},
|
||||
},
|
||||
{
|
||||
name: "unique id", id: "2", want: true,
|
||||
existedCluster: []runtime.Object{withID(newCluster("cluster-1"), "1")},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeClient := karmadaclientsetfake.NewSimpleClientset(tc.existedCluster...)
|
||||
|
||||
ok, name, err := IsClusterIdentifyUnique(fakeClient, tc.id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if ok != tc.want {
|
||||
t.Errorf("expected value: %v, but got: %v", tc.want, ok)
|
||||
}
|
||||
|
||||
if !ok && name != tc.clustername {
|
||||
t.Errorf("expected clustername: %v, but got: %v", tc.clustername, name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterRegisterOption_IsKubeCredentialsEnabled(t *testing.T) {
|
||||
type fields struct {
|
||||
ReportSecrets []string
|
||||
|
@ -323,6 +275,106 @@ func TestClusterRegisterOption_IsKubeImpersonatorEnabled(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestClusterRegisterOption_ValidateCluster(t *testing.T) {
|
||||
registeredClusterList := &clusterv1alpha1.ClusterList{
|
||||
Items: []clusterv1alpha1.Cluster{
|
||||
{
|
||||
Spec: clusterv1alpha1.ClusterSpec{
|
||||
ID: "1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "member1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Spec: clusterv1alpha1.ClusterSpec{
|
||||
ID: "2",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "member2",
|
||||
},
|
||||
},
|
||||
{
|
||||
Spec: clusterv1alpha1.ClusterSpec{
|
||||
ID: "3",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "member3",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
testItems := []struct {
|
||||
name string
|
||||
clusterList *clusterv1alpha1.ClusterList
|
||||
opts ClusterRegisterOption
|
||||
expectedClusterIDUsed bool
|
||||
expectedClusterNameUsed bool
|
||||
expectedSameCluster bool
|
||||
}{
|
||||
{
|
||||
name: "registering a brand new cluster",
|
||||
clusterList: registeredClusterList,
|
||||
opts: ClusterRegisterOption{
|
||||
ClusterID: "4",
|
||||
ClusterName: "member4",
|
||||
},
|
||||
expectedClusterIDUsed: false,
|
||||
expectedClusterNameUsed: false,
|
||||
expectedSameCluster: false,
|
||||
},
|
||||
{
|
||||
name: "clusterName is used",
|
||||
clusterList: registeredClusterList,
|
||||
opts: ClusterRegisterOption{
|
||||
ClusterID: "4",
|
||||
ClusterName: "member2",
|
||||
},
|
||||
expectedClusterIDUsed: false,
|
||||
expectedClusterNameUsed: true,
|
||||
expectedSameCluster: false,
|
||||
},
|
||||
{
|
||||
name: "clusterID is used",
|
||||
clusterList: registeredClusterList,
|
||||
opts: ClusterRegisterOption{
|
||||
ClusterID: "2",
|
||||
ClusterName: "member4",
|
||||
},
|
||||
expectedClusterIDUsed: true,
|
||||
expectedClusterNameUsed: false,
|
||||
expectedSameCluster: false,
|
||||
},
|
||||
{
|
||||
name: "the same cluster",
|
||||
clusterList: registeredClusterList,
|
||||
opts: ClusterRegisterOption{
|
||||
ClusterID: "2",
|
||||
ClusterName: "member2",
|
||||
},
|
||||
expectedClusterIDUsed: true,
|
||||
expectedClusterNameUsed: true,
|
||||
expectedSameCluster: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, item := range testItems {
|
||||
t.Run(item.name, func(t *testing.T) {
|
||||
clusterIDUsed, clusterNameUsed, sameCluster := item.opts.validateCluster(item.clusterList)
|
||||
if clusterIDUsed != item.expectedClusterIDUsed {
|
||||
t.Errorf("clusterNameUsed = %v, want %v", clusterIDUsed, item.expectedClusterIDUsed)
|
||||
}
|
||||
if clusterNameUsed != item.expectedClusterNameUsed {
|
||||
t.Errorf("clusterNameUsed = %v, want %v", clusterNameUsed, item.expectedClusterNameUsed)
|
||||
}
|
||||
if sameCluster != item.expectedSameCluster {
|
||||
t.Errorf("clusterNameUsed = %v, want %v", sameCluster, item.expectedSameCluster)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateClusterObject(t *testing.T) {
|
||||
type args struct {
|
||||
controlPlaneClient karmadaclientset.Interface
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
|
||||
discoveryv1 "k8s.io/api/discovery/v1"
|
||||
|
||||
configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
|
||||
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
|
||||
)
|
||||
|
||||
|
@ -62,9 +63,6 @@ const (
|
|||
// managed by karmada controllers.
|
||||
KarmadaSystemLabel = "karmada.io/system"
|
||||
|
||||
// EndpointSliceDispatchControllerLabelValue indicates the endpointSlice are controlled by Karmada
|
||||
EndpointSliceDispatchControllerLabelValue = "endpointslice-dispatch-controller.karmada.io"
|
||||
|
||||
// RetainReplicasLabel is a reserved label to indicate whether the replicas should be retained. e.g:
|
||||
// resourcetemplate.karmada.io/retain-replicas: true // with value `true` indicates retain
|
||||
// resourcetemplate.karmada.io/retain-replicas: false // with value `false` and others, indicates not retain
|
||||
|
@ -77,6 +75,7 @@ const (
|
|||
EndpointSliceWorkManagedByLabel = "endpointslice.karmada.io/managed-by"
|
||||
)
|
||||
|
||||
// Define label values used by Karmada system.
|
||||
const (
|
||||
// ManagedByKarmadaLabelValue indicates that these are workloads in member cluster synchronized by karmada controllers.
|
||||
ManagedByKarmadaLabelValue = "true"
|
||||
|
@ -89,6 +88,12 @@ const (
|
|||
|
||||
// PropagationInstructionSuppressed indicates that the resource should not be propagated.
|
||||
PropagationInstructionSuppressed = "suppressed"
|
||||
|
||||
// EndpointSliceDispatchControllerLabelValue indicates the endpointSlice is controlled by Karmada endpointslice-dispatch-controller
|
||||
EndpointSliceDispatchControllerLabelValue = "endpointslice-dispatch-controller.karmada.io"
|
||||
|
||||
// EndpointSliceControllerLabelValue indicates the endpointSlice is controlled by Karmada endpointslice-controller
|
||||
EndpointSliceControllerLabelValue = "endpointslice-controller.karmada.io"
|
||||
)
|
||||
|
||||
// Define annotations used by karmada system.
|
||||
|
@ -245,6 +250,15 @@ var (
|
|||
EndpointSliceGVK = discoveryv1.SchemeGroupVersion.WithKind("EndpointSlice")
|
||||
)
|
||||
|
||||
// Define GroupVersionResources used by the Karmada system.
|
||||
var (
|
||||
// ResourceInterpreterCustomizationsGVR is the GroupVersionResource of ResourceInterpreterCustomizations.
|
||||
ResourceInterpreterCustomizationsGVR = configv1alpha1.SchemeGroupVersion.WithResource("resourceinterpretercustomizations")
|
||||
|
||||
// ResourceInterpreterWebhookConfigurationsGVR is the GroupVersionResource of ResourceInterpreterWebhookConfigurations.
|
||||
ResourceInterpreterWebhookConfigurationsGVR = configv1alpha1.SchemeGroupVersion.WithResource("resourceinterpreterwebhookconfigurations")
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultFilePerm default file perm
|
||||
DefaultFilePerm = 0640
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
/*
|
||||
Copyright 2025 The Karmada Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import "k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
// MergeFinalizers merges the new finalizers into the existing finalizers and deduplicates them.
|
||||
// The result is sorted.
|
||||
func MergeFinalizers(existFinalizers, newFinalizers []string) []string {
|
||||
if existFinalizers == nil && newFinalizers == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
finalizers := sets.New[string](existFinalizers...).Insert(newFinalizers...)
|
||||
return sets.List[string](finalizers)
|
||||
}
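A small usage sketch; ExampleMergeFinalizers is a hypothetical example function (assuming an fmt import) and is not part of this change.

func ExampleMergeFinalizers() {
	existing := []string{"karmada.io/b-controller", "karmada.io/a-controller"}
	added := []string{"karmada.io/a-controller", "karmada.io/c-controller"}

	// Duplicates are dropped and the merged result comes back sorted.
	fmt.Println(MergeFinalizers(existing, added))
	// Output: [karmada.io/a-controller karmada.io/b-controller karmada.io/c-controller]
}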
|
|
@ -0,0 +1,131 @@
/*
Copyright 2025 The Karmada Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"reflect"
	"testing"
)

func TestMergeFinalizers(t *testing.T) {
	tests := []struct {
		name            string
		existFinalizers []string
		newFinalizers   []string
		expectedResult  []string
	}{
		{
			name:            "both nil",
			existFinalizers: nil,
			newFinalizers:   nil,
			expectedResult:  nil,
		},
		{
			name:            "exist finalizers is nil",
			existFinalizers: nil,
			newFinalizers:   []string{"finalizer1", "finalizer2"},
			expectedResult:  []string{"finalizer1", "finalizer2"},
		},
		{
			name:            "new finalizers is nil",
			existFinalizers: []string{"finalizer1", "finalizer2"},
			newFinalizers:   nil,
			expectedResult:  []string{"finalizer1", "finalizer2"},
		},
		{
			name:            "both empty",
			existFinalizers: []string{},
			newFinalizers:   []string{},
			expectedResult:  []string{},
		},
		{
			name:            "exist finalizers is empty",
			existFinalizers: []string{},
			newFinalizers:   []string{"finalizer1", "finalizer2"},
			expectedResult:  []string{"finalizer1", "finalizer2"},
		},
		{
			name:            "new finalizers is empty",
			existFinalizers: []string{"finalizer1", "finalizer2"},
			newFinalizers:   []string{},
			expectedResult:  []string{"finalizer1", "finalizer2"},
		},
		{
			name:            "no duplicates",
			existFinalizers: []string{"finalizer1", "finalizer2"},
			newFinalizers:   []string{"finalizer3", "finalizer4"},
			expectedResult:  []string{"finalizer1", "finalizer2", "finalizer3", "finalizer4"},
		},
		{
			name:            "with duplicates",
			existFinalizers: []string{"finalizer1", "finalizer2"},
			newFinalizers:   []string{"finalizer2", "finalizer3"},
			expectedResult:  []string{"finalizer1", "finalizer2", "finalizer3"},
		},
		{
			name:            "all duplicates",
			existFinalizers: []string{"finalizer1", "finalizer2"},
			newFinalizers:   []string{"finalizer1", "finalizer2"},
			expectedResult:  []string{"finalizer1", "finalizer2"},
		},
		{
			name:            "duplicates in exist finalizers",
			existFinalizers: []string{"finalizer1", "finalizer2", "finalizer1"},
			newFinalizers:   []string{"finalizer3"},
			expectedResult:  []string{"finalizer1", "finalizer2", "finalizer3"},
		},
		{
			name:            "duplicates in new finalizers",
			existFinalizers: []string{"finalizer1"},
			newFinalizers:   []string{"finalizer2", "finalizer3", "finalizer2"},
			expectedResult:  []string{"finalizer1", "finalizer2", "finalizer3"},
		},
		{
			name:            "duplicates in both",
			existFinalizers: []string{"finalizer1", "finalizer2", "finalizer1"},
			newFinalizers:   []string{"finalizer2", "finalizer3", "finalizer2"},
			expectedResult:  []string{"finalizer1", "finalizer2", "finalizer3"},
		},
		{
			name:            "single finalizer in exist",
			existFinalizers: []string{"finalizer1"},
			newFinalizers:   []string{"finalizer2"},
			expectedResult:  []string{"finalizer1", "finalizer2"},
		},
		{
			name:            "single duplicate finalizer",
			existFinalizers: []string{"finalizer1"},
			newFinalizers:   []string{"finalizer1"},
			expectedResult:  []string{"finalizer1"},
		},
		{
			name:            "sort with result",
			existFinalizers: []string{"finalizer3", "finalizer1", "finalizer2"},
			newFinalizers:   []string{"finalizer4", "finalizer5"},
			expectedResult:  []string{"finalizer1", "finalizer2", "finalizer3", "finalizer4", "finalizer5"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := MergeFinalizers(tt.existFinalizers, tt.newFinalizers)
			if !reflect.DeepEqual(result, tt.expectedResult) {
				t.Errorf("MergeFinalizers() = %v, want %v", result, tt.expectedResult)
			}
		})
	}
}
@ -238,9 +238,6 @@ func RemoveOrphanWorks(ctx context.Context, c client.Client, works []workv1alpha
}

// FetchResourceTemplate fetches the resource template to be propagated.
// Any updates to this resource template are not recommended as it may come from the informer cache.
// We should abide by the principle of making a deep copy first and then modifying it.
// See issue: https://github.com/karmada-io/karmada/issues/3878.
func FetchResourceTemplate(
	ctx context.Context,
	dynamicClient dynamic.Interface,

@ -266,12 +263,13 @@ func FetchResourceTemplate(
		// fall back to call api server in case the cache has not been synchronized yet
		klog.Warningf("Failed to get resource template (%s/%s/%s) from cache, Error: %v. Fall back to call api server.",
			resource.Kind, resource.Namespace, resource.Name, err)
		object, err = dynamicClient.Resource(gvr).Namespace(resource.Namespace).Get(ctx, resource.Name, metav1.GetOptions{})
		objectFromAPIServer, err := dynamicClient.Resource(gvr).Namespace(resource.Namespace).Get(ctx, resource.Name, metav1.GetOptions{})
		if err != nil {
			klog.Errorf("Failed to get resource template (%s/%s/%s) from api server, Error: %v",
				resource.Kind, resource.Namespace, resource.Name, err)
			return nil, err
		}
		return objectFromAPIServer, nil
	}

	unstructuredObj, err := ToUnstructured(object)

@ -280,7 +278,7 @@ func FetchResourceTemplate(
		return nil, err
	}

	return unstructuredObj, nil
	return unstructuredObj.DeepCopy(), nil
}

// FetchResourceTemplatesByLabelSelector fetches the resource templates by label selector to be propagated.
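The switch to returning unstructuredObj.DeepCopy() keeps callers from mutating objects held in the informer cache. A small, self-contained illustration of why that matters; the Deployment object below is hypothetical and stands in for a cached resource template:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// cached stands in for an object served from an informer cache.
	cached := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata": map[string]interface{}{
			"name":   "demo",
			"labels": map[string]interface{}{"a": "b"},
		},
	}}

	// Returning a deep copy (as FetchResourceTemplate now does) means the caller
	// can mutate its copy freely without touching the cached object.
	copyForCaller := cached.DeepCopy()
	unstructured.RemoveNestedField(copyForCaller.Object, "metadata", "labels")

	fmt.Println(len(cached.GetLabels()))        // 1 – the cache is untouched
	fmt.Println(len(copyForCaller.GetLabels())) // 0 – only the copy changed
}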
@ -140,3 +140,12 @@ func GetConsumerClusters(client client.Client, mcs *networkingv1alpha1.MultiClus
	}
	return allClusters, nil
}

// IsEndpointSliceManagedByKarmada checks if the EndpointSlice is managed by Karmada.
func IsEndpointSliceManagedByKarmada(epsLabels map[string]string) bool {
	switch util.GetLabelValue(epsLabels, discoveryv1.LabelManagedBy) {
	case util.EndpointSliceDispatchControllerLabelValue, util.EndpointSliceControllerLabelValue:
		return true
	}
	return false
}
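For context, a small self-contained sketch of the label check this helper performs; the labels below are illustrative, and only the endpointslice-controller constant added in this change is referenced:

package main

import (
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"

	"github.com/karmada-io/karmada/pkg/util"
)

func main() {
	// Labels as they might appear on an EndpointSlice created by Karmada's
	// endpointslice-controller; the value comes from the constant added above.
	labels := map[string]string{
		discoveryv1.LabelManagedBy: util.EndpointSliceControllerLabelValue,
	}

	// The same comparison IsEndpointSliceManagedByKarmada performs: a matching
	// managed-by value identifies slices created by Karmada itself (e.g. so they
	// are not treated as member-native endpoints).
	managed := util.GetLabelValue(labels, discoveryv1.LabelManagedBy) == util.EndpointSliceControllerLabelValue
	fmt.Println(managed) // true
}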
@ -28,6 +28,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/gclient"
)

@ -223,3 +224,50 @@ func TestDeleteEndpointSlice(t *testing.T) {
		})
	}
}

func TestIsEndpointSliceManagedByKarmada(t *testing.T) {
	tests := []struct {
		name   string
		labels map[string]string
		want   bool
	}{
		{
			name: "managed by endpointslice-dispatch-controller",
			labels: map[string]string{
				discoveryv1.LabelManagedBy: util.EndpointSliceDispatchControllerLabelValue,
			},
			want: true,
		},
		{
			name: "managed by endpointslice-controller",
			labels: map[string]string{
				discoveryv1.LabelManagedBy: util.EndpointSliceControllerLabelValue,
			},
			want: true,
		},
		{
			name: "not managed by karmada",
			labels: map[string]string{
				discoveryv1.LabelManagedBy: "not-karmada",
			},
			want: false,
		},
		{
			name:   "nil labels",
			labels: nil,
			want:   false,
		},
		{
			name:   "empty labels",
			labels: map[string]string{},
			want:   false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := IsEndpointSliceManagedByKarmada(tt.labels); got != tt.want {
				t.Errorf("IsEndpointSliceManagedByKarmada() = %v, want %v", got, tt.want)
			}
		})
	}
}
@ -239,3 +239,24 @@ func matchZones(zoneMatchExpression *corev1.NodeSelectorRequirement, zones []str
		return false
	}
}

// ExtractUniqueNamespacedSelectors returns a new slice of ResourceSelector deduplicated by
// APIVersion, Kind and Namespace. The returned selectors only contain APIVersion, Kind and Namespace;
// other fields (e.g. Name, LabelSelector) are intentionally discarded.
func ExtractUniqueNamespacedSelectors(selectors []policyv1alpha1.ResourceSelector) []policyv1alpha1.ResourceSelector {
	var results []policyv1alpha1.ResourceSelector
	handled := make(map[string]bool)
	for _, selector := range selectors {
		key := selector.APIVersion + "|" + selector.Kind + "|" + selector.Namespace
		if handled[key] {
			continue
		}
		results = append(results, policyv1alpha1.ResourceSelector{
			APIVersion: selector.APIVersion,
			Kind:       selector.Kind,
			Namespace:  selector.Namespace,
		})
		handled[key] = true
	}
	return results
}
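A short, self-contained sketch of the dedup behaviour. The diff does not show the helper's package path, so the logic is mirrored locally here, and the selectors are illustrative:

package main

import (
	"fmt"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// extractUnique mirrors ExtractUniqueNamespacedSelectors above: keep one selector
// per (APIVersion, Kind, Namespace) tuple and drop Name/LabelSelector details.
func extractUnique(selectors []policyv1alpha1.ResourceSelector) []policyv1alpha1.ResourceSelector {
	var results []policyv1alpha1.ResourceSelector
	handled := make(map[string]bool)
	for _, s := range selectors {
		key := s.APIVersion + "|" + s.Kind + "|" + s.Namespace
		if handled[key] {
			continue
		}
		results = append(results, policyv1alpha1.ResourceSelector{APIVersion: s.APIVersion, Kind: s.Kind, Namespace: s.Namespace})
		handled[key] = true
	}
	return results
}

func main() {
	selectors := []policyv1alpha1.ResourceSelector{
		{APIVersion: "apps/v1", Kind: "Deployment", Namespace: "default", Name: "nginx"},
		{APIVersion: "apps/v1", Kind: "Deployment", Namespace: "default", Name: "redis"},
		{APIVersion: "v1", Kind: "Service", Namespace: "default"},
	}
	// The two Deployment selectors collapse into one namespaced selector; Name is dropped.
	fmt.Println(len(extractUnique(selectors))) // 2
}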
@ -1087,3 +1087,149 @@ func Test_matchZones(t *testing.T) {
		})
	}
}

func TestExtractUniqueNamespacedSelectors(t *testing.T) {
	tests := []struct {
		name      string
		selectors []policyv1alpha1.ResourceSelector
		want      []policyv1alpha1.ResourceSelector
	}{
		{
			name:      "empty selectors",
			selectors: nil,
			want:      nil,
		},
		{
			name: "single selector",
			selectors: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "v1",
					Kind:       "Pod",
				},
			},
			want: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "v1",
					Kind:       "Pod",
				},
			},
		},
		{
			name: "multiple different selectors",
			selectors: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "v1",
					Kind:       "Pod",
				},
				{
					APIVersion: "apps/v1",
					Kind:       "Deployment",
				},
				{
					APIVersion: "v1",
					Kind:       "Service",
				},
			},
			want: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "v1",
					Kind:       "Pod",
				},
				{
					APIVersion: "apps/v1",
					Kind:       "Deployment",
				},
				{
					APIVersion: "v1",
					Kind:       "Service",
				},
			},
		},
		{
			name: "duplicate selectors should be deduplicated",
			selectors: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "v1",
					Kind:       "Pod",
				},
				{
					APIVersion: "apps/v1",
					Kind:       "Deployment",
				},
				{
					APIVersion: "v1",
					Kind:       "Pod", // duplicate
				},
				{
					APIVersion: "apps/v1",
					Kind:       "Deployment", // duplicate
				},
			},
			want: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "v1",
					Kind:       "Pod",
				},
				{
					APIVersion: "apps/v1",
					Kind:       "Deployment",
				},
			},
		},
		{
			name: "selectors with same kind but different API versions",
			selectors: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "extensions/v1beta1",
					Kind:       "Deployment",
				},
				{
					APIVersion: "apps/v1",
					Kind:       "Deployment",
				},
			},
			want: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "extensions/v1beta1",
					Kind:       "Deployment",
				},
				{
					APIVersion: "apps/v1",
					Kind:       "Deployment",
				},
			},
		},
		{
			name: "selectors with custom resources",
			selectors: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "custom.io/v1alpha1",
					Kind:       "CustomResource",
				},
				{
					APIVersion: "example.com/v1beta1",
					Kind:       "ExampleResource",
				},
			},
			want: []policyv1alpha1.ResourceSelector{
				{
					APIVersion: "custom.io/v1alpha1",
					Kind:       "CustomResource",
				},
				{
					APIVersion: "example.com/v1beta1",
					Kind:       "ExampleResource",
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ExtractUniqueNamespacedSelectors(tt.selectors)
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ExtractUniqueNamespacedSelectors() = %v, want %v", got, tt.want)
			}
		})
	}
}
@ -97,8 +97,17 @@ func (wh *Webhook) writeResponse(w io.Writer, response Response) {
// writeResourceInterpreterResponse writes ar to w.
func (wh *Webhook) writeResourceInterpreterResponse(w io.Writer, interpreterContext configv1alpha1.ResourceInterpreterContext) {
	if err := json.NewEncoder(w).Encode(interpreterContext); err != nil {
		klog.Errorf("unable to encode the response: %v", err)
		wh.writeResponse(w, Errored(http.StatusInternalServerError, err))
		klog.Errorf("unable to encode and write the response: %v", err)
		// Since the `ar configv1alpha1.ResourceInterpreterContext` is a clear and legal object,
		// it should not fail to be marshalled into bytes.
		// The error here is most likely caused by an abnormal HTTP connection,
		// e.g. a broken pipe, so we write the error response only once
		// to avoid endless circular calling.
		// More info: https://github.com/kubernetes-sigs/controller-runtime/pull/1930
		serverError := Errored(http.StatusInternalServerError, err)
		if err = json.NewEncoder(w).Encode(configv1alpha1.ResourceInterpreterContext{Response: &serverError.ResourceInterpreterResponse}); err != nil {
			klog.Errorf("still unable to encode and write the InternalServerError response: %v", err)
		}
	} else {
		response := interpreterContext.Response
		if response.Successful {
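The comment above describes writing the error response exactly once when the connection itself is broken, instead of recursing through the normal write path. A small, generic sketch of that pattern, assuming nothing about Karmada's actual webhook types:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

type response struct {
	Successful bool   `json:"successful"`
	Message    string `json:"message,omitempty"`
}

// writeResponse encodes resp to w. If encoding fails (typically a broken
// connection rather than a marshalling problem), it attempts to write an
// error response exactly once instead of calling back into itself, so a dead
// writer can never trigger endless circular calls.
func writeResponse(w io.Writer, resp response) {
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		fallback := response{Successful: false, Message: err.Error()}
		if err := json.NewEncoder(w).Encode(fallback); err != nil {
			fmt.Println("still unable to write the error response:", err)
		}
	}
}

// brokenWriter simulates a client that has gone away.
type brokenWriter struct{}

func (brokenWriter) Write(_ []byte) (int, error) {
	return 0, errors.New("mock: write: broken pipe")
}

func main() {
	writeResponse(brokenWriter{}, response{Successful: true})
	fmt.Println("returned without looping")
}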
@ -59,24 +59,13 @@ func (m *mockBody) Close() error {
	return nil
}

// limitedBadResponseWriter is a custom io.Writer implementation that simulates
// write errors for a specified number of attempts. After a certain number of failures,
// it allows the write operation to succeed.
type limitedBadResponseWriter struct {
	failCount   int
	maxFailures int
// brokenWriter implements the io.Writer interface.
type brokenWriter struct {
	http.ResponseWriter
}

// Write simulates writing data to the writer. It forces an error response for
// a limited number of attempts, specified by maxFailures. Once failCount reaches
// maxFailures, it allows the write to succeed.
func (b *limitedBadResponseWriter) Write(p []byte) (n int, err error) {
	if b.failCount < b.maxFailures {
		b.failCount++
		return 0, errors.New("forced write error")
	}
	// After reaching maxFailures, allow the write to succeed to stop the infinite loop.
	return len(p), nil
func (bw *brokenWriter) Write(_ []byte) (int, error) {
	return 0, fmt.Errorf("mock: write: broken pipe")
}

func TestServeHTTP(t *testing.T) {

@ -294,20 +283,14 @@ func TestWriteResourceInterpreterResponse(t *testing.T) {
			},
		},
		{
			name:        "WriteResourceInterpreterResponse_FailedToWrite_WriterReachedMaxFailures",
			name:        "should never run into circular calling if the writer has broken",
			mockHandler: &HTTPMockHandler{},
			res: configv1alpha1.ResourceInterpreterContext{
				Response: &configv1alpha1.ResourceInterpreterResponse{},
			},
			rec: &limitedBadResponseWriter{maxFailures: 3},
			verify: func(writer io.Writer, _ *configv1alpha1.ResourceInterpreterResponse) error {
				data, ok := writer.(*limitedBadResponseWriter)
				if !ok {
					return fmt.Errorf("expected writer of type limitedBadResponseWriter but got %T", writer)
				}
				if data.failCount != data.maxFailures {
					return fmt.Errorf("expected %d write failures, got %d", data.maxFailures, data.failCount)
				}
			rec: &brokenWriter{},
			verify: func(_ io.Writer, _ *configv1alpha1.ResourceInterpreterResponse) error {
				// reaching here means not running into circular calling
				return nil
			},
		},
@ -1161,7 +1161,7 @@ var _ = ginkgo.Describe("[AdvancedCase] PropagationPolicy testing", func() {
	})
})

var _ = ginkgo.Describe("[Suspension] PropagationPolicy testing", func() {
var _ = ginkgo.Describe("Suspension: PropagationPolicy testing", func() {
	var policy *policyv1alpha1.PropagationPolicy
	var deployment *appsv1.Deployment
	var targetMember string

@ -1190,6 +1190,12 @@ var _ = ginkgo.Describe("[Suspension] PropagationPolicy testing", func() {
			func(*appsv1.Deployment) bool {
				return true
			})
		ginkgo.By("update the pp suspension dispatching to true", func() {
			policy.Spec.Suspension = &policyv1alpha1.Suspension{
				Dispatching: ptr.To(true),
			}
			framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policy.Spec)
		})
		ginkgo.DeferCleanup(func() {
			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
			framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)

@ -1197,13 +1203,6 @@ var _ = ginkgo.Describe("[Suspension] PropagationPolicy testing", func() {
	})

	ginkgo.It("suspend the PP dispatching", func() {
		ginkgo.By("update the pp suspension dispatching to true", func() {
			policy.Spec.Suspension = &policyv1alpha1.Suspension{
				Dispatching: ptr.To(true),
			}
			framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policy.Spec)
		})

		ginkgo.By("check RB suspension spec", func() {
			framework.WaitResourceBindingFitWith(karmadaClient, deployment.Namespace, names.GenerateBindingName(deployment.Kind, deployment.Name),
				func(binding *workv1alpha2.ResourceBinding) bool {

@ -1246,7 +1245,7 @@ var _ = ginkgo.Describe("[Suspension] PropagationPolicy testing", func() {
		})
	})

	ginkgo.It("suspension resume", func() {
	ginkgo.It("suspension resume by setting policy suspension to an empty structure", func() {
		ginkgo.By("update deployment replicas", func() {
			framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas)
		})

@ -1263,4 +1262,22 @@ var _ = ginkgo.Describe("[Suspension] PropagationPolicy testing", func() {
			})
		})
	})

	ginkgo.It("suspension resume by setting policy suspension to nil", func() {
		ginkgo.By("update deployment replicas", func() {
			framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas)
		})

		ginkgo.By("resume the propagationPolicy", func() {
			policy.Spec.Suspension = nil
			framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policy.Spec)
		})

		ginkgo.By("check deployment replicas", func() {
			framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name,
				func(d *appsv1.Deployment) bool {
					return *d.Spec.Replicas == updateDeploymentReplicas
				})
		})
	})
})
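For reference, a minimal sketch of the suspension toggle these cases exercise; the field values mirror the test code above, while the policy object here is a standalone illustration (a real change would still be applied through the client, as the tests do via UpdatePropagationPolicyWithSpec):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

func main() {
	policy := &policyv1alpha1.PropagationPolicy{}

	// Suspend dispatching: scheduled work stops being synced to member clusters.
	policy.Spec.Suspension = &policyv1alpha1.Suspension{Dispatching: ptr.To(true)}
	fmt.Println(*policy.Spec.Suspension.Dispatching) // true

	// Resume either with an empty Suspension or by clearing the field entirely;
	// both paths are covered by the e2e cases above.
	policy.Spec.Suspension = &policyv1alpha1.Suspension{}
	policy.Spec.Suspension = nil
	fmt.Println(policy.Spec.Suspension == nil) // true – dispatching resumes
}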