Compare commits


No commits in common. "main" and "v1.8.0" have entirely different histories.
main...v1.8.0

64 changed files with 1209 additions and 3069 deletions

View File

@@ -1,3 +0,0 @@
# Karmada Community Code of Conduct
Please refer to our [Karmada Community Code of Conduct](https://github.com/karmada-io/community/blob/main/CODE_OF_CONDUCT.md).

View File

@@ -1,21 +0,0 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true
// +groupName=apps.karmada.io
package v1alpha1

View File

@@ -1,151 +0,0 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
const (
// ResourceKindWorkloadRebalancer is kind name of WorkloadRebalancer.
ResourceKindWorkloadRebalancer = "WorkloadRebalancer"
// ResourceSingularWorkloadRebalancer is singular name of WorkloadRebalancer.
ResourceSingularWorkloadRebalancer = "workloadrebalancer"
// ResourcePluralWorkloadRebalancer is kind plural name of WorkloadRebalancer.
ResourcePluralWorkloadRebalancer = "workloadrebalancers"
// ResourceNamespaceScopedWorkloadRebalancer indicates if WorkloadRebalancer is NamespaceScoped.
ResourceNamespaceScopedWorkloadRebalancer = false
)
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:path=workloadrebalancers,scope="Cluster"
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// WorkloadRebalancer represents the desired behavior and status of a job which can enforces a resource rebalance.
type WorkloadRebalancer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec represents the specification of the desired behavior of WorkloadRebalancer.
// +required
Spec WorkloadRebalancerSpec `json:"spec"`
// Status represents the status of WorkloadRebalancer.
// +optional
Status WorkloadRebalancerStatus `json:"status,omitempty"`
}
// WorkloadRebalancerSpec represents the specification of the desired behavior of Reschedule.
type WorkloadRebalancerSpec struct {
// Workloads used to specify the list of expected resource.
// Nil or empty list is not allowed.
// +kubebuilder:validation:MinItems=1
// +required
Workloads []ObjectReference `json:"workloads"`
// TTLSecondsAfterFinished limits the lifetime of a WorkloadRebalancer that has finished execution (means each
// target workload is finished with result of Successful or Failed).
// If this field is set, ttlSecondsAfterFinished after the WorkloadRebalancer finishes, it is eligible to be automatically deleted.
// If this field is unset, the WorkloadRebalancer won't be automatically deleted.
// If this field is set to zero, the WorkloadRebalancer becomes eligible to be deleted immediately after it finishes.
// +optional
TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
}
// ObjectReference the expected resource.
type ObjectReference struct {
// APIVersion represents the API version of the target resource.
// +required
APIVersion string `json:"apiVersion"`
// Kind represents the Kind of the target resource.
// +required
Kind string `json:"kind"`
// Name of the target resource.
// +required
Name string `json:"name"`
// Namespace of the target resource.
// Default is empty, which means it is a non-namespacescoped resource.
// +optional
Namespace string `json:"namespace,omitempty"`
}
// WorkloadRebalancerStatus contains information about the current status of a WorkloadRebalancer
// updated periodically by schedule trigger controller.
type WorkloadRebalancerStatus struct {
// ObservedWorkloads contains information about the execution states and messages of target resources.
// +optional
ObservedWorkloads []ObservedWorkload `json:"observedWorkloads,omitempty"`
// ObservedGeneration is the generation(.metadata.generation) observed by the controller.
// If ObservedGeneration is less than the generation in metadata means the controller hasn't confirmed
// the rebalance result or hasn't done the rebalance yet.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// FinishTime represents the finish time of rebalancer.
// +optional
FinishTime *metav1.Time `json:"finishTime,omitempty"`
}
// ObservedWorkload the observed resource.
type ObservedWorkload struct {
// Workload the observed resource.
// +required
Workload ObjectReference `json:"workload"`
// Result the observed rebalance result of resource.
// +optional
Result RebalanceResult `json:"result,omitempty"`
// Reason represents a machine-readable description of why this resource rebalanced failed.
// +optional
Reason RebalanceFailedReason `json:"reason,omitempty"`
}
// RebalanceResult the specific extent to which the resource has been rebalanced
type RebalanceResult string
const (
// RebalanceFailed the resource has been rebalance failed.
RebalanceFailed RebalanceResult = "Failed"
// RebalanceSuccessful the resource has been successfully rebalanced.
RebalanceSuccessful RebalanceResult = "Successful"
)
// RebalanceFailedReason represents a machine-readable description of why this resource rebalanced failed.
type RebalanceFailedReason string
const (
// RebalanceObjectNotFound the resource referenced binding not found.
RebalanceObjectNotFound RebalanceFailedReason = "ReferencedBindingNotFound"
)
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// WorkloadRebalancerList contains a list of WorkloadRebalancer
type WorkloadRebalancerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
// Items holds a list of WorkloadRebalancer.
Items []WorkloadRebalancer `json:"items"`
}
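For orientation, here is a minimal sketch of how the types above compose into a complete object. It assumes the package is importable as appsv1alpha1 from github.com/karmada-io/api/apps/v1alpha1 (an assumed import path based on the repository and group name); the constants and GroupVersion come from the sibling files shown below.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// Assumed import path for the package defined in this file.
	appsv1alpha1 "github.com/karmada-io/api/apps/v1alpha1"
)

func main() {
	// Clean the rebalancer up five minutes after every target workload
	// has finished with a Successful or Failed result.
	ttl := int32(300)

	rebalancer := appsv1alpha1.WorkloadRebalancer{
		TypeMeta: metav1.TypeMeta{
			APIVersion: appsv1alpha1.GroupVersion.String(), // "apps.karmada.io/v1alpha1"
			Kind:       appsv1alpha1.ResourceKindWorkloadRebalancer,
		},
		ObjectMeta: metav1.ObjectMeta{Name: "demo-rebalancer"},
		Spec: appsv1alpha1.WorkloadRebalancerSpec{
			// Workloads must contain at least one entry (MinItems=1).
			Workloads: []appsv1alpha1.ObjectReference{
				{APIVersion: "apps/v1", Kind: "Deployment", Namespace: "default", Name: "nginx"},
			},
			TTLSecondsAfterFinished: &ttl,
		},
	}

	fmt.Println(rebalancer.Spec.Workloads[0].Name)
}
```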

View File

@@ -1,171 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
func (in *ObjectReference) DeepCopy() *ObjectReference {
if in == nil {
return nil
}
out := new(ObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObservedWorkload) DeepCopyInto(out *ObservedWorkload) {
*out = *in
out.Workload = in.Workload
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservedWorkload.
func (in *ObservedWorkload) DeepCopy() *ObservedWorkload {
if in == nil {
return nil
}
out := new(ObservedWorkload)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadRebalancer) DeepCopyInto(out *WorkloadRebalancer) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadRebalancer.
func (in *WorkloadRebalancer) DeepCopy() *WorkloadRebalancer {
if in == nil {
return nil
}
out := new(WorkloadRebalancer)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadRebalancer) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadRebalancerList) DeepCopyInto(out *WorkloadRebalancerList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]WorkloadRebalancer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadRebalancerList.
func (in *WorkloadRebalancerList) DeepCopy() *WorkloadRebalancerList {
if in == nil {
return nil
}
out := new(WorkloadRebalancerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadRebalancerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadRebalancerSpec) DeepCopyInto(out *WorkloadRebalancerSpec) {
*out = *in
if in.Workloads != nil {
in, out := &in.Workloads, &out.Workloads
*out = make([]ObjectReference, len(*in))
copy(*out, *in)
}
if in.TTLSecondsAfterFinished != nil {
in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadRebalancerSpec.
func (in *WorkloadRebalancerSpec) DeepCopy() *WorkloadRebalancerSpec {
if in == nil {
return nil
}
out := new(WorkloadRebalancerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadRebalancerStatus) DeepCopyInto(out *WorkloadRebalancerStatus) {
*out = *in
if in.ObservedWorkloads != nil {
in, out := &in.ObservedWorkloads, &out.ObservedWorkloads
*out = make([]ObservedWorkload, len(*in))
copy(*out, *in)
}
if in.FinishTime != nil {
in, out := &in.FinishTime, &out.FinishTime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadRebalancerStatus.
func (in *WorkloadRebalancerStatus) DeepCopy() *WorkloadRebalancerStatus {
if in == nil {
return nil
}
out := new(WorkloadRebalancerStatus)
in.DeepCopyInto(out)
return out
}
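A short usage sketch of the generated helpers above, under the same assumed import path; it relies only on the DeepCopy and DeepCopyObject methods defined in this file:

```go
package main

import (
	"fmt"

	// Assumed import path, as in the earlier sketch.
	appsv1alpha1 "github.com/karmada-io/api/apps/v1alpha1"
)

func main() {
	original := &appsv1alpha1.WorkloadRebalancer{
		Spec: appsv1alpha1.WorkloadRebalancerSpec{
			Workloads: []appsv1alpha1.ObjectReference{
				{APIVersion: "apps/v1", Kind: "Deployment", Namespace: "default", Name: "nginx"},
			},
		},
	}

	// WorkloadRebalancerSpec.DeepCopyInto allocates a fresh Workloads slice,
	// so mutating the clone does not touch the original.
	clone := original.DeepCopy()
	clone.Spec.Workloads[0].Name = "redis"
	fmt.Println(original.Spec.Workloads[0].Name) // nginx

	// DeepCopyObject returns the copy as a runtime.Object, which is the hook
	// scheme and client machinery use when they need an independent copy.
	obj := original.DeepCopyObject()
	fmt.Printf("%T\n", obj)
}
```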

View File

@@ -1,70 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
const GroupName = "apps.karmada.io"
// GroupVersion specifies the group and the version used to register the objects.
var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// SchemeGroupVersion is group version used to register these objects
// Deprecated: use GroupVersion instead.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes)
}
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&WorkloadRebalancer{},
&WorkloadRebalancerList{},
)
// AddToGroupVersion allows the serialization of client types like ListOptions.
v1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
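A minimal sketch of wiring this registration into a runtime.Scheme, again under the assumed import path; Install, SchemeGroupVersion, Resource, and the resource-name constants are the ones defined above:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	// Assumed import path for the package defined in this file.
	appsv1alpha1 "github.com/karmada-io/api/apps/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()

	// Install runs addKnownTypes, adding WorkloadRebalancer and
	// WorkloadRebalancerList (plus the shared meta/v1 list types) to the scheme.
	if err := appsv1alpha1.Install(scheme); err != nil {
		panic(err)
	}

	// Resource qualifies a plural resource name with the group.
	gr := appsv1alpha1.Resource(appsv1alpha1.ResourcePluralWorkloadRebalancer)
	fmt.Println(gr.String()) // workloadrebalancers.apps.karmada.io

	gvk := appsv1alpha1.SchemeGroupVersion.WithKind(appsv1alpha1.ResourceKindWorkloadRebalancer)
	fmt.Println(scheme.Recognizes(gvk)) // true
}
```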

View File

@@ -1,12 +1,9 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -24,7 +21,7 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
-// +kubebuilder:resource:path=cronfederatedhpas,scope=Namespaced,shortName=cronfhpa,categories={karmada-io}
+// +kubebuilder:resource:shortName=cronfhpa,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.scaleTargetRef.kind`,name=`REFERENCE-KIND`,type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.scaleTargetRef.name`,name=`REFERENCE-NAME`,type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name=`AGE`,type=date

View File

@@ -24,7 +24,7 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
-// +kubebuilder:resource:path=federatedhpas,scope=Namespaced,shortName=fhpa,categories={karmada-io}
+// +kubebuilder:resource:shortName=fhpa,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.scaleTargetRef.kind`,name=`REFERENCE-KIND`,type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.scaleTargetRef.name`,name=`REFERENCE-NAME`,type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.minReplicas`,name=`MINPODS`,type=integer

View File

@@ -1,42 +1,42 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
// FederatedHPAKind is the kind of FederatedHPA in group autoscaling.karmada.io
FederatedHPAKind = "FederatedHPA"
// QuerySourceAnnotationKey is the annotation used in karmada-metrics-adapter to
// record the query source cluster
QuerySourceAnnotationKey = "resource.karmada.io/query-from-cluster"
// ResourceSingularFederatedHPA is singular name of FederatedHPA.
ResourceSingularFederatedHPA = "federatedhpa"
// ResourcePluralFederatedHPA is plural name of FederatedHPA.
ResourcePluralFederatedHPA = "federatedhpas"
// ResourceNamespaceScopedFederatedHPA is the scope of the FederatedHPA
ResourceNamespaceScopedFederatedHPA = true
// ResourceKindCronFederatedHPA is kind name of CronFederatedHPA.
ResourceKindCronFederatedHPA = "CronFederatedHPA"
// ResourceSingularCronFederatedHPA is singular name of CronFederatedHPA.
ResourceSingularCronFederatedHPA = "cronfederatedhpa"
// ResourcePluralCronFederatedHPA is plural name of CronFederatedHPA.
ResourcePluralCronFederatedHPA = "cronfederatedhpas"
// ResourceNamespaceScopedCronFederatedHPA is the scope of the CronFederatedHPA
ResourceNamespaceScopedCronFederatedHPA = true
)

View File

@@ -1,22 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
-/*
-Copyright The Karmada Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1

View File

@@ -1,30 +1,11 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-/*
-Copyright The Karmada Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-runtime "k8s.io/apimachinery/pkg/runtime"
-schema "k8s.io/apimachinery/pkg/runtime/schema"
+"k8s.io/apimachinery/pkg/runtime"
+"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@@ -46,7 +27,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
-// Deprecated: use Install instead
+// Depreciated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@@ -18,7 +18,6 @@ package install
import (
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/karmada-io/api/cluster"
@@ -28,6 +27,6 @@ import (
// Install registers the API group and adds types to a scheme.
func Install(scheme *runtime.Scheme) {
utilruntime.Must(cluster.AddToScheme(scheme))
-utilruntime.Must(clusterv1alpha1.Install(scheme))
-utilruntime.Must(scheme.SetVersionPriority(schema.GroupVersion{Group: clusterv1alpha1.GroupVersion.Group, Version: clusterv1alpha1.GroupVersion.Version}))
+utilruntime.Must(clusterv1alpha1.AddToScheme(scheme))
+utilruntime.Must(scheme.SetVersionPriority(clusterv1alpha1.SchemeGroupVersion))
}
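A minimal sketch of how an install package like this is usually consumed when assembling a scheme; the clusterinstall and clusterv1alpha1 import paths are assumptions based on the imports visible in the hunk above:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	// Assumed import paths based on the imports shown above.
	clusterinstall "github.com/karmada-io/api/cluster/install"
	clusterv1alpha1 "github.com/karmada-io/api/cluster/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()

	// Install registers the internal cluster group, its v1alpha1 version,
	// and the version priority, as the function above does.
	clusterinstall.Install(scheme)

	gvk := clusterv1alpha1.SchemeGroupVersion.WithKind("Cluster")
	fmt.Println(scheme.Recognizes(gvk)) // true
}
```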

View File

@@ -0,0 +1,368 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mutation
import (
"fmt"
"math"
"reflect"
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clusterapis "github.com/karmada-io/api/cluster"
)
func TestMutateCluster(t *testing.T) {
type args struct {
cluster *clusterapis.Cluster
}
tests := []struct {
name string
args args
fun func(args) error
}{
{
name: "test mutate cluster Taints",
args: args{
cluster: &clusterapis.Cluster{
Spec: clusterapis.ClusterSpec{
Taints: []corev1.Taint{
{
Key: "foo",
Value: "abc",
Effect: corev1.TaintEffectNoSchedule,
},
{
Key: "bar",
Effect: corev1.TaintEffectNoExecute,
}}}},
},
fun: func(data args) error {
for i := range data.cluster.Spec.Taints {
if data.cluster.Spec.Taints[i].Effect == corev1.TaintEffectNoExecute && data.cluster.Spec.Taints[i].TimeAdded == nil {
return fmt.Errorf("failed to mutate cluster, taints TimeAdded should not be nil")
}
}
return nil
},
},
{
name: "test mutate cluster Zone",
args: args{
cluster: &clusterapis.Cluster{
Spec: clusterapis.ClusterSpec{
Zone: "zone1",
},
},
},
fun: func(data args) error {
if data.cluster.Spec.Zone != "" && len(data.cluster.Spec.Zones) == 0 {
return fmt.Errorf("failed to mutate cluster, zones should not be nil")
}
return nil
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
MutateCluster(tt.args.cluster)
if err := tt.fun(tt.args); err != nil {
t.Error(err)
}
})
}
}
func TestStandardizeClusterResourceModels(t *testing.T) {
testCases := map[string]struct {
models []clusterapis.ResourceModel
expectedModels []clusterapis.ResourceModel
}{
"sort models": {
models: []clusterapis.ResourceModel{
{
Grade: 2,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(2, resource.DecimalSI),
Max: *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
},
},
},
{
Grade: 1,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(2, resource.DecimalSI),
},
},
},
},
expectedModels: []clusterapis.ResourceModel{
{
Grade: 1,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(2, resource.DecimalSI),
},
},
},
{
Grade: 2,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(2, resource.DecimalSI),
Max: *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
},
},
},
},
},
"start with 0": {
models: []clusterapis.ResourceModel{
{
Grade: 1,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(1, resource.DecimalSI),
Max: *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
},
},
},
},
expectedModels: []clusterapis.ResourceModel{
{
Grade: 1,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
},
},
},
},
},
"end with MaxInt64": {
models: []clusterapis.ResourceModel{
{
Grade: 1,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(2, resource.DecimalSI),
},
},
},
},
expectedModels: []clusterapis.ResourceModel{
{
Grade: 1,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
},
},
},
},
},
}
for name, testCase := range testCases {
StandardizeClusterResourceModels(testCase.models)
if !reflect.DeepEqual(testCase.models, testCase.expectedModels) {
t.Errorf("expected sorted resource models for %q, but it did not work", name)
return
}
}
}
func TestSetDefaultClusterResourceModels(t *testing.T) {
type args struct {
cluster *clusterapis.Cluster
}
tests := []struct {
name string
args args
wantModels []clusterapis.ResourceModel
}{
{
name: "test set default Cluster",
args: args{
cluster: &clusterapis.Cluster{},
},
wantModels: []clusterapis.ResourceModel{
{
Grade: 0,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(1, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(0, resource.BinarySI),
Max: *resource.NewQuantity(4*GB, resource.BinarySI),
},
},
},
{
Grade: 1,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(1, resource.DecimalSI),
Max: *resource.NewQuantity(2, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(4*GB, resource.BinarySI),
Max: *resource.NewQuantity(16*GB, resource.BinarySI),
},
},
},
{
Grade: 2,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(2, resource.DecimalSI),
Max: *resource.NewQuantity(4, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(16*GB, resource.BinarySI),
Max: *resource.NewQuantity(32*GB, resource.BinarySI),
},
},
},
{
Grade: 3,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(4, resource.DecimalSI),
Max: *resource.NewQuantity(8, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(32*GB, resource.BinarySI),
Max: *resource.NewQuantity(64*GB, resource.BinarySI),
},
},
},
{
Grade: 4,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(8, resource.DecimalSI),
Max: *resource.NewQuantity(16, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(64*GB, resource.BinarySI),
Max: *resource.NewQuantity(128*GB, resource.BinarySI),
},
},
},
{
Grade: 5,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(16, resource.DecimalSI),
Max: *resource.NewQuantity(32, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(128*GB, resource.BinarySI),
Max: *resource.NewQuantity(256*GB, resource.BinarySI),
},
},
},
{
Grade: 6,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(32, resource.DecimalSI),
Max: *resource.NewQuantity(64, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(256*GB, resource.BinarySI),
Max: *resource.NewQuantity(512*GB, resource.BinarySI),
},
},
},
{
Grade: 7,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(64, resource.DecimalSI),
Max: *resource.NewQuantity(128, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(512*GB, resource.BinarySI),
Max: *resource.NewQuantity(1024*GB, resource.BinarySI),
},
},
},
{
Grade: 8,
Ranges: []clusterapis.ResourceModelRange{
{
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(128, resource.DecimalSI),
Max: *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
},
{
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(1024*GB, resource.BinarySI),
Max: *resource.NewQuantity(math.MaxInt64, resource.BinarySI),
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
SetDefaultClusterResourceModels(tt.args.cluster)
})
if !reflect.DeepEqual(tt.args.cluster.Spec.ResourceModels, tt.wantModels) {
t.Errorf("SetDefaultClusterResourceModels expected resourceModels %+v, bud get %+v", tt.wantModels, tt.args.cluster.Spec.ResourceModels)
return
}
}
}

View File

@@ -28,7 +28,7 @@ import (
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Cluster represents the desired state and status of a member cluster.
+// Cluster represents the desire state and status of a member cluster.
type Cluster struct {
metav1.TypeMeta
metav1.ObjectMeta
@@ -44,8 +44,8 @@ type Cluster struct {
// ClusterSpec defines the desired state of a member cluster.
type ClusterSpec struct {
// ID is the unique identifier for the cluster.
-// It is different from the object uid(.metadata.uid) and is typically collected automatically
-// from each member cluster during the process of registration.
+// It is different from the object uid(.metadata.uid) and typically collected automatically
+// from member cluster during the progress of registration.
//
// The value is collected in order:
// 1. If the registering cluster enabled ClusterProperty API and defined the cluster ID by
@@ -63,7 +63,7 @@ type ClusterSpec struct {
// +kubebuilder:validation:Maxlength=128000
ID string `json:"id,omitempty"`
-// SyncMode describes how a cluster syncs resources from karmada control plane.
+// SyncMode describes how a cluster sync resources from karmada control plane.
// +required
SyncMode ClusterSyncMode
@@ -72,14 +72,14 @@ type ClusterSpec struct {
// +optional
APIEndpoint string
-// SecretRef represents the secret that contains mandatory credentials to access the member cluster.
+// SecretRef represents the secret contains mandatory credentials to access the member cluster.
// The secret should hold credentials as follows:
// - secret.data.token
// - secret.data.caBundle
// +optional
SecretRef *LocalSecretReference
-// ImpersonatorSecretRef represents the secret that contains the token of impersonator.
+// ImpersonatorSecretRef represents the secret contains the token of impersonator.
// The secret should hold credentials as follows:
// - secret.data.token
// +optional
@@ -94,12 +94,12 @@ type ClusterSpec struct {
// ProxyURL is the proxy URL for the cluster.
// If not empty, the karmada control plane will use this proxy to talk to the cluster.
-// For more details please refer to: https://github.com/kubernetes/client-go/issues/351
+// More details please refer to: https://github.com/kubernetes/client-go/issues/351
// +optional
ProxyURL string
// ProxyHeader is the HTTP header required by proxy server.
-// The key in the key-value pair is HTTP header key and the value is the associated header payloads.
+// The key in the key-value pair is HTTP header key and value is the associated header payloads.
// For the header with multiple values, the values should be separated by comma(e.g. 'k1': 'v1,v2,v3').
// +optional
ProxyHeader map[string]string
@@ -108,12 +108,12 @@ type ClusterSpec struct {
// +optional
Provider string
-// Region represents the region in which the member cluster is located.
+// Region represents the region of the member cluster locate in.
// +optional
Region string
-// Zone represents the zone in which the member cluster is located.
-// Deprecated: This field was never used by Karmada, and it will not be
+// Zone represents the zone of the member cluster locate in.
+// Deprecated: This filed was never been used by Karmada, and it will not be
// removed from v1alpha1 for backward compatibility, use Zones instead.
// +optional
Zone string
@@ -126,7 +126,7 @@ type ClusterSpec struct {
// +optional
Zones []string `json:"zones,omitempty"`
-// Taints are attached to the member cluster.
+// Taints attached to the member cluster.
// Taints on the cluster have the "effect" on
// any resource that does not tolerate the Taint.
// +optional
@@ -204,8 +204,8 @@ type ResourceModel struct {
// ResourceModelRange describes the detail of each modeling quota that ranges from min to max.
// Please pay attention, by default, the value of min can be inclusive, and the value of max cannot be inclusive.
-// E.g. in an interval, min = 2, max = 10 is set, which means the interval [2,10).
-// This rule ensures that all intervals have the same meaning. If the last interval is infinite,
+// E.g. in an interval, min = 2, max =10 is set, which means the interval [2,10).
+// This rule ensure that all intervals have the same meaning. If the last interval is infinite,
// it is definitely unreachable. Therefore, we define the right interval as the open interval.
// For a valid interval, the value on the right is greater than the value on the left,
// in other words, max must be greater than min.
@@ -213,7 +213,7 @@ type ResourceModel struct {
type ResourceModelRange struct {
// Name is the name for the resource that you want to categorize.
// +required
-Name corev1.ResourceName
+Name ResourceName
// Min is the minimum amount of this resource represented by resource name.
// Note: The Min value of first grade(usually 0) always acts as zero.
@@ -242,13 +242,13 @@ const (
type ClusterSyncMode string
const (
-// Push means that the controller on the karmada control plane will be in charge of synchronization.
-// The controller watches resources change on karmada control plane and then pushes them to member cluster.
+// Push means that the controller on the karmada control plane will in charge of synchronization.
+// The controller watches resources change on karmada control plane then pushes them to member cluster.
Push ClusterSyncMode = "Push"
-// Pull means that the controller running on the member cluster will be in charge of synchronization.
-// The controller, also known as 'agent', watches resources change on karmada control plane, then fetches them
-// and applies them locally on the member cluster.
+// Pull means that the controller running on the member cluster will in charge of synchronization.
+// The controller, as well known as 'agent', watches resources change on karmada control plane then fetches them
+// and applies locally on the member cluster.
Pull ClusterSyncMode = "Pull"
)
@@ -258,7 +258,7 @@ type LocalSecretReference struct {
// Namespace is the namespace for the resource being referenced.
Namespace string
-// Name is the name of the resource being referenced.
+// Name is the name of resource being referenced.
Name string
}
@@ -266,9 +266,6 @@ type LocalSecretReference struct {
const (
// ClusterConditionReady means the cluster is healthy and ready to accept workloads.
ClusterConditionReady = "Ready"
-// ClusterConditionCompleteAPIEnablements indicates whether the cluster's API enablements(.status.apiEnablements) are complete.
-ClusterConditionCompleteAPIEnablements = "CompleteAPIEnablements"
)
// ClusterStatus contains information about the current status of a
@@ -278,7 +275,7 @@ type ClusterStatus struct {
// +optional
KubernetesVersion string
-// APIEnablements represents the list of APIs installed on the member cluster.
+// APIEnablements represents the list of APIs installed in the member cluster.
// +optional
APIEnablements []APIEnablement
@@ -293,11 +290,6 @@ type ClusterStatus struct {
// ResourceSummary represents the summary of resources in the member cluster.
// +optional
ResourceSummary *ResourceSummary
-// RemedyActions represents the remedy actions that needs to be performed
-// on the cluster.
-// +optional
-RemedyActions []string
}
// APIEnablement is a list of API resource, it is used to expose the name of the
@@ -369,7 +361,7 @@ type AllocatableModeling struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// ClusterList contains a list of member clusters
+// ClusterList contains a list of member cluster
type ClusterList struct {
metav1.TypeMeta
metav1.ListMeta

View File

@@ -39,7 +39,7 @@ const (
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:subresource:status
-// Cluster represents the desired state and status of a member cluster.
+// Cluster represents the desire state and status of a member cluster.
type Cluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -55,8 +55,8 @@ type Cluster struct {
// ClusterSpec defines the desired state of a member cluster.
type ClusterSpec struct {
// ID is the unique identifier for the cluster.
-// It is different from the object uid(.metadata.uid) and is typically collected automatically
-// from each member cluster during the process of registration.
+// It is different from the object uid(.metadata.uid) and typically collected automatically
+// from member cluster during the progress of registration.
//
// The value is collected in order:
// 1. If the registering cluster enabled ClusterProperty API and defined the cluster ID by
@@ -74,7 +74,7 @@ type ClusterSpec struct {
// +kubebuilder:validation:Maxlength=128000
ID string `json:"id,omitempty"`
-// SyncMode describes how a cluster syncs resources from karmada control plane.
+// SyncMode describes how a cluster sync resources from karmada control plane.
// +kubebuilder:validation:Enum=Push;Pull
// +required
SyncMode ClusterSyncMode `json:"syncMode"`
@@ -84,14 +84,14 @@ type ClusterSpec struct {
// +optional
APIEndpoint string `json:"apiEndpoint,omitempty"`
-// SecretRef represents the secret that contains mandatory credentials to access the member cluster.
+// SecretRef represents the secret contains mandatory credentials to access the member cluster.
// The secret should hold credentials as follows:
// - secret.data.token
// - secret.data.caBundle
// +optional
SecretRef *LocalSecretReference `json:"secretRef,omitempty"`
-// ImpersonatorSecretRef represents the secret that contains the token of impersonator.
+// ImpersonatorSecretRef represents the secret contains the token of impersonator.
// The secret should hold credentials as follows:
// - secret.data.token
// +optional
@@ -106,12 +106,12 @@ type ClusterSpec struct {
// ProxyURL is the proxy URL for the cluster.
// If not empty, the karmada control plane will use this proxy to talk to the cluster.
-// For more details please refer to: https://github.com/kubernetes/client-go/issues/351
+// More details please refer to: https://github.com/kubernetes/client-go/issues/351
// +optional
ProxyURL string `json:"proxyURL,omitempty"`
// ProxyHeader is the HTTP header required by proxy server.
-// The key in the key-value pair is HTTP header key and the value is the associated header payloads.
+// The key in the key-value pair is HTTP header key and value is the associated header payloads.
// For the header with multiple values, the values should be separated by comma(e.g. 'k1': 'v1,v2,v3').
// +optional
ProxyHeader map[string]string `json:"proxyHeader,omitempty"`
@@ -120,12 +120,12 @@ type ClusterSpec struct {
// +optional
Provider string `json:"provider,omitempty"`
-// Region represents the region in which the member cluster is located.
+// Region represents the region of the member cluster locate in.
// +optional
Region string `json:"region,omitempty"`
-// Zone represents the zone in which the member cluster is located.
-// Deprecated: This field was never been used by Karmada, and it will not be
+// Zone represents the zone of the member cluster locate in.
+// Deprecated: This filed was never been used by Karmada, and it will not be
// removed from v1alpha1 for backward compatibility, use Zones instead.
// +optional
Zone string `json:"zone,omitempty"`
@@ -138,7 +138,7 @@ type ClusterSpec struct {
// +optional
Zones []string `json:"zones,omitempty"`
-// Taints are attached to the member cluster.
+// Taints attached to the member cluster.
// Taints on the cluster have the "effect" on
// any resource that does not tolerate the Taint.
// +optional
@@ -216,8 +216,8 @@ type ResourceModel struct {
// ResourceModelRange describes the detail of each modeling quota that ranges from min to max.
// Please pay attention, by default, the value of min can be inclusive, and the value of max cannot be inclusive.
-// E.g. in an interval, min = 2, max = 10 is set, which means the interval [2,10).
-// This rule ensures that all intervals have the same meaning. If the last interval is infinite,
+// E.g. in an interval, min = 2, max =10 is set, which means the interval [2,10).
+// This rule ensure that all intervals have the same meaning. If the last interval is infinite,
// it is definitely unreachable. Therefore, we define the right interval as the open interval.
// For a valid interval, the value on the right is greater than the value on the left,
// in other words, max must be greater than min.
@@ -254,13 +254,13 @@ const (
type ClusterSyncMode string
const (
-// Push means that the controller on the karmada control plane will be in charge of synchronization.
-// The controller watches resources change on karmada control plane and then pushes them to member cluster.
+// Push means that the controller on the karmada control plane will in charge of synchronization.
+// The controller watches resources change on karmada control plane then pushes them to member cluster.
Push ClusterSyncMode = "Push"
-// Pull means that the controller running on the member cluster will be in charge of synchronization.
-// The controller, also known as 'agent', watches resources change on karmada control plane, then fetches them
-// and applies them locally on the member cluster.
+// Pull means that the controller running on the member cluster will in charge of synchronization.
+// The controller, as well known as 'agent', watches resources change on karmada control plane then fetches them
+// and applies locally on the member cluster.
Pull ClusterSyncMode = "Pull"
)
@@ -270,7 +270,7 @@ type LocalSecretReference struct {
// Namespace is the namespace for the resource being referenced.
Namespace string `json:"namespace"`
-// Name is the name of the resource being referenced.
+// Name is the name of resource being referenced.
Name string `json:"name"`
}
@@ -278,9 +278,6 @@ type LocalSecretReference struct {
const (
// ClusterConditionReady means the cluster is healthy and ready to accept workloads.
ClusterConditionReady = "Ready"
-// ClusterConditionCompleteAPIEnablements indicates whether the cluster's API enablements(.status.apiEnablements) are complete.
-ClusterConditionCompleteAPIEnablements = "CompleteAPIEnablements"
)
// ClusterStatus contains information about the current status of a
@@ -290,7 +287,7 @@ type ClusterStatus struct {
// +optional
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
-// APIEnablements represents the list of APIs installed on the member cluster.
+// APIEnablements represents the list of APIs installed in the member cluster.
// +optional
APIEnablements []APIEnablement `json:"apiEnablements,omitempty"`
@@ -305,11 +302,6 @@ type ClusterStatus struct {
// ResourceSummary represents the summary of resources in the member cluster.
// +optional
ResourceSummary *ResourceSummary `json:"resourceSummary,omitempty"`
-// RemedyActions represents the remedy actions that needs to be performed
-// on the cluster.
-// +optional
-RemedyActions []string `json:"remedyActions,omitempty"`
}
// APIEnablement is a list of API resource, it is used to expose the name of the
@@ -378,7 +370,7 @@ type AllocatableModeling struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// ClusterList contains a list of member clusters
+// ClusterList contains a list of member cluster
type ClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`

View File

@@ -27,6 +27,8 @@ const (
// (corresponding to ClusterConditionReady status ConditionUnknown)
// and removed when cluster becomes reachable (ClusterConditionReady status ConditionTrue).
TaintClusterUnreachable = "cluster.karmada.io/unreachable"
+// TaintClusterTerminating will be added when cluster is terminating.
+TaintClusterTerminating = "cluster.karmada.io/terminating"
// CacheSourceAnnotationKey is the annotation that added to a resource to
// represent which cluster it cached from.

View File

@@ -1,22 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
-/*
-Copyright The Karmada Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
@@ -387,7 +371,6 @@ func autoConvert_v1alpha1_ClusterStatus_To_cluster_ClusterStatus(in *ClusterStat
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
out.NodeSummary = (*cluster.NodeSummary)(unsafe.Pointer(in.NodeSummary))
out.ResourceSummary = (*cluster.ResourceSummary)(unsafe.Pointer(in.ResourceSummary))
-out.RemedyActions = *(*[]string)(unsafe.Pointer(&in.RemedyActions))
return nil
}
@@ -402,7 +385,6 @@ func autoConvert_cluster_ClusterStatus_To_v1alpha1_ClusterStatus(in *cluster.Clu
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
out.NodeSummary = (*NodeSummary)(unsafe.Pointer(in.NodeSummary))
out.ResourceSummary = (*ResourceSummary)(unsafe.Pointer(in.ResourceSummary))
-out.RemedyActions = *(*[]string)(unsafe.Pointer(&in.RemedyActions))
return nil
}

View File

@@ -1,22 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
-/*
-Copyright The Karmada Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@@ -245,11 +229,6 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = new(ResourceSummary)
(*in).DeepCopyInto(*out)
}
-if in.RemedyActions != nil {
-in, out := &in.RemedyActions, &out.RemedyActions
-*out = make([]string, len(*in))
-copy(*out, *in)
-}
return
}

View File

@ -1,30 +1,11 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT. // Code generated by register-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
import ( import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
) )
// GroupName specifies the group name used to register the objects. // GroupName specifies the group name used to register the objects.
@ -46,7 +27,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead // Depreciated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme Install = localSchemeBuilder.AddToScheme
) )

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT. // Code generated by deepcopy-gen. DO NOT EDIT.
package cluster package cluster
@ -245,11 +229,6 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = new(ResourceSummary) *out = new(ResourceSummary)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
if in.RemedyActions != nil {
in, out := &in.RemedyActions, &out.RemedyActions
*out = make([]string, len(*in))
copy(*out, *in)
}
return return
} }

View File

@ -34,11 +34,8 @@ const (
// +genclient // +genclient
// +genclient:nonNamespaced // +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=resourceinterpretercustomizations,scope="Cluster",shortName=ric,categories={karmada-io} // +kubebuilder:resource:scope="Cluster"
// +kubebuilder:storageversion // +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.spec.target.apiVersion`,name="TARGET-API-VERSION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.target.kind`,name="TARGET-KIND",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ResourceInterpreterCustomization describes the configuration of a specific // ResourceInterpreterCustomization describes the configuration of a specific
// resource for Karmada to get the structure. // resource for Karmada to get the structure.
@ -142,14 +139,11 @@ type LocalValueRetention struct {
// to the desired specification. // to the desired specification.
// //
// The script should implement a function as follows: // The script should implement a function as follows:
// // luaScript: >
// ``` // function Retain(desiredObj, observedObj)
// luaScript: > // desiredObj.spec.fieldFoo = observedObj.spec.fieldFoo
// function Retain(desiredObj, observedObj) // return desiredObj
// desiredObj.spec.fieldFoo = observedObj.spec.fieldFoo // end
// return desiredObj
// end
// ```
// //
// The content of the LuaScript needs to be a whole function including both // The content of the LuaScript needs to be a whole function including both
// declaration and implementation. // declaration and implementation.
@ -173,19 +167,16 @@ type ReplicaResourceRequirement struct {
// replica as well as resource requirements // replica as well as resource requirements
// //
// The script should implement a function as follows: // The script should implement a function as follows:
// // luaScript: >
// ``` // function GetReplicas(desiredObj)
// luaScript: > // replica = desiredObj.spec.replicas
// function GetReplicas(desiredObj) // requirement = {}
// replica = desiredObj.spec.replicas // requirement.nodeClaim = {}
// requirement = {} // requirement.nodeClaim.nodeSelector = desiredObj.spec.template.spec.nodeSelector
// requirement.nodeClaim = {} // requirement.nodeClaim.tolerations = desiredObj.spec.template.spec.tolerations
// requirement.nodeClaim.nodeSelector = desiredObj.spec.template.spec.nodeSelector // requirement.resourceRequest = desiredObj.spec.template.spec.containers[1].resources.limits
// requirement.nodeClaim.tolerations = desiredObj.spec.template.spec.tolerations // return replica, requirement
// requirement.resourceRequest = desiredObj.spec.template.spec.containers[1].resources.limits // end
// return replica, requirement
// end
// ```
// //
// The content of the LuaScript needs to be a whole function including both // The content of the LuaScript needs to be a whole function including both
// declaration and implementation. // declaration and implementation.
@ -207,14 +198,11 @@ type ReplicaResourceRequirement struct {
type ReplicaRevision struct { type ReplicaRevision struct {
// LuaScript holds the Lua script that is used to revise replicas in the desired specification. // LuaScript holds the Lua script that is used to revise replicas in the desired specification.
// The script should implement a function as follows: // The script should implement a function as follows:
// // luaScript: >
// ``` // function ReviseReplica(desiredObj, desiredReplica)
// luaScript: > // desiredObj.spec.replicas = desiredReplica
// function ReviseReplica(desiredObj, desiredReplica) // return desiredObj
// desiredObj.spec.replicas = desiredReplica // end
// return desiredObj
// end
// ```
// //
// The content of the LuaScript needs to be a whole function including both // The content of the LuaScript needs to be a whole function including both
// declaration and implementation. // declaration and implementation.
@ -234,15 +222,12 @@ type ReplicaRevision struct {
type StatusReflection struct { type StatusReflection struct {
// LuaScript holds the Lua script that is used to get the status from the observed specification. // LuaScript holds the Lua script that is used to get the status from the observed specification.
// The script should implement a function as follows: // The script should implement a function as follows:
// // luaScript: >
// ``` // function ReflectStatus(observedObj)
// luaScript: > // status = {}
// function ReflectStatus(observedObj) // status.readyReplicas = observedObj.status.observedObj
// status = {} // return status
// status.readyReplicas = observedObj.status.observedObj // end
// return status
// end
// ```
// //
// The content of the LuaScript needs to be a whole function including both // The content of the LuaScript needs to be a whole function including both
// declaration and implementation. // declaration and implementation.
@ -262,16 +247,13 @@ type StatusAggregation struct {
// LuaScript holds the Lua script that is used to aggregate decentralized statuses // LuaScript holds the Lua script that is used to aggregate decentralized statuses
// to the desired specification. // to the desired specification.
// The script should implement a function as follows: // The script should implement a function as follows:
// // luaScript: >
// ``` // function AggregateStatus(desiredObj, statusItems)
// luaScript: > // for i = 1, #statusItems do
// function AggregateStatus(desiredObj, statusItems) // desiredObj.status.readyReplicas = desiredObj.status.readyReplicas + items[i].readyReplicas
// for i = 1, #statusItems do // end
// desiredObj.status.readyReplicas = desiredObj.status.readyReplicas + items[i].readyReplicas // return desiredObj
// end // end
// return desiredObj
// end
// ```
// //
// The content of the LuaScript needs to be a whole function including both // The content of the LuaScript needs to be a whole function including both
// declaration and implementation. // declaration and implementation.
@ -291,15 +273,12 @@ type HealthInterpretation struct {
// LuaScript holds the Lua script that is used to assess the health state of // LuaScript holds the Lua script that is used to assess the health state of
// a specific resource. // a specific resource.
// The script should implement a function as follows: // The script should implement a function as follows:
// // luaScript: >
// ``` // function InterpretHealth(observedObj)
// luaScript: > // if observedObj.status.readyReplicas == observedObj.spec.replicas then
// function InterpretHealth(observedObj) // return true
// if observedObj.status.readyReplicas == observedObj.spec.replicas then // end
// return true // end
// end
// end
// ```
// //
// The content of the LuaScript needs to be a whole function including both // The content of the LuaScript needs to be a whole function including both
// declaration and implementation. // declaration and implementation.
@ -320,23 +299,20 @@ type DependencyInterpretation struct {
// LuaScript holds the Lua script that is used to interpret the dependencies of // LuaScript holds the Lua script that is used to interpret the dependencies of
// a specific resource. // a specific resource.
// The script should implement a function as follows: // The script should implement a function as follows:
// // luaScript: >
// ``` // function GetDependencies(desiredObj)
// luaScript: > // dependencies = {}
// function GetDependencies(desiredObj) // if desiredObj.spec.serviceAccountName ~= nil and desiredObj.spec.serviceAccountName ~= "default" then
// dependencies = {} // dependency = {}
// serviceAccountName = desiredObj.spec.template.spec.serviceAccountName // dependency.apiVersion = "v1"
// if serviceAccountName ~= nil and serviceAccountName ~= "default" then // dependency.kind = "ServiceAccount"
// dependency = {} // dependency.name = desiredObj.spec.serviceAccountName
// dependency.apiVersion = "v1" // dependency.namespace = desiredObj.namespace
// dependency.kind = "ServiceAccount" // dependencies[1] = {}
// dependency.name = serviceAccountName // dependencies[1] = dependency
// dependency.namespace = desiredObj.metadata.namespace // end
// dependencies[1] = dependency // return dependencies
// end // end
// return dependencies
// end
// ```
// //
// The content of the LuaScript needs to be a whole function including both // The content of the LuaScript needs to be a whole function including both
// declaration and implementation. // declaration and implementation.

View File

@ -35,7 +35,7 @@ const (
// +genclient // +genclient
// +genclient:nonNamespaced // +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=resourceinterpreterwebhookconfigurations,scope="Cluster",categories={karmada-io} // +kubebuilder:resource:scope="Cluster"
// +kubebuilder:storageversion // +kubebuilder:storageversion
// ResourceInterpreterWebhookConfiguration describes the configuration of webhooks which take the responsibility to // ResourceInterpreterWebhookConfiguration describes the configuration of webhooks which take the responsibility to
@ -56,24 +56,6 @@ type ResourceInterpreterWebhook struct {
Name string `json:"name"` Name string `json:"name"`
// ClientConfig defines how to communicate with the hook. // ClientConfig defines how to communicate with the hook.
// It supports two mutually exclusive configuration modes:
//
// 1. URL - Directly specify the webhook URL with format `scheme://host:port/path`.
// Example: https://webhook.example.com:8443/my-interpreter
//
// 2. Service - Reference a Kubernetes Service that exposes the webhook.
// When using a Service reference, Karmada resolves the endpoint through the following steps:
// a) First attempts to locate the Service in karmada-apiserver.
// b) If found, constructs the URL based on the Service type:
// - ClusterIP/LoadBalancer/NodePort: uses the ClusterIP with the port from the Service spec
// (Note: Services with ClusterIP "None" are rejected). Example:
// `https://<cluster ip>:<port>`
// - ExternalName: uses the external DNS name format: `https://<external name>:<port>`
// c) If NOT found in karmada-apiserver, falls back to the standard Kubernetes
// service DNS name format: `https://<service>.<namespace>.svc:<port>`
//
// Note: When both URL and Service are specified, the Service reference takes precedence
// and the URL configuration will be ignored.
// +required // +required
ClientConfig admissionregistrationv1.WebhookClientConfig `json:"clientConfig"` ClientConfig admissionregistrationv1.WebhookClientConfig `json:"clientConfig"`
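A minimal sketch of the Service mode described above, appearing in neither branch: the webhook name, Service coordinates, and import paths are placeholders or assumptions, and only the Name and ClientConfig fields come from the type itself.

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"

	configv1alpha1 "github.com/karmada-io/api/config/v1alpha1" // assumed import path
)

// exampleWebhook declares a webhook entry that points at an in-cluster Service;
// Karmada resolves the endpoint following the steps documented above. When both
// URL and Service are set, the Service reference wins.
func exampleWebhook(caBundle []byte) configv1alpha1.ResourceInterpreterWebhook {
	path := "/interpreter"
	port := int32(443)
	return configv1alpha1.ResourceInterpreterWebhook{
		Name: "workloads.example.com", // hypothetical webhook name
		ClientConfig: admissionregistrationv1.WebhookClientConfig{
			Service: &admissionregistrationv1.ServiceReference{
				Namespace: "karmada-system",
				Name:      "my-interpreter", // hypothetical Service name
				Path:      &path,
				Port:      &port,
			},
			CABundle: caBundle, // PEM-encoded CA bundle used to verify the server certificate
		},
	}
}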
@ -117,7 +99,7 @@ type RuleWithOperations struct {
type InterpreterOperation string type InterpreterOperation string
const ( const (
// InterpreterOperationAll indicates matching all InterpreterOperation. // InterpreterOperationAll indicates math all InterpreterOperation.
InterpreterOperationAll InterpreterOperation = "*" InterpreterOperationAll InterpreterOperation = "*"
// InterpreterOperationInterpretReplica indicates that karmada want to figure out the replica declaration of a specific object. // InterpreterOperationInterpretReplica indicates that karmada want to figure out the replica declaration of a specific object.

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT. // Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1

View File

@ -1,30 +1,11 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT. // Code generated by register-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
import ( import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
) )
// GroupName specifies the group name used to register the objects. // GroupName specifies the group name used to register the objects.
@ -46,7 +27,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead // Depreciated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme Install = localSchemeBuilder.AddToScheme
) )

go.mod
View File

@ -1,30 +1,28 @@
module github.com/karmada-io/api module github.com/karmada-io/api
go 1.23.8 go 1.20
require ( require (
k8s.io/api v0.32.3 k8s.io/api v0.27.8
k8s.io/apiextensions-apiserver v0.32.3 k8s.io/apiextensions-apiserver v0.27.8
k8s.io/apimachinery v0.32.3 k8s.io/apimachinery v0.27.8
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 k8s.io/utils v0.0.0-20230209194617-a36077c30491
sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/controller-runtime v0.15.0
) )
require ( require (
github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.17.0 // indirect
golang.org/x/net v0.39.0 // indirect golang.org/x/text v0.13.0 // indirect
golang.org/x/text v0.24.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect k8s.io/klog/v2 v2.90.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/yaml v1.4.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
) )

go.sum
View File

@ -1,17 +1,12 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -19,28 +14,20 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@ -52,8 +39,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -62,8 +49,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@ -74,26 +61,27 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.27.8 h1:Ja93gbyII5c3TJzWefEwGhlqC5SZksWhzRS+OYHIocU=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.27.8/go.mod h1:2HuWJC6gpx4UScY+ezziNzv6j6Jqd2q0rGgobYSSjcs=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/apiextensions-apiserver v0.27.8 h1:u9PON71euIhVbHdZ5YlznpY60GtRjPagf1mQXLo1siA=
k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= k8s.io/apiextensions-apiserver v0.27.8/go.mod h1:qBlRJTKCA0gnFVCsjzbz3YJZ49TCBNEwvEF2TPMRqOs=
k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.27.8 h1:Xg+ogjDm8s7KmV3vZGf7uOZ0jrC6FPy2Lk/h7BIRmvg=
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.27.8/go.mod h1:EIXLxLt/b1muPITiF5zlrcm7I+YnXsIgM+0GdnPTQvA=
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU=
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk=
sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@ -35,7 +35,7 @@ const (
// +genclient // +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
// +kubebuilder:resource:path=multiclusteringresses,scope=Namespaced,shortName=mci,categories={karmada-io} // +kubebuilder:resource:shortName=mci,categories={karmada-io}
// MultiClusterIngress is a collection of rules that allow inbound connections to reach the // MultiClusterIngress is a collection of rules that allow inbound connections to reach the
// endpoints defined by a backend. The structure of MultiClusterIngress is same as Ingress, // endpoints defined by a backend. The structure of MultiClusterIngress is same as Ingress,
@ -50,35 +50,7 @@ type MultiClusterIngress struct {
// Status is the current state of the MultiClusterIngress. // Status is the current state of the MultiClusterIngress.
// +optional // +optional
Status MultiClusterIngressStatus `json:"status,omitempty"` Status networkingv1.IngressStatus `json:"status,omitempty"`
}
// MultiClusterIngressStatus is the current state of the MultiClusterIngress.
type MultiClusterIngressStatus struct {
networkingv1.IngressStatus `json:",inline"`
// TrafficBlockClusters records the list of clusters for which traffic should be blocked.
// When the cloud provider implements its multicluster-cloud-provider and refreshes
// the service backend addresses to the LoadBalancer Service, it needs to filter out
// the backend addresses located in these clusters.
// +optional
TrafficBlockClusters []string `json:"trafficBlockClusters,omitempty"`
// ServiceLocations records the locations of the MultiClusterIngress's backend
// Service resources. It will be set by the system controller.
// +optional
ServiceLocations []ServiceLocation `json:"serviceLocations,omitempty"`
}
// ServiceLocation records the location of one of the MultiClusterIngress's backend Service resources.
type ServiceLocation struct {
// name is the referenced service. The service must exist in
// the same namespace as the MultiClusterService object.
Name string `json:"name"`
// Clusters records the cluster list where the Service is located.
// +optional
Clusters []string `json:"clusters,omitempty"`
} }
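As a rough sketch of the filtering that the TrafficBlockClusters comment asks a multicluster cloud provider to perform (taken from neither branch; the package path and the endpointsByCluster shape are assumptions):

package example

import (
	networkingv1alpha1 "github.com/karmada-io/api/networking/v1alpha1" // assumed import path
)

// backendsExcludingBlocked drops endpoints that live in a cluster listed in
// TrafficBlockClusters before they are pushed to the LoadBalancer backend.
// endpointsByCluster is a hypothetical shape for the provider's own bookkeeping.
func backendsExcludingBlocked(status networkingv1alpha1.MultiClusterIngressStatus, endpointsByCluster map[string][]string) []string {
	blocked := make(map[string]struct{}, len(status.TrafficBlockClusters))
	for _, cluster := range status.TrafficBlockClusters {
		blocked[cluster] = struct{}{}
	}
	var backends []string
	for cluster, endpoints := range endpointsByCluster {
		if _, skip := blocked[cluster]; skip {
			continue // traffic to this cluster should be blocked
		}
		backends = append(backends, endpoints...)
	}
	return backends
}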
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View File

@ -39,7 +39,7 @@ const (
// +genclient // +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
// +kubebuilder:resource:path=multiclusterservices,scope=Namespaced,shortName=mcs,categories={karmada-io} // +kubebuilder:resource:shortName=mcs,categories={karmada-io}
// MultiClusterService is a named abstraction of multi-cluster software service. // MultiClusterService is a named abstraction of multi-cluster software service.
// The name field of MultiClusterService is the same as that of Service name. // The name field of MultiClusterService is the same as that of Service name.
@ -79,40 +79,20 @@ type MultiClusterServiceSpec struct {
// If not set and Types contains CrossCluster, all clusters will // If not set and Types contains CrossCluster, all clusters will
// be selected, that means the referencing service will be exposed // be selected, that means the referencing service will be exposed
// across all registered clusters. // across all registered clusters.
// Deprecated: in favor of ProviderClusters/ConsumerClusters. // Deprecated: in favor of ServiceProvisionClusters/ServiceConsumptionClusters.
// +optional // +optional
Range ExposureRange `json:"range,omitempty"` Range ExposureRange `json:"range,omitempty"`
// ServiceProvisionClusters specifies the clusters which will provision the service backend. // ServiceProvisionClusters specifies the clusters which will provision the service backend.
// If leave it empty, we will collect the backend endpoints from all clusters and sync // If leave it empty, we will collect the backend endpoints from all clusters and sync
// them to the ServiceConsumptionClusters. // them to the ServiceConsumptionClusters.
// Deprecated: in favor of ProviderClusters/ConsumerClusters.
// +optional // +optional
ServiceProvisionClusters []string `json:"serviceProvisionClusters,omitempty"` ServiceProvisionClusters []string `json:"serviceProvisionClusters,omitempty"`
// ServiceConsumptionClusters specifies the clusters where the service will be exposed, for clients. // ServiceConsumptionClusters specifies the clusters where the service will be exposed, for clients.
// If leave it empty, the service will be exposed to all clusters. // If leave it empty, the service will be exposed to all clusters.
// Deprecated: in favor of ProviderClusters/ConsumerClusters.
// +optional // +optional
ServiceConsumptionClusters []string `json:"serviceConsumptionClusters,omitempty"` ServiceConsumptionClusters []string `json:"serviceConsumptionClusters,omitempty"`
// ProviderClusters specifies the clusters which will provide the service backend.
// If left empty, the backend endpoints will be collected from all clusters and synced
// to the ConsumerClusters.
// +optional
ProviderClusters []ClusterSelector `json:"providerClusters,omitempty"`
// ConsumerClusters specifies the clusters where the service will be exposed, for clients.
// If left empty, the service will be exposed to all clusters.
// +optional
ConsumerClusters []ClusterSelector `json:"consumerClusters,omitempty"`
}
// ClusterSelector specifies the cluster to be selected.
type ClusterSelector struct {
// Name is the name of the cluster to be selected.
// +required
Name string `json:"name,omitempty"`
} }
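A small sketch of how the newer selector-based fields might be populated in place of the deprecated string lists (from neither branch; the import path is an assumption and the cluster names are placeholders). The Types field (e.g. CrossCluster) would normally be set as well, but its constants sit outside this hunk.

package example

import (
	networkingv1alpha1 "github.com/karmada-io/api/networking/v1alpha1" // assumed import path
)

// crossClusterSpec exposes backends provisioned in member1 to clients in member2.
func crossClusterSpec() networkingv1alpha1.MultiClusterServiceSpec {
	return networkingv1alpha1.MultiClusterServiceSpec{
		ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "member1"}},
		ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "member2"}},
	}
}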
// ExposureType describes how to expose the service. // ExposureType describes how to expose the service.

View File

@ -23,12 +23,4 @@ const (
// The reason for generating a new unique identifier instead of simply using metadata.UUID is because: // The reason for generating a new unique identifier instead of simply using metadata.UUID is because:
// In backup scenarios, when applying the backup resource manifest in a new cluster, the UUID may change. // In backup scenarios, when applying the backup resource manifest in a new cluster, the UUID may change.
MultiClusterServicePermanentIDLabel = "multiclusterservice.karmada.io/permanent-id" MultiClusterServicePermanentIDLabel = "multiclusterservice.karmada.io/permanent-id"
// MultiClusterServiceNameAnnotation is the name of a MultiClusterService object.
// This annotation will be added to the resource template and ResourceBinding
MultiClusterServiceNameAnnotation = "multiclusterservice.karmada.io/name"
// MultiClusterServiceNamespaceAnnotation is the namespace of a MultiClusterService object.
// This annotation will be added to the resource template and ResourceBinding
MultiClusterServiceNamespaceAnnotation = "multiclusterservice.karmada.io/namespace"
) )
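A sketch (from neither branch) of reading the two annotations back from a propagated resource or its ResourceBinding; the import path is an assumption.

package example

import (
	networkingv1alpha1 "github.com/karmada-io/api/networking/v1alpha1" // assumed import path
)

// owningMultiClusterService recovers the MultiClusterService that a resource
// template or ResourceBinding belongs to, using the annotations defined above.
func owningMultiClusterService(annotations map[string]string) (namespace, name string, found bool) {
	name, found = annotations[networkingv1alpha1.MultiClusterServiceNameAnnotation]
	if !found {
		return "", "", false
	}
	return annotations[networkingv1alpha1.MultiClusterServiceNamespaceAnnotation], name, true
}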

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT. // Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
@ -25,22 +9,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
) )
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSelector) DeepCopyInto(out *ClusterSelector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSelector.
func (in *ClusterSelector) DeepCopy() *ClusterSelector {
if in == nil {
return nil
}
out := new(ClusterSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExposurePort) DeepCopyInto(out *ExposurePort) { func (in *ExposurePort) DeepCopyInto(out *ExposurePort) {
*out = *in *out = *in
@ -139,35 +107,6 @@ func (in *MultiClusterIngressList) DeepCopyObject() runtime.Object {
return nil return nil
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MultiClusterIngressStatus) DeepCopyInto(out *MultiClusterIngressStatus) {
*out = *in
in.IngressStatus.DeepCopyInto(&out.IngressStatus)
if in.TrafficBlockClusters != nil {
in, out := &in.TrafficBlockClusters, &out.TrafficBlockClusters
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ServiceLocations != nil {
in, out := &in.ServiceLocations, &out.ServiceLocations
*out = make([]ServiceLocation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterIngressStatus.
func (in *MultiClusterIngressStatus) DeepCopy() *MultiClusterIngressStatus {
if in == nil {
return nil
}
out := new(MultiClusterIngressStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MultiClusterService) DeepCopyInto(out *MultiClusterService) { func (in *MultiClusterService) DeepCopyInto(out *MultiClusterService) {
*out = *in *out = *in
@ -253,16 +192,6 @@ func (in *MultiClusterServiceSpec) DeepCopyInto(out *MultiClusterServiceSpec) {
*out = make([]string, len(*in)) *out = make([]string, len(*in))
copy(*out, *in) copy(*out, *in)
} }
if in.ProviderClusters != nil {
in, out := &in.ProviderClusters, &out.ProviderClusters
*out = make([]ClusterSelector, len(*in))
copy(*out, *in)
}
if in.ConsumerClusters != nil {
in, out := &in.ConsumerClusters, &out.ConsumerClusters
*out = make([]ClusterSelector, len(*in))
copy(*out, *in)
}
return return
} }
@ -275,24 +204,3 @@ func (in *MultiClusterServiceSpec) DeepCopy() *MultiClusterServiceSpec {
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceLocation) DeepCopyInto(out *ServiceLocation) {
*out = *in
if in.Clusters != nil {
in, out := &in.Clusters, &out.Clusters
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceLocation.
func (in *ServiceLocation) DeepCopy() *ServiceLocation {
if in == nil {
return nil
}
out := new(ServiceLocation)
in.DeepCopyInto(out)
return out
}

View File

@ -1,30 +1,11 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT. // Code generated by register-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
import ( import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
) )
// GroupName specifies the group name used to register the objects. // GroupName specifies the group name used to register the objects.
@ -46,7 +27,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead // Depreciated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme Install = localSchemeBuilder.AddToScheme
) )

View File

@ -0,0 +1,107 @@
package constants
import (
"time"
"k8s.io/apimachinery/pkg/labels"
)
const (
// KubeDefaultRepository defines the default k8s image repository
KubeDefaultRepository = "registry.k8s.io"
// KarmadaDefaultRepository defines the default karmada image repository
KarmadaDefaultRepository = "docker.io/karmada"
// EtcdDefaultVersion defines the default karmada etcd image tag
EtcdDefaultVersion = "3.5.3-0"
// KarmadaDefaultVersion defines the default karmada components image tag
KarmadaDefaultVersion = "v1.4.0"
// KubeDefaultVersion defines the default karmada apiserver and kubeControllerManager image tag
KubeDefaultVersion = "v1.25.4"
// KarmadaDefaultServiceSubnet defines the default subnet used by k8s services.
KarmadaDefaultServiceSubnet = "10.96.0.0/12"
// KarmadaDefaultDNSDomain defines the default DNSDomain
KarmadaDefaultDNSDomain = "cluster.local"
// KarmadaOperator defines the name of the karmada operator.
KarmadaOperator = "karmada-operator"
// Etcd defines the name of the built-in etcd cluster component
Etcd = "etcd"
// KarmadaAPIServer defines the name of the karmada-apiserver component
KarmadaAPIServer = "karmada-apiserver"
// KubeAPIServer defines the repository name of the kube apiserver
KubeAPIServer = "kube-apiserver"
// KarmadaAggregatedAPIServer defines the name of the karmada-aggregated-apiserver component
KarmadaAggregatedAPIServer = "karmada-aggregated-apiserver"
// KubeControllerManager defines the name of the kube-controller-manager component
KubeControllerManager = "kube-controller-manager"
// KarmadaControllerManager defines the name of the karmada-controller-manager component
KarmadaControllerManager = "karmada-controller-manager"
// KarmadaScheduler defines the name of the karmada-scheduler component
KarmadaScheduler = "karmada-scheduler"
// KarmadaWebhook defines the name of the karmada-webhook component
KarmadaWebhook = "karmada-webhook"
// KarmadaDescheduler defines the name of the karmada-descheduler component
KarmadaDescheduler = "karmada-descheduler"
// KarmadaSystemNamespace defines the leader election namespace for karmada components
KarmadaSystemNamespace = "karmada-system"
// KarmadaDataDir defines the karmada data dir
KarmadaDataDir = "/var/lib/karmada"
// EtcdListenClientPort defines the port etcd listens on for client traffic
EtcdListenClientPort = 2379
// EtcdMetricsPort is the port at which to obtain etcd metrics and health status
EtcdMetricsPort = 2381
// EtcdListenPeerPort defines the port etcd listens on for peer traffic
EtcdListenPeerPort = 2380
// KarmadaAPIserverListenClientPort defines the port the karmada apiserver listens on for client traffic
KarmadaAPIserverListenClientPort = 5443
// EtcdDataVolumeName defines the name of the etcd data volume
EtcdDataVolumeName = "etcd-data"
// CertificateValidity Certificate validity period
CertificateValidity = time.Hour * 24 * 365
// CaCertAndKeyName ca certificate key name
CaCertAndKeyName = "ca"
// EtcdCaCertAndKeyName etcd ca certificate key name
EtcdCaCertAndKeyName = "etcd-ca"
// EtcdServerCertAndKeyName etcd server certificate key name
EtcdServerCertAndKeyName = "etcd-server"
// EtcdClientCertAndKeyName etcd client certificate key name
EtcdClientCertAndKeyName = "etcd-client"
// KarmadaCertAndKeyName karmada certificate key name
KarmadaCertAndKeyName = "karmada"
// ApiserverCertAndKeyName karmada apiserver certificate key name
ApiserverCertAndKeyName = "apiserver"
// FrontProxyCaCertAndKeyName front-proxy CA certificate key name
FrontProxyCaCertAndKeyName = "front-proxy-ca"
// FrontProxyClientCertAndKeyName front-proxy-client certificate key name
FrontProxyClientCertAndKeyName = "front-proxy-client"
// ClusterName karmada cluster name
ClusterName = "karmada-apiserver"
// UserName karmada cluster user name
UserName = "karmada-admin"
// KarmadaAPIserverComponent defines the name of karmada-apiserver component
KarmadaAPIserverComponent = "KarmadaAPIServer"
// KarmadaAggregatedAPIServerComponent defines the name of karmada-aggregated-apiserver component
KarmadaAggregatedAPIServerComponent = "KarmadaAggregatedAPIServer"
// KubeControllerManagerComponent defines the name of kube-controller-manager-component
KubeControllerManagerComponent = "KubeControllerManager"
// KarmadaControllerManagerComponent defines the name of karmada-controller-manager component
KarmadaControllerManagerComponent = "KarmadaControllerManager"
// KarmadaSchedulerComponent defines the name of karmada-scheduler component
KarmadaSchedulerComponent = "KarmadaScheduler"
// KarmadaWebhookComponent defines the name of the karmada-webhook component
KarmadaWebhookComponent = "KarmadaWebhook"
// KarmadaDeschedulerComponent defines the name of the karmada-descheduler component
KarmadaDeschedulerComponent = "KarmadaDescheduler"
// KarmadaOperatorLabelKeyName defines the label key used by all resources created by the karmada operator
KarmadaOperatorLabelKeyName = "app.kubernetes.io/managed-by"
)
var (
// KarmadaOperatorLabel defines the default labels applied to resources created by the karmada operator
KarmadaOperatorLabel = labels.Set{KarmadaOperatorLabelKeyName: KarmadaOperator}
)
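Two illustrative uses of these defaults, taken from neither branch: composing a component image reference and selecting operator-managed resources by the shared label. The import path and the exact image-naming convention are assumptions.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"

	"github.com/karmada-io/api/operator/constants" // assumed import path
)

func main() {
	// One plausible way a default component image reference could be composed.
	image := fmt.Sprintf("%s/%s:%s", constants.KarmadaDefaultRepository, constants.KarmadaControllerManager, constants.KarmadaDefaultVersion)
	fmt.Println(image) // docker.io/karmada/karmada-controller-manager:v1.4.0

	// Selecting every resource the operator stamped with its managed-by label.
	selector := labels.SelectorFromSet(constants.KarmadaOperatorLabel)
	fmt.Println(selector.String()) // app.kubernetes.io/managed-by=karmada-operator
}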

View File

@ -28,7 +28,6 @@ func (image *Image) Name() string {
return fmt.Sprintf("%s:%s", image.ImageRepository, image.ImageTag) return fmt.Sprintf("%s:%s", image.ImageRepository, image.ImageTag)
} }
// KarmadaInProgressing sets the Karmada condition to Progressing.
func KarmadaInProgressing(karmada *Karmada, conditionType ConditionType, message string) { func KarmadaInProgressing(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{} karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{ newCondition := metav1.Condition{
@ -41,7 +40,6 @@ func KarmadaInProgressing(karmada *Karmada, conditionType ConditionType, message
apimeta.SetStatusCondition(&karmada.Status.Conditions, newCondition) apimeta.SetStatusCondition(&karmada.Status.Conditions, newCondition)
} }
// KarmadaCompleted sets the Karmada condition to Completed.
func KarmadaCompleted(karmada *Karmada, conditionType ConditionType, message string) { func KarmadaCompleted(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{} karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{ newCondition := metav1.Condition{
@ -54,7 +52,6 @@ func KarmadaCompleted(karmada *Karmada, conditionType ConditionType, message str
apimeta.SetStatusCondition(&karmada.Status.Conditions, newCondition) apimeta.SetStatusCondition(&karmada.Status.Conditions, newCondition)
} }
// KarmadaFailed sets the Karmada condition to Failed.
func KarmadaFailed(karmada *Karmada, conditionType ConditionType, message string) { func KarmadaFailed(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{} karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{ newCondition := metav1.Condition{
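
The three helpers above reset Status.Conditions and then record a single condition. A sketch of how a reconciler might drive them, from neither branch: the import path and the "Ready" condition type value are assumptions (the value mirrors the status printcolumn used for the Karmada CRD later in this diff).

package example

import (
	operatorv1alpha1 "github.com/karmada-io/api/operator/v1alpha1" // assumed import path
)

// reconcileStatus marks the Karmada instance Failed on error, Completed otherwise.
func reconcileStatus(karmada *operatorv1alpha1.Karmada, err error) {
	if err != nil {
		operatorv1alpha1.KarmadaFailed(karmada, operatorv1alpha1.ConditionType("Ready"), err.Error())
		return
	}
	operatorv1alpha1.KarmadaCompleted(karmada, operatorv1alpha1.ConditionType("Ready"), "karmada control plane is ready")
}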

View File

@ -1,65 +0,0 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
const GroupName = "operator.karmada.io"
// GroupVersion specifies the group and the version used to register the objects.
var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// SchemeGroupVersion is group version used to register these objects
// Deprecated: use GroupVersion instead.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
// localSchemeBuilder will stay in k8s.io/kubernetes.
localSchemeBuilder = &SchemeBuilder
// AddToScheme applies all the stored functions to the scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Karmada{},
&KarmadaList{},
)
// AddToGroupVersion allows the serialization of client types like ListOptions.
v1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
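For orientation, a caller would normally register these types on a runtime.Scheme before building a client. The sketch below is illustrative only and is written as if it lived in this same v1alpha1 package, so AddToScheme and the runtime import above are already in scope.
// newScheme is a minimal sketch of consuming the registration above: it
// builds a fresh scheme and applies AddToScheme, which registers Karmada,
// KarmadaList and the defaulting functions wired up in init.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}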


@ -24,9 +24,9 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=karmadas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:path=karmadas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="READY",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="Ready",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// Karmada enables declarative installation of karmada.
type Karmada struct {
@ -44,38 +44,6 @@ type Karmada struct {
Status KarmadaStatus `json:"status,omitempty"`
}
// CRDDownloadPolicy specifies a policy for how the operator will download the Karmada CRD tarball
type CRDDownloadPolicy string
const (
// DownloadAlways instructs the Karmada operator to always download the CRD tarball from a remote location.
DownloadAlways CRDDownloadPolicy = "Always"
// DownloadIfNotPresent instructs the Karmada operator to download the CRDs tarball from a remote location only if it is not yet present in the local cache.
DownloadIfNotPresent CRDDownloadPolicy = "IfNotPresent"
)
// HTTPSource specifies how to download the CRD tarball via either HTTP or HTTPS protocol.
type HTTPSource struct {
// URL specifies the URL of the CRD tarball resource.
URL string `json:"url,omitempty"`
}
// CRDTarball specifies the source from which the Karmada CRD tarball should be downloaded, along with the download policy to use.
type CRDTarball struct {
// HTTPSource specifies how to download the CRD tarball via either HTTP or HTTPS protocol.
// +optional
HTTPSource *HTTPSource `json:"httpSource,omitempty"`
// CRDDownloadPolicy specifies a policy that should be used to download the CRD tarball.
// Valid values are "Always" and "IfNotPresent".
// Defaults to "IfNotPresent".
// +kubebuilder:validation:Enum=Always;IfNotPresent
// +kubebuilder:default=IfNotPresent
// +optional
CRDDownloadPolicy *CRDDownloadPolicy `json:"crdDownloadPolicy,omitempty"`
}
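A rough illustration of how these fields compose, written as if it sat in this v1alpha1 package; the mirror URL is a placeholder, not a real release location.
// exampleCRDTarball asks the operator to always re-download the CRDs from a
// custom mirror instead of the default GitHub release location.
var downloadAlways = DownloadAlways

var exampleCRDTarball = CRDTarball{
	HTTPSource:        &HTTPSource{URL: "https://example.com/mirrors/karmada/crds.tar.gz"},
	CRDDownloadPolicy: &downloadAlways,
}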
// KarmadaSpec is the specification of the desired behavior of the Karmada.
type KarmadaSpec struct {
// HostCluster represents the cluster where to install the Karmada control plane.
@ -104,47 +72,6 @@ type KarmadaSpec struct {
// More info: https://github.com/karmada-io/karmada/blob/master/pkg/features/features.go
// +optional
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// CRDTarball specifies the source from which the Karmada CRD tarball should be downloaded, along with the download policy to use.
// If not set, the operator will download the tarball from a GitHub release.
// By default, it will download the tarball of the same version as the operator itself.
// For instance, if the operator's version is v1.10.0, the tarball will be downloaded from the following location:
// https://github.com/karmada-io/karmada/releases/download/v1.10.0/crds.tar.gz
// By default, the operator will only attempt to download the tarball if it's not yet present in the local cache.
// +optional
CRDTarball *CRDTarball `json:"crdTarball,omitempty"`
// CustomCertificate specifies the configuration to customize the certificates
// for Karmada components or control the certificate generation process, such as
// the algorithm, validity period, etc.
// Currently, it only supports customizing the CA certificate for limited components.
// +optional
CustomCertificate *CustomCertificate `json:"customCertificate,omitempty"`
// Suspend indicates that the operator should suspend reconciliation
// for this Karmada control plane and all its managed resources.
// Karmada instances for which this field is not explicitly set to `true` will continue to be reconciled as usual.
// +optional
Suspend *bool `json:"suspend,omitempty"`
}
// CustomCertificate holds the configuration for generating the certificate.
type CustomCertificate struct {
// APIServerCACert references a Kubernetes secret containing the CA certificate
// for component karmada-apiserver.
// The secret must contain the following data keys:
// - tls.crt: The TLS certificate.
// - tls.key: The TLS private key.
// If specified, this CA will be used to issue client certificates for
// all components that access the APIServer as clients.
// +optional
APIServerCACert *LocalSecretReference `json:"apiServerCACert,omitempty"`
// LeafCertValidityDays specifies the validity period of leaf certificates (e.g., API Server certificate) in days.
// If not specified, the default validity period of 1 year will be used.
// +kubebuilder:validation:Minimum=1
// +optional
LeafCertValidityDays *int32 `json:"leafCertValidityDays,omitempty"`
}
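A hedged sketch of the custom-certificate knobs above, again written as if in this package; the secret name is a placeholder and the LocalSecretReference field name is an assumption taken from its definition elsewhere in this file.
// exampleCustomCertificate issues client certificates from a user-provided CA
// secret (expected to carry tls.crt/tls.key) and widens leaf certificate
// validity to roughly two years instead of the one-year default.
var leafValidityDays = int32(730)

var exampleCustomCertificate = CustomCertificate{
	APIServerCACert:      &LocalSecretReference{Name: "karmada-apiserver-ca"}, // assumed field name
	LeafCertValidityDays: &leafValidityDays,
}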
// ImageRegistry represents an image registry as well as the
@ -271,32 +198,19 @@ type VolumeData struct {
// operator has no knowledge of where certificate files live, and they must be supplied.
type ExternalEtcd struct {
// Endpoints of etcd members. Required for ExternalEtcd.
// +required
Endpoints []string `json:"endpoints"`
// CAData is an SSL Certificate Authority file used to secure etcd communication.
// Required if using a TLS connection.
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
CAData []byte `json:"caData"`
CAData []byte `json:"caData,omitempty"`
// CertData is an SSL certification file used to secure etcd communication.
// Required if using a TLS connection.
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
CertData []byte `json:"certData"`
CertData []byte `json:"certData,omitempty"`
// KeyData is an SSL key file used to secure etcd communication.
// Required if using a TLS connection.
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
KeyData []byte `json:"keyData"`
KeyData []byte `json:"keyData,omitempty"`
// SecretRef references a Kubernetes secret containing the etcd connection credentials.
// The secret must contain the following data keys:
// ca.crt: The Certificate Authority (CA) certificate data.
// tls.crt: The TLS certificate data used for verifying the etcd server's certificate.
// tls.key: The TLS private key.
// Required to configure the connection to an external etcd cluster.
// +required
SecretRef LocalSecretReference `json:"secretRef"`
}
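A sketch of the main-branch shape of this struct (in-package; the endpoints and secret name are placeholders, and the LocalSecretReference field name is assumed).
// exampleExternalEtcd points the control plane at an unmanaged etcd and takes
// its client credentials (ca.crt, tls.crt, tls.key) from a secret, rather
// than from the deprecated inline CAData/CertData/KeyData fields.
var exampleExternalEtcd = ExternalEtcd{
	Endpoints: []string{"https://etcd-0.example.svc:2379", "https://etcd-1.example.svc:2379"},
	SecretRef: LocalSecretReference{Name: "external-etcd-client-certs"}, // assumed field name
}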
// KarmadaAPIServer holds settings to kube-apiserver component of the kubernetes.
@ -309,31 +223,11 @@ type KarmadaAPIServer struct {
// +optional
ServiceSubnet *string `json:"serviceSubnet,omitempty"`
// ServiceType represents the service type of Karmada API server. // ServiceType represents the service type of karmada apiserver.
// Valid options are: "ClusterIP", "NodePort", "LoadBalancer". // it is NodePort by default.
// Defaults to "ClusterIP".
//
// +kubebuilder:default="ClusterIP"
// +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer
// +optional
ServiceType corev1.ServiceType `json:"serviceType,omitempty"`
// LoadBalancerClass specifies the load balancer implementation class for the Karmada API server.
// This field is applicable only when ServiceType is set to LoadBalancer.
// If specified, the service will be processed by the load balancer implementation that matches the specified class.
// By default, this is not set and the LoadBalancer type of Service uses the cloud provider's default load balancer
// implementation.
// Once set, it cannot be changed. The value must be a label-style identifier, with an optional prefix such as
// "internal-vip" or "example.com/internal-vip".
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
// +optional
LoadBalancerClass *string `json:"loadBalancerClass,omitempty"`
// ServiceAnnotations is an extra set of annotations for service of karmada apiserver.
// more info: https://github.com/karmada-io/karmada/issues/4634
// +optional
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
// ExtraArgs is an extra set of flags to pass to the kube-apiserver component or
// override. A key in this map is the flag name as it appears on the command line except
// without leading dash(es).
@ -350,24 +244,6 @@ type KarmadaAPIServer struct {
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
// ExtraVolumes specifies a list of extra volumes for the API server's pod
// To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance,
// the operator will automatically attach volumes for the API server pod needed to configure things such as TLS,
// SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability,
// there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumes, in conjunction
// with ExtraArgs and ExtraVolumeMounts can be used to fulfil those use cases.
// +optional
ExtraVolumes []corev1.Volume `json:"extraVolumes,omitempty"`
// ExtraVolumeMounts specifies a list of extra volume mounts to be mounted into the API server's container
// To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance,
// the operator will automatically mount volumes into the API server container needed to configure things such as TLS,
// SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability,
// there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumeMounts, in conjunction
// with ExtraArgs and ExtraVolumes can be used to fulfil those use cases.
// +optional
ExtraVolumeMounts []corev1.VolumeMount `json:"extraVolumeMounts,omitempty"`
// CertSANs sets extra Subject Alternative Names for the API Server signing cert.
// +optional
CertSANs []string `json:"certSANs,omitempty"`
@ -376,12 +252,6 @@ type KarmadaAPIServer struct {
// More info: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
// +optional
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// SidecarContainers specifies a list of sidecar containers to be deployed
// within the Karmada API server pod.
// This enables users to integrate auxiliary services such as KMS plugins for configuring encryption at rest.
// +optional
SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"`
}
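A sketch of how the service-exposure fields above fit together (in-package; corev1 is the core/v1 import this file already uses, and the class, annotation and SAN values are placeholders).
// exampleAPIServer publishes the Karmada API server through a LoadBalancer
// Service handled by a specific implementation class and adds an extra SAN.
var exampleLBClass = "example.com/internal-vip"

var exampleAPIServer = KarmadaAPIServer{
	ServiceType:        corev1.ServiceTypeLoadBalancer,
	LoadBalancerClass:  &exampleLBClass,
	ServiceAnnotations: map[string]string{"example.com/vip-pool": "internal"},
	CertSANs:           []string{"karmada.example.com"},
}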
// KarmadaAggregatedAPIServer holds settings to karmada-aggregated-apiserver component of the karmada.
@ -649,11 +519,6 @@ type CommonSettings struct {
// Image allows to customize the image used for the component.
Image `json:",inline"`
// ImagePullPolicy defines the policy for pulling the container image.
// If not specified, it defaults to IfNotPresent.
// +optional
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// Number of desired pods. This is a pointer to distinguish between explicit
// zero and not specified. Defaults to 1.
// +optional
@ -677,12 +542,6 @@ type CommonSettings struct {
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
// +optional
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// PriorityClassName specifies the priority class name for the component.
// If not specified, it defaults to "system-node-critical".
// +kubebuilder:default="system-node-critical"
// +optional
PriorityClassName string `json:"priorityClassName,omitempty"`
}
// Image allows to customize the image used for components.
@ -749,21 +608,6 @@ type KarmadaStatus struct {
// Conditions represents the latest available observations of a karmada's current state.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// APIServerService reports the location of the Karmada API server service which
// can be used by third-party applications to discover the Karmada Service, e.g.
// expose the service outside the cluster by Ingress.
// +optional
APIServerService *APIServerService `json:"apiServerService,omitempty"`
}
// APIServerService tells the location of Karmada API server service.
// Currently, it only includes the name of the service. The namespace
// of the service is the same as the namespace of the current Karmada object.
type APIServerService struct {
// Name represents the name of the Karmada API Server service.
// +required
Name string `json:"name"`
}
// LocalSecretReference is a reference to a secret within the enclosing


@ -1,74 +1,16 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIServerService) DeepCopyInto(out *APIServerService) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerService.
func (in *APIServerService) DeepCopy() *APIServerService {
if in == nil {
return nil
}
out := new(APIServerService)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CRDTarball) DeepCopyInto(out *CRDTarball) {
*out = *in
if in.HTTPSource != nil {
in, out := &in.HTTPSource, &out.HTTPSource
*out = new(HTTPSource)
**out = **in
}
if in.CRDDownloadPolicy != nil {
in, out := &in.CRDDownloadPolicy, &out.CRDDownloadPolicy
*out = new(CRDDownloadPolicy)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDTarball.
func (in *CRDTarball) DeepCopy() *CRDTarball {
if in == nil {
return nil
}
out := new(CRDTarball)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonSettings) DeepCopyInto(out *CommonSettings) {
*out = *in
@ -106,32 +48,6 @@ func (in *CommonSettings) DeepCopy() *CommonSettings {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomCertificate) DeepCopyInto(out *CustomCertificate) {
*out = *in
if in.APIServerCACert != nil {
in, out := &in.APIServerCACert, &out.APIServerCACert
*out = new(LocalSecretReference)
**out = **in
}
if in.LeafCertValidityDays != nil {
in, out := &in.LeafCertValidityDays, &out.LeafCertValidityDays
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCertificate.
func (in *CustomCertificate) DeepCopy() *CustomCertificate {
if in == nil {
return nil
}
out := new(CustomCertificate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Etcd) DeepCopyInto(out *Etcd) {
*out = *in
@ -181,7 +97,6 @@ func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) {
*out = make([]byte, len(*in))
copy(*out, *in)
}
out.SecretRef = in.SecretRef
return
}
@ -195,22 +110,6 @@ func (in *ExternalEtcd) DeepCopy() *ExternalEtcd {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPSource) DeepCopyInto(out *HTTPSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSource.
func (in *HTTPSource) DeepCopy() *HTTPSource {
if in == nil {
return nil
}
out := new(HTTPSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostCluster) DeepCopyInto(out *HostCluster) {
*out = *in
@ -306,18 +205,6 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
*out = new(string)
**out = **in
}
if in.LoadBalancerClass != nil {
in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
*out = new(string)
**out = **in
}
if in.ServiceAnnotations != nil {
in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ExtraArgs != nil {
in, out := &in.ExtraArgs, &out.ExtraArgs
*out = make(map[string]string, len(*in))
@ -325,20 +212,6 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
(*out)[key] = val
}
}
if in.ExtraVolumes != nil {
in, out := &in.ExtraVolumes, &out.ExtraVolumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtraVolumeMounts != nil {
in, out := &in.ExtraVolumeMounts, &out.ExtraVolumeMounts
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CertSANs != nil {
in, out := &in.CertSANs, &out.CertSANs
*out = make([]string, len(*in))
@ -351,13 +224,6 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
(*out)[key] = val
}
}
if in.SidecarContainers != nil {
in, out := &in.SidecarContainers, &out.SidecarContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -670,21 +536,6 @@ func (in *KarmadaSpec) DeepCopyInto(out *KarmadaSpec) {
(*out)[key] = val
}
}
if in.CRDTarball != nil {
in, out := &in.CRDTarball, &out.CRDTarball
*out = new(CRDTarball)
(*in).DeepCopyInto(*out)
}
if in.CustomCertificate != nil {
in, out := &in.CustomCertificate, &out.CustomCertificate
*out = new(CustomCertificate)
(*in).DeepCopyInto(*out)
}
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
return
}
@ -708,16 +559,11 @@ func (in *KarmadaStatus) DeepCopyInto(out *KarmadaStatus) {
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in)) *out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.APIServerService != nil {
in, out := &in.APIServerService, &out.APIServerService
*out = new(APIServerService)
**out = **in
}
return
}
@ -865,17 +711,17 @@ func (in *VolumeData) DeepCopyInto(out *VolumeData) {
*out = *in
if in.VolumeClaim != nil {
in, out := &in.VolumeClaim, &out.VolumeClaim
*out = new(v1.PersistentVolumeClaimTemplate) *out = new(corev1.PersistentVolumeClaimTemplate)
(*in).DeepCopyInto(*out)
}
if in.HostPath != nil {
in, out := &in.HostPath, &out.HostPath
*out = new(v1.HostPathVolumeSource) *out = new(corev1.HostPathVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.EmptyDir != nil {
in, out := &in.EmptyDir, &out.EmptyDir
*out = new(v1.EmptyDirVolumeSource) *out = new(corev1.EmptyDirVolumeSource)
(*in).DeepCopyInto(*out)
}
return


@ -1,129 +0,0 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:path=clustertaintpolicies,scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterTaintPolicy automates taint management on Cluster objects based
// on declarative conditions.
// The system evaluates AddOnConditions to determine when to add taints,
// and RemoveOnConditions to determine when to remove taints.
// AddOnConditions are evaluated before RemoveOnConditions.
// Taints are NEVER automatically removed when the ClusterTaintPolicy is deleted.
type ClusterTaintPolicy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec represents the desired behavior of ClusterTaintPolicy.
// +required
Spec ClusterTaintPolicySpec `json:"spec"`
}
// ClusterTaintPolicySpec represents the desired behavior of ClusterTaintPolicy.
type ClusterTaintPolicySpec struct {
// TargetClusters specifies the clusters that ClusterTaintPolicy needs
// to pay attention to.
// For clusters that no longer match the TargetClusters, the taints
// will be kept unchanged.
// If targetClusters is not set, any cluster can be selected.
// +optional
TargetClusters *ClusterAffinity `json:"targetClusters,omitempty"`
// AddOnConditions defines the conditions to match for triggering
// the controller to add taints on the cluster object.
// The match conditions are ANDed.
// If AddOnConditions is empty, no taints will be added.
// +optional
AddOnConditions []MatchCondition `json:"addOnConditions,omitempty"`
// RemoveOnConditions defines the conditions to match for triggering
// the controller to remove taints from the cluster object.
// The match conditions are ANDed.
// If RemoveOnConditions is empty, no taints will be removed.
// +optional
RemoveOnConditions []MatchCondition `json:"removeOnConditions,omitempty"`
// Taints specifies the taints that need to be added or removed on
// the cluster object which match with TargetClusters.
// If the Taints is modified, the system will process the taints based on
// the latest value of Taints during the next condition-triggered execution,
// regardless of whether the taint has been added or removed.
// +kubebuilder:validation:MinItems=1
// +required
Taints []Taint `json:"taints"`
}
// MatchCondition represents the condition match detail of activating the failover
// relevant taints on target clusters.
type MatchCondition struct {
// ConditionType specifies the ClusterStatus condition type.
// +required
ConditionType string `json:"conditionType"`
// Operator represents a relationship to a set of values.
// Valid operators are In, NotIn.
// +required
Operator MatchConditionOperator `json:"operator"`
// StatusValues is an array of metav1.ConditionStatus values.
// The item specifies the ClusterStatus condition status.
// +required
StatusValues []metav1.ConditionStatus `json:"statusValues"`
}
// A MatchConditionOperator operator is the set of operators that can be used in the match condition.
type MatchConditionOperator string
const (
// MatchConditionOpIn represents the operator In.
MatchConditionOpIn MatchConditionOperator = "In"
// MatchConditionOpNotIn represents the operator NotIn.
MatchConditionOpNotIn MatchConditionOperator = "NotIn"
)
// Taint describes the taint that needs to be applied to the cluster.
type Taint struct {
// Key represents the taint key to be applied to a cluster.
// +required
Key string `json:"key"`
// Effect represents the taint effect to be applied to a cluster.
// +required
Effect corev1.TaintEffect `json:"effect"`
// Value represents the taint value corresponding to the taint key.
// +optional
Value string `json:"value,omitempty"`
}
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterTaintPolicyList contains a list of ClusterTaintPolicy
type ClusterTaintPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterTaintPolicy `json:"items"`
}
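A sketch of a policy built from these types (in-package; metav1 and corev1 are the imports at the top of this file, and the policy name, condition handling and taint key are illustrative): taint clusters whose Ready condition is not True, and clear the taint once it is.
var exampleNotReadyTaintPolicy = ClusterTaintPolicy{
	ObjectMeta: metav1.ObjectMeta{Name: "taint-not-ready-clusters"},
	Spec: ClusterTaintPolicySpec{
		// Add the taint while Ready is anything other than True ...
		AddOnConditions: []MatchCondition{{
			ConditionType: "Ready",
			Operator:      MatchConditionOpNotIn,
			StatusValues:  []metav1.ConditionStatus{metav1.ConditionTrue},
		}},
		// ... and remove it again once Ready goes back to True.
		RemoveOnConditions: []MatchCondition{{
			ConditionType: "Ready",
			Operator:      MatchConditionOpIn,
			StatusValues:  []metav1.ConditionStatus{metav1.ConditionTrue},
		}},
		Taints: []Taint{{
			Key:    "example.io/cluster-not-ready",
			Effect: corev1.TaintEffectNoExecute,
		}},
	},
}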


@ -34,11 +34,9 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=federatedresourcequotas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:resource:categories={karmada-io}
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.overall`,name=`OVERALL`,type=string
// +kubebuilder:printcolumn:JSONPath=`.status.overallUsed`,name=`OVERALL_USED`,type=string
// FederatedResourceQuota sets aggregate quota restrictions enforced per namespace across all clusters.
type FederatedResourceQuota struct {
@ -60,16 +58,9 @@ type FederatedResourceQuotaSpec struct {
// +required
Overall corev1.ResourceList `json:"overall"`
// StaticAssignments specifies ResourceQuota settings for specific clusters. // StaticAssignments represents the subset of desired hard limits for each cluster.
// If non-empty, Karmada will create ResourceQuotas in the corresponding clusters. // Note: for clusters not present in this list, Karmada will set an empty ResourceQuota to them, which means these
// Clusters not listed here or when StaticAssignments is empty will have no ResourceQuotas created. // clusters will have no quotas in the referencing namespace.
//
// This field addresses multi-cluster configuration management challenges by allowing centralized
// control over ResourceQuotas across clusters.
//
// Note: The Karmada scheduler currently does NOT use this configuration for scheduling decisions.
// Future updates may integrate it into the scheduling logic.
//
// +optional
StaticAssignments []StaticClusterAssignment `json:"staticAssignments,omitempty"`
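A sketch of how Overall and StaticAssignments combine (in-package; the StaticClusterAssignment field names and the quantities are assumptions for illustration, and resource refers to k8s.io/apimachinery/pkg/api/resource).
// exampleQuotaSpec caps the namespace at 100 CPU across all clusters and pins
// a 40-CPU ResourceQuota to member1; other clusters get no ResourceQuota.
var exampleQuotaSpec = FederatedResourceQuotaSpec{
	Overall: corev1.ResourceList{
		corev1.ResourceCPU: resource.MustParse("100"),
	},
	StaticAssignments: []StaticClusterAssignment{{
		ClusterName: "member1", // assumed field name
		Hard: corev1.ResourceList{ // assumed field name
			corev1.ResourceCPU: resource.MustParse("40"),
		},
	}},
}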


@ -43,7 +43,7 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=overridepolicies,scope=Namespaced,shortName=op,categories={karmada-io}
// +kubebuilder:resource:shortName=op,categories={karmada-io}
// OverridePolicy represents the policy that overrides a group of resources to one or more clusters.
type OverridePolicy struct {
@ -101,7 +101,6 @@ type RuleWithCluster struct {
// - ArgsOverrider
// - LabelsOverrider
// - AnnotationsOverrider
// - FieldOverrider
// - Plaintext
type Overriders struct {
// Plaintext represents override rules defined with plaintext overriders.
@ -127,13 +126,6 @@ type Overriders struct {
// AnnotationsOverrider represents the rules dedicated to handling workload annotations
// +optional
AnnotationsOverrider []LabelAnnotationOverrider `json:"annotationsOverrider,omitempty"`
// FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource.
// This allows changing a single field within the resource with multiple operations.
// It is designed to handle structured field values such as those found in ConfigMaps or Secrets.
// The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.
// +optional
FieldOverrider []FieldOverrider `json:"fieldOverrider,omitempty"`
}
// LabelAnnotationOverrider represents the rules dedicated to handling workload labels/annotations
@ -263,65 +255,6 @@ const (
OverriderOpReplace OverriderOperator = "replace"
)
// FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource.
// This allows changing a single field within the resource with multiple operations.
// It is designed to handle structured field values such as those found in ConfigMaps or Secrets.
// The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.
// Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously.
type FieldOverrider struct {
// FieldPath specifies the initial location in the instance document where the operation should take place.
// The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml"
// specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml".
// +required
FieldPath string `json:"fieldPath"`
// JSON represents the operations performed on the JSON document specified by the FieldPath.
// +optional
JSON []JSONPatchOperation `json:"json,omitempty"`
// YAML represents the operations performed on the YAML document specified by the FieldPath.
// +optional
YAML []YAMLPatchOperation `json:"yaml,omitempty"`
}
// JSONPatchOperation represents a single field modification operation for JSON format.
type JSONPatchOperation struct {
// SubPath specifies the relative location within the initial FieldPath where the operation should take place.
// The path uses RFC 6901 for navigating into nested structures.
// +required
SubPath string `json:"subPath"`
// Operator indicates the operation on target field.
// Available operators are: "add", "remove", and "replace".
// +kubebuilder:validation:Enum=add;remove;replace
// +required
Operator OverriderOperator `json:"operator"`
// Value is the new value to set for the specified field if the operation is "add" or "replace".
// For "remove" operation, this field is ignored.
// +optional
Value apiextensionsv1.JSON `json:"value,omitempty"`
}
// YAMLPatchOperation represents a single field modification operation for YAML format.
type YAMLPatchOperation struct {
// SubPath specifies the relative location within the initial FieldPath where the operation should take place.
// The path uses RFC 6901 for navigating into nested structures.
// +required
SubPath string `json:"subPath"`
// Operator indicates the operation on target field.
// Available operators are: "add", "remove", and "replace".
// +kubebuilder:validation:Enum=add;remove;replace
// +required
Operator OverriderOperator `json:"operator"`
// Value is the new value to set for the specified field if the operation is "add" or "replace".
// For "remove" operation, this field is ignored.
// +optional
Value apiextensionsv1.JSON `json:"value,omitempty"`
}
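A sketch of a FieldOverrider (in-package; apiextensionsv1 is the apiextensions/v1 import this file already uses, and the paths and value are placeholders).
// exampleFieldOverrider rewrites a single key inside the YAML document stored
// under the ConfigMap data key "db-config.yaml" on its way to member clusters.
var exampleFieldOverrider = FieldOverrider{
	FieldPath: "/data/db-config.yaml",
	YAML: []YAMLPatchOperation{{
		SubPath:  "/maxConnections",
		Operator: OverriderOpReplace,
		Value:    apiextensionsv1.JSON{Raw: []byte("100")},
	}},
}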
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// OverridePolicyList is a collection of OverridePolicy.
@ -335,7 +268,7 @@ type OverridePolicyList struct {
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:path=clusteroverridepolicies,scope="Cluster",shortName=cop,categories={karmada-io}
// +kubebuilder:resource:scope="Cluster",shortName=cop,categories={karmada-io}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterOverridePolicy represents the cluster-wide policy that overrides a group of resources to one or more clusters.


@ -19,7 +19,7 @@ package v1alpha1
import (
"testing"
"k8s.io/utils/ptr" "k8s.io/utils/pointer"
)
func TestPropagationPolicy_ExplicitPriority(t *testing.T) {
@ -34,7 +34,7 @@ func TestPropagationPolicy_ExplicitPriority(t *testing.T) {
},
{
name: "expected to be declared priority in pp",
declaredPriority: ptr.To[int32](20), declaredPriority: pointer.Int32(20),
expectedPriority: 20,
},
}
@ -62,7 +62,7 @@ func TestClusterPropagationPolicy_ExplicitPriority(t *testing.T) {
},
{
name: "expected to be declared priority in cpp",
declaredPriority: ptr.To[int32](20), declaredPriority: pointer.Int32(20),
expectedPriority: 20,
},
}


@ -43,10 +43,7 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=propagationpolicies,scope=Namespaced,shortName=pp,categories={karmada-io}
// +kubebuilder:resource:shortName=pp,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.conflictResolution`,name="CONFLICT-RESOLUTION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.priority`,name="PRIORITY",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// PropagationPolicy represents the policy that propagates a group of resources to one or more clusters.
type PropagationPolicy struct {
@ -155,68 +152,6 @@ type PropagationSpec struct {
// +kubebuilder:validation:Enum=Abort;Overwrite
// +optional
ConflictResolution ConflictResolution `json:"conflictResolution,omitempty"`
// ActivationPreference indicates how the referencing resource template will
// be propagated, in case of policy changes.
//
// If empty, the resource template will respond to policy changes
// immediately, in other words, any policy changes will drive the resource
// template to be propagated immediately as per the current propagation rules.
//
// If the value is 'Lazy' means the policy changes will not take effect for now
// but defer to the resource template changes, in other words, the resource
// template will not be propagated as per the current propagation rules until
// there is an update on it.
// This is an experimental feature that might help in a scenario where a policy
// manages huge amount of resource templates, changes to a policy typically
// affect numerous applications simultaneously. A minor misconfiguration
// could lead to widespread failures. With this feature, the change can be
// gradually rolled out through iterative modifications of resource templates.
//
// +kubebuilder:validation:Enum=Lazy
// +optional
ActivationPreference ActivationPreference `json:"activationPreference,omitempty"`
// Suspension declares the policy for suspending different aspects of propagation.
// nil means no suspension. no default values.
// +optional
Suspension *Suspension `json:"suspension,omitempty"`
// PreserveResourcesOnDeletion controls whether resources should be preserved on the
// member clusters when the resource template is deleted.
// If set to true, resources will be preserved on the member clusters.
// Default is false, which means resources will be deleted along with the resource template.
//
// This setting is particularly useful during workload migration scenarios to ensure
// that rollback can occur quickly without affecting the workloads running on the
// member clusters.
//
// Additionally, this setting applies uniformly across all member clusters and will not
// selectively control preservation on only some clusters.
//
// Note: This setting does not apply to the deletion of the policy itself.
// When the policy is deleted, the resource templates and their corresponding
// propagated resources in member clusters will remain unchanged unless explicitly deleted.
//
// +optional
PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
// SchedulePriority defines how Karmada should resolve the priority and preemption policy
// for workload scheduling.
//
// This setting is useful for controlling the scheduling behavior of offline workloads.
// By setting a higher or lower priority, users can control which workloads are scheduled first.
// Additionally, it allows specifying a preemption policy where higher-priority workloads can
// preempt lower-priority ones in scenarios of resource contention.
//
// Note: This feature is currently in the alpha stage. The priority-based scheduling functionality is
// controlled by the PriorityBasedScheduling feature gate, and preemption is controlled by the
// PriorityBasedPreemptiveScheduling feature gate. Currently, only priority-based scheduling is
// supported. Preemption functionality is not yet available and will be introduced in future
// releases as the feature matures.
//
// +optional
SchedulePriority *SchedulePriority `json:"schedulePriority,omitempty"`
}
// ResourceSelector the resources will be selected.
@ -251,44 +186,13 @@ type FieldSelector struct {
MatchExpressions []corev1.NodeSelectorRequirement `json:"matchExpressions,omitempty"`
}
// Suspension defines the policy for suspending different aspects of propagation. // PurgeMode represents that how to deal with the legacy applications on the
type Suspension struct {
// Dispatching controls whether dispatching should be suspended.
// nil means not suspend, no default value, only accepts 'true'.
// Note: true means stop propagating to all clusters. Can not co-exist
// with DispatchingOnClusters which is used to suspend particular clusters.
// +optional
Dispatching *bool `json:"dispatching,omitempty"`
// DispatchingOnClusters declares a list of clusters to which the dispatching
// should be suspended.
// Note: Can not co-exist with Dispatching which is used to suspend all.
// +optional
DispatchingOnClusters *SuspendClusters `json:"dispatchingOnClusters,omitempty"`
}
// SuspendClusters represents a group of clusters that should be suspended from propagating.
// Note: No plan to introduce the label selector or field selector to select clusters yet, as it
// would make the system unpredictable.
type SuspendClusters struct {
// ClusterNames is the list of clusters to be selected.
// +optional
ClusterNames []string `json:"clusterNames,omitempty"`
}
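A sketch of the suspension knob (in-package; the cluster names are placeholders).
// exampleSuspension pauses dispatching to two named clusters while leaving
// propagation to the rest of the fleet untouched.
var exampleSuspension = Suspension{
	DispatchingOnClusters: &SuspendClusters{
		ClusterNames: []string{"member1", "member2"},
	},
}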
// PurgeMode represents how to deal with the legacy application on the
// cluster from which the application is migrated.
type PurgeMode string
const (
// Immediately represents that Karmada will immediately evict the legacy
// application. This is useful in scenarios where an application can not // application.
// tolerate two instances running simultaneously.
// For example, the Flink application supports exactly-once state consistency,
// which means it requires that no two instances of the application are running
// at the same time. During a failover, it is crucial to ensure that the old
// application is removed before creating a new one to avoid duplicate
// processing and maintaining state consistency.
Immediately PurgeMode = "Immediately"
// Graciously represents that Karmada will wait for the application to
// come back to healthy on the new cluster or after a timeout is reached
@ -328,7 +232,6 @@ type ApplicationFailoverBehavior struct {
// cluster from which the application is migrated.
// Valid options are "Immediately", "Graciously" and "Never".
// Defaults to "Graciously".
// +kubebuilder:validation:Enum=Immediately;Graciously;Never
// +kubebuilder:default=Graciously
// +optional
PurgeMode PurgeMode `json:"purgeMode,omitempty"`
@ -341,23 +244,6 @@ type ApplicationFailoverBehavior struct {
// Value must be positive integer.
// +optional
GracePeriodSeconds *int32 `json:"gracePeriodSeconds,omitempty"`
// StatePreservation defines the policy for preserving and restoring state data
// during failover events for stateful applications.
//
// When an application fails over from one cluster to another, this policy enables
// the extraction of critical data from the original resource configuration.
// Upon successful migration, the extracted data is then re-injected into the new
// resource, ensuring that the application can resume operation with its previous
// state intact.
// This is particularly useful for stateful applications where maintaining data
// consistency across failover events is crucial.
// If not specified, means no state data will be preserved.
//
// Note: This requires the StatefulFailoverInjection feature gate to be enabled,
// which is alpha.
// +optional
StatePreservation *StatePreservation `json:"statePreservation,omitempty"`
}
// DecisionConditions represents the decision conditions of performing the failover process.
@ -371,41 +257,6 @@ type DecisionConditions struct {
TolerationSeconds *int32 `json:"tolerationSeconds,omitempty"`
}
// StatePreservation defines the policy for preserving state during failover events.
type StatePreservation struct {
// Rules contains a list of StatePreservationRule configurations.
// Each rule specifies a JSONPath expression targeting specific pieces of
// state data to be preserved during failover events. An AliasLabelName is associated
// with each rule, serving as a label key when the preserved data is passed
// to the new cluster.
// +required
Rules []StatePreservationRule `json:"rules"`
}
// StatePreservationRule defines a single rule for state preservation.
// It includes a JSONPath expression and an alias name that will be used
// as a label key when passing state information to the new cluster.
type StatePreservationRule struct {
// AliasLabelName is the name that will be used as a label key when the preserved
// data is passed to the new cluster. This facilitates the injection of the
// preserved state back into the application resources during recovery.
// +required
AliasLabelName string `json:"aliasLabelName"`
// JSONPath is the JSONPath template used to identify the state data
// to be preserved from the original resource configuration.
// The JSONPath syntax follows the Kubernetes specification:
// https://kubernetes.io/docs/reference/kubectl/jsonpath/
//
// Note: The JSONPath expression will start searching from the "status" field of
// the API resource object by default. For example, to extract the "availableReplicas"
// from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
// "{.status.availableReplicas}".
//
// +required
JSONPath string `json:"jsonPath"`
}
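A sketch of a state-preservation rule (in-package; the label key and JSONPath are illustrative and, per the comment above, the path is evaluated relative to the status field of the original object).
// exampleStatePreservation extracts a checkpoint/savepoint location from the
// failed application's status and re-injects it as a label on the new cluster.
var exampleStatePreservation = StatePreservation{
	Rules: []StatePreservationRule{{
		AliasLabelName: "example.io/savepoint-path",
		JSONPath:       "{.jobStatus.savepointPath}",
	}},
}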
// Placement represents the rule for select clusters.
type Placement struct {
// ClusterAffinity represents scheduling restrictions to a certain set of clusters.
@ -667,64 +518,6 @@ const (
ConflictAbort ConflictResolution = "Abort"
)
// ActivationPreference indicates how the referencing resource template will be propagated, in case of policy changes.
type ActivationPreference string
const (
// LazyActivation means the policy changes will not take effect for now but defer to the resource template changes,
// in other words, the resource template will not be propagated as per the current propagation rules until
// there is an update on it.
LazyActivation ActivationPreference = "Lazy"
)
// SchedulePriority defines how Karmada should resolve the priority and preemption policy
// for workload scheduling.
type SchedulePriority struct {
// PriorityClassSource specifies where Karmada should look for the PriorityClass definition.
// Available options:
// - KubePriorityClass: Uses Kubernetes PriorityClass (scheduling.k8s.io/v1)
// - PodPriorityClass: Uses PriorityClassName from PodTemplate: PodSpec.PriorityClassName (not yet implemented)
// - FederatedPriorityClass: Uses Karmada FederatedPriorityClass (not yet implemented)
//
// +kubebuilder:validation:Enum=KubePriorityClass
// +required
PriorityClassSource PriorityClassSource `json:"priorityClassSource"`
// PriorityClassName specifies which PriorityClass to use. Its behavior depends on PriorityClassSource:
//
// Behavior of PriorityClassName:
//
// For KubePriorityClass:
// - When specified: Uses the named Kubernetes PriorityClass.
//
// For PodPriorityClass:
// - Uses PriorityClassName from the PodTemplate.
// - Not yet implemented.
//
// For FederatedPriorityClass:
// - Not yet implemented.
//
// +required
PriorityClassName string `json:"priorityClassName"`
}
// PriorityClassSource defines the type for PriorityClassSource field.
type PriorityClassSource string
const (
// FederatedPriorityClass specifies to use Karmada FederatedPriorityClass for priority resolution.
// This feature is planned for future releases and is currently not implemented.
FederatedPriorityClass PriorityClassSource = "FederatedPriorityClass"
// KubePriorityClass specifies to use Kubernetes native PriorityClass (scheduling.k8s.io/v1)
// for priority resolution. This is the default source.
KubePriorityClass PriorityClassSource = "KubePriorityClass"
// PodPriorityClass specifies to use the PriorityClassName defined in the workload's
// PodTemplate for priority resolution.
PodPriorityClass PriorityClassSource = "PodPriorityClass"
)
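A sketch tying the two SchedulePriority fields together (in-package; the PriorityClass name is a placeholder).
// exampleSchedulePriority resolves workload priority from a Kubernetes
// PriorityClass, the only source currently implemented.
var exampleSchedulePriority = SchedulePriority{
	PriorityClassSource: KubePriorityClass,
	PriorityClassName:   "offline-batch",
}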
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PropagationPolicyList contains a list of PropagationPolicy.
@ -736,11 +529,8 @@ type PropagationPolicyList struct {
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:path=clusterpropagationpolicies,scope="Cluster",shortName=cpp,categories={karmada-io}
// +kubebuilder:resource:scope="Cluster",shortName=cpp,categories={karmada-io}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:printcolumn:JSONPath=`.spec.conflictResolution`,name="CONFLICT-RESOLUTION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.priority`,name="PRIORITY",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ClusterPropagationPolicy represents the cluster-wide policy that propagates a group of resources to one or more clusters.
// Different with PropagationPolicy that could only propagate resources in its own namespace, ClusterPropagationPolicy


@ -16,7 +16,6 @@ limitations under the License.
package v1alpha1
// The well-known label key constant.
const (
// PropagationPolicyPermanentIDLabel is the identifier of a PropagationPolicy object.
// Karmada generates a unique identifier, such as metadata.UUID, for each PropagationPolicy object.
@ -32,6 +31,30 @@ const (
// In backup scenarios, when applying the backup resource manifest in a new cluster, the UUID may change.
ClusterPropagationPolicyPermanentIDLabel = "clusterpropagationpolicy.karmada.io/permanent-id"
// PropagationPolicyUIDLabel is the uid of PropagationPolicy object.
PropagationPolicyUIDLabel = "propagationpolicy.karmada.io/uid"
// PropagationPolicyNamespaceAnnotation is added to objects to specify associated PropagationPolicy namespace.
PropagationPolicyNamespaceAnnotation = "propagationpolicy.karmada.io/namespace"
// PropagationPolicyNameAnnotation is added to objects to specify associated PropagationPolicy name.
PropagationPolicyNameAnnotation = "propagationpolicy.karmada.io/name"
// ClusterPropagationPolicyUIDLabel is the uid of ClusterPropagationPolicy object.
ClusterPropagationPolicyUIDLabel = "clusterpropagationpolicy.karmada.io/uid"
// ClusterPropagationPolicyAnnotation is added to objects to specify associated ClusterPropagationPolicy name.
ClusterPropagationPolicyAnnotation = "clusterpropagationpolicy.karmada.io/name"
// PropagationPolicyNamespaceLabel is added to objects to specify associated PropagationPolicy namespace.
PropagationPolicyNamespaceLabel = "propagationpolicy.karmada.io/namespace"
// PropagationPolicyNameLabel is added to objects to specify associated PropagationPolicy's name.
PropagationPolicyNameLabel = "propagationpolicy.karmada.io/name"
// ClusterPropagationPolicyLabel is added to objects to specify associated ClusterPropagationPolicy.
ClusterPropagationPolicyLabel = "clusterpropagationpolicy.karmada.io/name"
// NamespaceSkipAutoPropagationLabel is added to namespace objects to indicate if
// the namespace should be skipped from propagating by the namespace controller.
// For example, a namespace with the following label will be skipped:
@ -42,15 +65,3 @@ const (
// synced to new member clusters, but old member clusters still have it.
NamespaceSkipAutoPropagationLabel = "namespace.karmada.io/skip-auto-propagation"
)
// The well-known annotation key constant.
const (
// PropagationPolicyNamespaceAnnotation is added to objects to specify associated PropagationPolicy namespace.
PropagationPolicyNamespaceAnnotation = "propagationpolicy.karmada.io/namespace"
// PropagationPolicyNameAnnotation is added to objects to specify associated PropagationPolicy name.
PropagationPolicyNameAnnotation = "propagationpolicy.karmada.io/name"
// ClusterPropagationPolicyAnnotation is added to objects to specify associated ClusterPropagationPolicy name.
ClusterPropagationPolicyAnnotation = "clusterpropagationpolicy.karmada.io/name"
)
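
As a rough illustration of how these annotation keys are consumed, the sketch below reads the associated PropagationPolicy back off a propagated object. The import path github.com/karmada-io/api/policy/v1alpha1 and the use of metav1.Object are assumptions made for the example only, not something this diff establishes.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	policyv1alpha1 "github.com/karmada-io/api/policy/v1alpha1"
)

// claimedPolicy returns the namespace/name of the PropagationPolicy recorded
// on obj via the well-known annotations, and whether both keys were present.
func claimedPolicy(obj metav1.Object) (namespace, name string, ok bool) {
	annotations := obj.GetAnnotations()
	namespace, nsOK := annotations[policyv1alpha1.PropagationPolicyNamespaceAnnotation]
	name, nameOK := annotations[policyv1alpha1.PropagationPolicyNameAnnotation]
	return namespace, name, nsOK && nameOK
}

func main() {
	obj := &metav1.ObjectMeta{Annotations: map[string]string{
		policyv1alpha1.PropagationPolicyNamespaceAnnotation: "default",
		policyv1alpha1.PropagationPolicyNameAnnotation:      "nginx-propagation",
	}}
	fmt.Println(claimedPolicy(obj)) // default nginx-propagation true
}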

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT. // Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
@ -36,11 +20,6 @@ func (in *ApplicationFailoverBehavior) DeepCopyInto(out *ApplicationFailoverBeha
*out = new(int32) *out = new(int32)
**out = **in **out = **in
} }
if in.StatePreservation != nil {
in, out := &in.StatePreservation, &out.StatePreservation
*out = new(StatePreservation)
(*in).DeepCopyInto(*out)
}
return return
} }
@ -267,106 +246,6 @@ func (in *ClusterQuotaStatus) DeepCopy() *ClusterQuotaStatus {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicy) DeepCopyInto(out *ClusterTaintPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicy.
func (in *ClusterTaintPolicy) DeepCopy() *ClusterTaintPolicy {
if in == nil {
return nil
}
out := new(ClusterTaintPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterTaintPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicyList) DeepCopyInto(out *ClusterTaintPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterTaintPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicyList.
func (in *ClusterTaintPolicyList) DeepCopy() *ClusterTaintPolicyList {
if in == nil {
return nil
}
out := new(ClusterTaintPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterTaintPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicySpec) DeepCopyInto(out *ClusterTaintPolicySpec) {
*out = *in
if in.TargetClusters != nil {
in, out := &in.TargetClusters, &out.TargetClusters
*out = new(ClusterAffinity)
(*in).DeepCopyInto(*out)
}
if in.AddOnConditions != nil {
in, out := &in.AddOnConditions, &out.AddOnConditions
*out = make([]MatchCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RemoveOnConditions != nil {
in, out := &in.RemoveOnConditions, &out.RemoveOnConditions
*out = make([]MatchCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Taints != nil {
in, out := &in.Taints, &out.Taints
*out = make([]Taint, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicySpec.
func (in *ClusterTaintPolicySpec) DeepCopy() *ClusterTaintPolicySpec {
if in == nil {
return nil
}
out := new(ClusterTaintPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommandArgsOverrider) DeepCopyInto(out *CommandArgsOverrider) { func (in *CommandArgsOverrider) DeepCopyInto(out *CommandArgsOverrider) {
*out = *in *out = *in
@ -558,36 +437,6 @@ func (in *FederatedResourceQuotaStatus) DeepCopy() *FederatedResourceQuotaStatus
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldOverrider) DeepCopyInto(out *FieldOverrider) {
*out = *in
if in.JSON != nil {
in, out := &in.JSON, &out.JSON
*out = make([]JSONPatchOperation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.YAML != nil {
in, out := &in.YAML, &out.YAML
*out = make([]YAMLPatchOperation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldOverrider.
func (in *FieldOverrider) DeepCopy() *FieldOverrider {
if in == nil {
return nil
}
out := new(FieldOverrider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldSelector) DeepCopyInto(out *FieldSelector) { func (in *FieldSelector) DeepCopyInto(out *FieldSelector) {
*out = *in *out = *in
@ -648,23 +497,6 @@ func (in *ImagePredicate) DeepCopy() *ImagePredicate {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONPatchOperation) DeepCopyInto(out *JSONPatchOperation) {
*out = *in
in.Value.DeepCopyInto(&out.Value)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatchOperation.
func (in *JSONPatchOperation) DeepCopy() *JSONPatchOperation {
if in == nil {
return nil
}
out := new(JSONPatchOperation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelAnnotationOverrider) DeepCopyInto(out *LabelAnnotationOverrider) { func (in *LabelAnnotationOverrider) DeepCopyInto(out *LabelAnnotationOverrider) {
*out = *in *out = *in
@ -688,27 +520,6 @@ func (in *LabelAnnotationOverrider) DeepCopy() *LabelAnnotationOverrider {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
*out = *in
if in.StatusValues != nil {
in, out := &in.StatusValues, &out.StatusValues
*out = make([]v1.ConditionStatus, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition.
func (in *MatchCondition) DeepCopy() *MatchCondition {
if in == nil {
return nil
}
out := new(MatchCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OverridePolicy) DeepCopyInto(out *OverridePolicy) { func (in *OverridePolicy) DeepCopyInto(out *OverridePolicy) {
*out = *in *out = *in
@ -850,13 +661,6 @@ func (in *Overriders) DeepCopyInto(out *Overriders) {
(*in)[i].DeepCopyInto(&(*out)[i]) (*in)[i].DeepCopyInto(&(*out)[i])
} }
} }
if in.FieldOverrider != nil {
in, out := &in.FieldOverrider, &out.FieldOverrider
*out = make([]FieldOverrider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return return
} }
@ -1018,21 +822,6 @@ func (in *PropagationSpec) DeepCopyInto(out *PropagationSpec) {
*out = new(FailoverBehavior) *out = new(FailoverBehavior)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
if in.Suspension != nil {
in, out := &in.Suspension, &out.Suspension
*out = new(Suspension)
(*in).DeepCopyInto(*out)
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
if in.SchedulePriority != nil {
in, out := &in.SchedulePriority, &out.SchedulePriority
*out = new(SchedulePriority)
**out = **in
}
return return
} }
@ -1110,22 +899,6 @@ func (in *RuleWithCluster) DeepCopy() *RuleWithCluster {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SchedulePriority) DeepCopyInto(out *SchedulePriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePriority.
func (in *SchedulePriority) DeepCopy() *SchedulePriority {
if in == nil {
return nil
}
out := new(SchedulePriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SpreadConstraint) DeepCopyInto(out *SpreadConstraint) { func (in *SpreadConstraint) DeepCopyInto(out *SpreadConstraint) {
*out = *in *out = *in
@ -1142,43 +915,6 @@ func (in *SpreadConstraint) DeepCopy() *SpreadConstraint {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatePreservation) DeepCopyInto(out *StatePreservation) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]StatePreservationRule, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatePreservation.
func (in *StatePreservation) DeepCopy() *StatePreservation {
if in == nil {
return nil
}
out := new(StatePreservation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatePreservationRule) DeepCopyInto(out *StatePreservationRule) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatePreservationRule.
func (in *StatePreservationRule) DeepCopy() *StatePreservationRule {
if in == nil {
return nil
}
out := new(StatePreservationRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StaticClusterAssignment) DeepCopyInto(out *StaticClusterAssignment) { func (in *StaticClusterAssignment) DeepCopyInto(out *StaticClusterAssignment) {
*out = *in *out = *in
@ -1218,83 +954,3 @@ func (in *StaticClusterWeight) DeepCopy() *StaticClusterWeight {
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuspendClusters) DeepCopyInto(out *SuspendClusters) {
*out = *in
if in.ClusterNames != nil {
in, out := &in.ClusterNames, &out.ClusterNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuspendClusters.
func (in *SuspendClusters) DeepCopy() *SuspendClusters {
if in == nil {
return nil
}
out := new(SuspendClusters)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Suspension) DeepCopyInto(out *Suspension) {
*out = *in
if in.Dispatching != nil {
in, out := &in.Dispatching, &out.Dispatching
*out = new(bool)
**out = **in
}
if in.DispatchingOnClusters != nil {
in, out := &in.DispatchingOnClusters, &out.DispatchingOnClusters
*out = new(SuspendClusters)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Suspension.
func (in *Suspension) DeepCopy() *Suspension {
if in == nil {
return nil
}
out := new(Suspension)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Taint) DeepCopyInto(out *Taint) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
func (in *Taint) DeepCopy() *Taint {
if in == nil {
return nil
}
out := new(Taint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *YAMLPatchOperation) DeepCopyInto(out *YAMLPatchOperation) {
*out = *in
in.Value.DeepCopyInto(&out.Value)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YAMLPatchOperation.
func (in *YAMLPatchOperation) DeepCopy() *YAMLPatchOperation {
if in == nil {
return nil
}
out := new(YAMLPatchOperation)
in.DeepCopyInto(out)
return out
}

View File

@ -1,30 +1,11 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT. // Code generated by register-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
import ( import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
) )
// GroupName specifies the group name used to register the objects. // GroupName specifies the group name used to register the objects.
@ -46,7 +27,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead // Depreciated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme Install = localSchemeBuilder.AddToScheme
) )
@ -65,8 +46,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ClusterOverridePolicyList{}, &ClusterOverridePolicyList{},
&ClusterPropagationPolicy{}, &ClusterPropagationPolicy{},
&ClusterPropagationPolicyList{}, &ClusterPropagationPolicyList{},
&ClusterTaintPolicy{},
&ClusterTaintPolicyList{},
&FederatedResourceQuota{}, &FederatedResourceQuota{},
&FederatedResourceQuotaList{}, &FederatedResourceQuotaList{},
&OverridePolicy{}, &OverridePolicy{},

View File

@ -1,21 +0,0 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true
// +groupName=remedy.karmada.io
package v1alpha1

View File

@ -1,123 +0,0 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=remedies,scope="Cluster",categories={karmada-io}
// Remedy represents the cluster-level management strategies based on cluster conditions.
type Remedy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec represents the desired behavior of Remedy.
// +required
Spec RemedySpec `json:"spec"`
}
// RemedySpec represents the desired behavior of Remedy.
type RemedySpec struct {
// ClusterAffinity specifies the clusters that Remedy needs to pay attention to.
// For clusters that meet the DecisionConditions, Actions will be performed.
// If empty, all clusters will be selected.
// +optional
ClusterAffinity *ClusterAffinity `json:"clusterAffinity,omitempty"`
// DecisionMatches indicates the decision matches of triggering the remedy
// system to perform the actions. As long as any one DecisionMatch matches,
// the Actions will be performed.
// If empty, the Actions will be performed immediately.
// +optional
DecisionMatches []DecisionMatch `json:"decisionMatches,omitempty"`
// Actions specifies the actions that remedy system needs to perform.
// If empty, no action will be performed.
// +optional
Actions []RemedyAction `json:"actions,omitempty"`
}
// DecisionMatch represents the decision match detail of activating the remedy system.
type DecisionMatch struct {
// ClusterConditionMatch describes the cluster condition requirement.
// +optional
ClusterConditionMatch *ClusterConditionRequirement `json:"clusterConditionMatch,omitempty"`
}
// ClusterConditionRequirement describes the Cluster condition requirement details.
type ClusterConditionRequirement struct {
// ConditionType specifies the ClusterStatus condition type.
// +required
ConditionType ConditionType `json:"conditionType"`
// Operator represents a conditionType's relationship to a conditionStatus.
// Valid operators are Equal, NotEqual.
//
// +kubebuilder:validation:Enum=Equal;NotEqual
// +required
Operator ClusterConditionOperator `json:"operator"`
// ConditionStatus specifies the ClusterStatus condition status.
// +required
ConditionStatus string `json:"conditionStatus"`
}
// ConditionType represents the detection ClusterStatus condition type.
type ConditionType string
const (
// ServiceDomainNameResolutionReady expresses the detection of the domain name resolution
// function of Service in the Kubernetes cluster.
ServiceDomainNameResolutionReady ConditionType = "ServiceDomainNameResolutionReady"
)
// ClusterConditionOperator is the set of operators that can be used in the cluster condition requirement.
type ClusterConditionOperator string
const (
// ClusterConditionEqual means equal match.
ClusterConditionEqual ClusterConditionOperator = "Equal"
// ClusterConditionNotEqual means not equal match.
ClusterConditionNotEqual ClusterConditionOperator = "NotEqual"
)
// ClusterAffinity represents the filter to select clusters.
type ClusterAffinity struct {
// ClusterNames is the list of clusters to be selected.
// +optional
ClusterNames []string `json:"clusterNames,omitempty"`
}
// RemedyAction represents the action type the remedy system needs to perform.
type RemedyAction string
const (
// TrafficControl indicates that the cluster requires traffic control.
TrafficControl RemedyAction = "TrafficControl"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemedyList contains a list of Remedy.
type RemedyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Remedy `json:"items"`
}
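
Since the Remedy API above is fairly self-describing, a minimal sketch of an object built from these types may help: it asks the remedy system to apply traffic control to member1 whenever service domain name resolution is reported as not ready. The import path github.com/karmada-io/api/remedy/v1alpha1 is assumed for illustration; the types themselves are the ones defined above.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	remedyv1alpha1 "github.com/karmada-io/api/remedy/v1alpha1"
)

func main() {
	// Apply TrafficControl on member1 when ServiceDomainNameResolutionReady
	// equals "False" in the cluster's status conditions.
	remedy := &remedyv1alpha1.Remedy{
		ObjectMeta: metav1.ObjectMeta{Name: "dns-failure-traffic-control"},
		Spec: remedyv1alpha1.RemedySpec{
			ClusterAffinity: &remedyv1alpha1.ClusterAffinity{
				ClusterNames: []string{"member1"},
			},
			DecisionMatches: []remedyv1alpha1.DecisionMatch{{
				ClusterConditionMatch: &remedyv1alpha1.ClusterConditionRequirement{
					ConditionType:   remedyv1alpha1.ServiceDomainNameResolutionReady,
					Operator:        remedyv1alpha1.ClusterConditionEqual,
					ConditionStatus: "False",
				},
			}},
			Actions: []remedyv1alpha1.RemedyAction{remedyv1alpha1.TrafficControl},
		},
	}
	fmt.Println(remedy.Name, remedy.Spec.Actions)
}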

View File

@ -1,28 +0,0 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
// ResourceKindRemedy is kind name of Remedy.
ResourceKindRemedy = "Remedy"
// ResourceSingularRemedy is singular name of Remedy.
ResourceSingularRemedy = "remedy"
// ResourcePluralRemedy is plural name of Remedy.
ResourcePluralRemedy = "remedies"
// ResourceNamespaceScopedRemedy indicates if Remedy is NamespaceScoped.
ResourceNamespaceScopedRemedy = false
)

View File

@ -1,177 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterAffinity) DeepCopyInto(out *ClusterAffinity) {
*out = *in
if in.ClusterNames != nil {
in, out := &in.ClusterNames, &out.ClusterNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAffinity.
func (in *ClusterAffinity) DeepCopy() *ClusterAffinity {
if in == nil {
return nil
}
out := new(ClusterAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterConditionRequirement) DeepCopyInto(out *ClusterConditionRequirement) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConditionRequirement.
func (in *ClusterConditionRequirement) DeepCopy() *ClusterConditionRequirement {
if in == nil {
return nil
}
out := new(ClusterConditionRequirement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DecisionMatch) DeepCopyInto(out *DecisionMatch) {
*out = *in
if in.ClusterConditionMatch != nil {
in, out := &in.ClusterConditionMatch, &out.ClusterConditionMatch
*out = new(ClusterConditionRequirement)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecisionMatch.
func (in *DecisionMatch) DeepCopy() *DecisionMatch {
if in == nil {
return nil
}
out := new(DecisionMatch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Remedy) DeepCopyInto(out *Remedy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Remedy.
func (in *Remedy) DeepCopy() *Remedy {
if in == nil {
return nil
}
out := new(Remedy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Remedy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemedyList) DeepCopyInto(out *RemedyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Remedy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemedyList.
func (in *RemedyList) DeepCopy() *RemedyList {
if in == nil {
return nil
}
out := new(RemedyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemedyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemedySpec) DeepCopyInto(out *RemedySpec) {
*out = *in
if in.ClusterAffinity != nil {
in, out := &in.ClusterAffinity, &out.ClusterAffinity
*out = new(ClusterAffinity)
(*in).DeepCopyInto(*out)
}
if in.DecisionMatches != nil {
in, out := &in.DecisionMatches, &out.DecisionMatches
*out = make([]DecisionMatch, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Actions != nil {
in, out := &in.Actions, &out.Actions
*out = make([]RemedyAction, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemedySpec.
func (in *RemedySpec) DeepCopy() *RemedySpec {
if in == nil {
return nil
}
out := new(RemedySpec)
in.DeepCopyInto(out)
return out
}

View File

@ -1,70 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
const GroupName = "remedy.karmada.io"
// GroupVersion specifies the group and the version used to register the objects.
var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// SchemeGroupVersion is group version used to register these objects
// Deprecated: use GroupVersion instead.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes)
}
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Remedy{},
&RemedyList{},
)
// AddToGroupVersion allows the serialization of client types like ListOptions.
v1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
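
The SchemeBuilder/Install pattern above is standard register-gen output; a small sketch of how a consumer might wire it into a runtime.Scheme follows, again assuming the github.com/karmada-io/api/remedy/v1alpha1 import path.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	remedyv1alpha1 "github.com/karmada-io/api/remedy/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// Install adds Remedy and RemedyList (plus list/get options) under
	// remedy.karmada.io/v1alpha1.
	if err := remedyv1alpha1.Install(scheme); err != nil {
		panic(err)
	}
	gvk := remedyv1alpha1.SchemeGroupVersion.WithKind("Remedy")
	fmt.Println(scheme.Recognizes(gvk)) // true
}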

View File

@ -18,7 +18,6 @@ package install
import ( import (
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/karmada-io/api/search" "github.com/karmada-io/api/search"
@ -28,6 +27,6 @@ import (
// Install registers the API group and adds types to a scheme. // Install registers the API group and adds types to a scheme.
func Install(scheme *runtime.Scheme) { func Install(scheme *runtime.Scheme) {
utilruntime.Must(search.AddToScheme(scheme)) utilruntime.Must(search.AddToScheme(scheme))
utilruntime.Must(searchv1alpha1.Install(scheme)) utilruntime.Must(searchv1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(schema.GroupVersion{Group: searchv1alpha1.GroupVersion.Group, Version: searchv1alpha1.GroupVersion.Version})) utilruntime.Must(scheme.SetVersionPriority(searchv1alpha1.SchemeGroupVersion))
} }

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT. // Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT. // Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1

View File

@ -1,30 +1,11 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT. // Code generated by register-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
import ( import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
) )
// GroupName specifies the group name used to register the objects. // GroupName specifies the group name used to register the objects.
@ -46,7 +27,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead // Depreciated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme Install = localSchemeBuilder.AddToScheme
) )

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT. // Code generated by deepcopy-gen. DO NOT EDIT.
package search package search

View File

@ -25,7 +25,7 @@ import (
// +genclient // +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
// +kubebuilder:resource:path=resourcebindings,scope=Namespaced,shortName=rb,categories={karmada-io} // +kubebuilder:resource:shortName=rb,categories={karmada-io}
// ResourceBinding represents a binding of a kubernetes resource with a propagation policy. // ResourceBinding represents a binding of a kubernetes resource with a propagation policy.
type ResourceBinding struct { type ResourceBinding struct {
@ -135,7 +135,7 @@ type ResourceBindingList struct {
// +genclient // +genclient
// +genclient:nonNamespaced // +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=clusterresourcebindings,scope="Cluster",shortName=crb,categories={karmada-io} // +kubebuilder:resource:scope="Cluster",shortName=crb,categories={karmada-io}
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
// ClusterResourceBinding represents a binding of a kubernetes resource with a ClusterPropagationPolicy. // ClusterResourceBinding represents a binding of a kubernetes resource with a ClusterPropagationPolicy.
@ -151,6 +151,7 @@ type ClusterResourceBinding struct {
Status ResourceBindingStatus `json:"status,omitempty"` Status ResourceBindingStatus `json:"status,omitempty"`
} }
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterResourceBindingList contains a list of ClusterResourceBinding. // ClusterResourceBindingList contains a list of ClusterResourceBinding.

View File

@ -0,0 +1,34 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
// ResourceBindingNamespaceLabel is added to objects to specify associated ResourceBinding's namespace.
ResourceBindingNamespaceLabel = "resourcebinding.karmada.io/namespace"
// ResourceBindingNameLabel is added to objects to specify associated ResourceBinding's name.
ResourceBindingNameLabel = "resourcebinding.karmada.io/name"
// ClusterResourceBindingLabel is added to objects to specify associated ClusterResourceBinding.
ClusterResourceBindingLabel = "clusterresourcebinding.karmada.io/name"
// WorkNamespaceLabel is added to objects to specify associated Work's namespace.
WorkNamespaceLabel = "work.karmada.io/namespace"
// WorkNameLabel is added to objects to specify associated Work's name.
WorkNameLabel = "work.karmada.io/name"
)
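
These label keys are typically used to select the objects stamped by a given Work or ResourceBinding. A minimal sketch using a label selector is shown below; it assumes the constants live in the work/v1alpha1 package of github.com/karmada-io/api and uses a hypothetical execution namespace name.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"

	workv1alpha1 "github.com/karmada-io/api/work/v1alpha1"
)

func main() {
	// Build a selector matching everything created by Work "demo" that lives
	// in the execution namespace "karmada-es-member1".
	selector := labels.SelectorFromSet(labels.Set{
		workv1alpha1.WorkNamespaceLabel: "karmada-es-member1",
		workv1alpha1.WorkNameLabel:      "demo",
	})
	fmt.Println(selector.String())
}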

View File

@ -35,10 +35,9 @@ const (
// +genclient // +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
// +kubebuilder:resource:path=works,scope=Namespaced,shortName=wk,categories={karmada-io} // +kubebuilder:resource:categories={karmada-io},shortName=wk
// +kubebuilder:printcolumn:JSONPath=`.spec.workload.manifests[*].kind`,name="WORKLOAD-KIND",type=string // +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="Applied",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="APPLIED",type=string // +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// Work defines a list of resources to be deployed on the member cluster. // Work defines a list of resources to be deployed on the member cluster.
type Work struct { type Work struct {
@ -57,20 +56,6 @@ type Work struct {
type WorkSpec struct { type WorkSpec struct {
// Workload represents the manifest workload to be deployed on managed cluster. // Workload represents the manifest workload to be deployed on managed cluster.
Workload WorkloadTemplate `json:"workload,omitempty"` Workload WorkloadTemplate `json:"workload,omitempty"`
// SuspendDispatching controls whether dispatching should
// be suspended, nil means not suspend.
// Note: true means stop propagating to the corresponding member cluster, and
// does not prevent status collection.
// +optional
SuspendDispatching *bool `json:"suspendDispatching,omitempty"`
// PreserveResourcesOnDeletion controls whether resources should be preserved on the
// member cluster when the Work object is deleted.
// If set to true, resources will be preserved on the member cluster.
// Default is false, which means resources will be deleted along with the Work object.
// +optional
PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
} }
// WorkloadTemplate represents the manifest workload to be deployed on managed cluster. // WorkloadTemplate represents the manifest workload to be deployed on managed cluster.
@ -160,8 +145,6 @@ const (
// WorkDegraded represents that the current state of Work does not match // WorkDegraded represents that the current state of Work does not match
// the desired state for a certain period. // the desired state for a certain period.
WorkDegraded string = "Degraded" WorkDegraded string = "Degraded"
// WorkDispatching represents the dispatching or suspension status of the Work resource
WorkDispatching string = "Dispatching"
) )
// ResourceHealth represents that the health status of the reference resource. // ResourceHealth represents that the health status of the reference resource.
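
The SuspendDispatching and PreserveResourcesOnDeletion knobs described above are plain *bool fields on WorkSpec. A minimal sketch of setting them, on the side of the diff that carries them and with the github.com/karmada-io/api/work/v1alpha1 import path assumed, looks like this:

package main

import (
	"fmt"

	workv1alpha1 "github.com/karmada-io/api/work/v1alpha1"
)

func main() {
	suspend := true
	preserve := true
	spec := workv1alpha1.WorkSpec{
		// Workload is left empty here; a real Work carries the manifests to
		// apply on the member cluster.
		SuspendDispatching:          &suspend,  // stop propagating, keep collecting status
		PreserveResourcesOnDeletion: &preserve, // leave resources behind when the Work is deleted
	}
	fmt.Println(*spec.SuspendDispatching, *spec.PreserveResourcesOnDeletion)
}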

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT. // Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
@ -381,16 +365,6 @@ func (in *WorkList) DeepCopyObject() runtime.Object {
func (in *WorkSpec) DeepCopyInto(out *WorkSpec) { func (in *WorkSpec) DeepCopyInto(out *WorkSpec) {
*out = *in *out = *in
in.Workload.DeepCopyInto(&out.Workload) in.Workload.DeepCopyInto(&out.Workload)
if in.SuspendDispatching != nil {
in, out := &in.SuspendDispatching, &out.SuspendDispatching
*out = new(bool)
**out = **in
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
return return
} }

View File

@ -1,30 +1,11 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT. // Code generated by register-gen. DO NOT EDIT.
package v1alpha1 package v1alpha1
import ( import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
) )
// GroupName specifies the group name used to register the objects. // GroupName specifies the group name used to register the objects.
@ -46,7 +27,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead // Depreciated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme Install = localSchemeBuilder.AddToScheme
) )

View File

@ -48,11 +48,11 @@ const (
// +genclient // +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
// +kubebuilder:resource:path=resourcebindings,scope=Namespaced,shortName=rb,categories={karmada-io} // +kubebuilder:resource:shortName=rb,categories={karmada-io}
// +kubebuilder:storageversion // +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="SCHEDULED",type=string // +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="Scheduled",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FULLYAPPLIED",type=string // +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FullyApplied",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date // +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// ResourceBinding represents a binding of a kubernetes resource with a propagation policy. // ResourceBinding represents a binding of a kubernetes resource with a propagation policy.
type ResourceBinding struct { type ResourceBinding struct {
@ -136,33 +136,6 @@ type ResourceBindingSpec struct {
// +kubebuilder:validation:Enum=Abort;Overwrite // +kubebuilder:validation:Enum=Abort;Overwrite
// +optional // +optional
ConflictResolution policyv1alpha1.ConflictResolution `json:"conflictResolution,omitempty"` ConflictResolution policyv1alpha1.ConflictResolution `json:"conflictResolution,omitempty"`
// RescheduleTriggeredAt is a timestamp representing when the referenced resource is triggered rescheduling.
// When this field is updated, it means a rescheduling is manually triggered by the user, and the expected behavior
// of this action is to do a complete recalculation without referring to the last scheduling results.
// It works with the status.lastScheduledTime field: the rescheduling actually executes only when this timestamp is
// later than the timestamp in status.lastScheduledTime; otherwise, it is ignored.
//
// It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC.
// +optional
RescheduleTriggeredAt *metav1.Time `json:"rescheduleTriggeredAt,omitempty"`
// Suspension declares the policy for suspending different aspects of propagation.
// nil means no suspension. no default values.
// +optional
Suspension *Suspension `json:"suspension,omitempty"`
// PreserveResourcesOnDeletion controls whether resources should be preserved on the
// member clusters when the binding object is deleted.
// If set to true, resources will be preserved on the member clusters.
// Default is false, which means resources will be deleted along with the binding object.
// This setting applies to all Work objects created under this binding object.
// +optional
PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
// SchedulePriority represents the scheduling priority assigned to workloads.
// +optional
SchedulePriority *SchedulePriority `json:"schedulePriority,omitempty"`
} }
// ObjectReference contains enough information to locate the referenced object inside current cluster. // ObjectReference contains enough information to locate the referenced object inside current cluster.
@ -202,14 +175,6 @@ type ReplicaRequirements struct {
// ResourceRequest represents the resources required by each replica. // ResourceRequest represents the resources required by each replica.
// +optional // +optional
ResourceRequest corev1.ResourceList `json:"resourceRequest,omitempty"` ResourceRequest corev1.ResourceList `json:"resourceRequest,omitempty"`
// Namespace represents the resources namespaces
// +optional
Namespace string `json:"namespace,omitempty"`
// PriorityClassName represents the resources priorityClassName
// +optional
PriorityClassName string `json:"priorityClassName,omitempty"`
} }
// NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica. // NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica.
@ -244,13 +209,6 @@ type GracefulEvictionTask struct {
// +required // +required
FromCluster string `json:"fromCluster"` FromCluster string `json:"fromCluster"`
// PurgeMode represents how to deal with the legacy applications on the
// cluster from which the application is migrated.
// Valid options are "Immediately", "Graciously" and "Never".
// +kubebuilder:validation:Enum=Immediately;Graciously;Never
// +optional
PurgeMode policyv1alpha1.PurgeMode `json:"purgeMode,omitempty"`
// Replicas indicates the number of replicas should be evicted. // Replicas indicates the number of replicas should be evicted.
// Should be ignored for resource type that doesn't have replica. // Should be ignored for resource type that doesn't have replica.
// +optional // +optional
@ -291,11 +249,6 @@ type GracefulEvictionTask struct {
// +optional // +optional
SuppressDeletion *bool `json:"suppressDeletion,omitempty"` SuppressDeletion *bool `json:"suppressDeletion,omitempty"`
// PreservedLabelState represents the application state information collected from the original cluster,
// and it will be injected into the new cluster in form of application labels.
// +optional
PreservedLabelState map[string]string `json:"preservedLabelState,omitempty"`
// CreationTimestamp is a timestamp representing the server time when this object was // CreationTimestamp is a timestamp representing the server time when this object was
// created. // created.
// Clients should not set this value to avoid the time inconsistency issue. // Clients should not set this value to avoid the time inconsistency issue.
@ -303,10 +256,7 @@ type GracefulEvictionTask struct {
// //
// Populated by the system. Read-only. // Populated by the system. Read-only.
// +optional // +optional
CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"` CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty"`
// ClustersBeforeFailover records the clusters where running the application before failover.
ClustersBeforeFailover []string `json:"clustersBeforeFailover,omitempty"`
} }
// BindingSnapshot is a snapshot of a ResourceBinding or ClusterResourceBinding. // BindingSnapshot is a snapshot of a ResourceBinding or ClusterResourceBinding.
@ -326,31 +276,6 @@ type BindingSnapshot struct {
Clusters []TargetCluster `json:"clusters,omitempty"` Clusters []TargetCluster `json:"clusters,omitempty"`
} }
// Suspension defines the policy for suspending dispatching and scheduling.
type Suspension struct {
policyv1alpha1.Suspension `json:",inline"`
// Scheduling controls whether scheduling should be suspended; the scheduler will pause scheduling and not
// process resource bindings when the value is true, and resume scheduling when it's false or nil.
// This is designed for third-party systems to temporarily pause the scheduling of applications, which enables
// them to manage resource allocation, prioritize critical workloads, etc.
// It is expected that third-party systems use an admission webhook to suspend scheduling at the time of
// ResourceBinding creation. Once a ResourceBinding has been scheduled, it cannot be paused afterward, as it may
// lead to ineffective suspension.
// +optional
Scheduling *bool `json:"scheduling,omitempty"`
}
// SchedulePriority represents the scheduling priority assigned to workloads.
type SchedulePriority struct {
// Priority specifies the scheduling priority for the binding.
// Higher values indicate a higher priority.
// If not explicitly set, the default value is 0.
// +kubebuilder:default=0
// +optional
Priority int32 `json:"priority,omitempty"`
}
// ResourceBindingStatus represents the overall status of the strategy as well as the referenced resources. // ResourceBindingStatus represents the overall status of the strategy as well as the referenced resources.
type ResourceBindingStatus struct { type ResourceBindingStatus struct {
// SchedulerObservedGeneration is the generation(.metadata.generation) observed by the scheduler. // SchedulerObservedGeneration is the generation(.metadata.generation) observed by the scheduler.
@ -364,11 +289,6 @@ type ResourceBindingStatus struct {
// +optional // +optional
SchedulerObservedAffinityName string `json:"schedulerObservingAffinityName,omitempty"` SchedulerObservedAffinityName string `json:"schedulerObservingAffinityName,omitempty"`
// LastScheduledTime representing the latest timestamp when scheduler successfully finished a scheduling.
// It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC.
// +optional
LastScheduledTime *metav1.Time `json:"lastScheduledTime,omitempty"`
// Conditions contain the different condition statuses. // Conditions contain the different condition statuses.
// +optional // +optional
Conditions []metav1.Condition `json:"conditions,omitempty"` Conditions []metav1.Condition `json:"conditions,omitempty"`
@ -463,12 +383,12 @@ const (
// +genclient // +genclient
// +genclient:nonNamespaced // +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=clusterresourcebindings,scope="Cluster",shortName=crb,categories={karmada-io} // +kubebuilder:resource:scope="Cluster",shortName=crb,categories={karmada-io}
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
// +kubebuilder:storageversion // +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="SCHEDULED",type=string // +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="Scheduled",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FULLYAPPLIED",type=string // +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FullyApplied",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date // +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// ClusterResourceBinding represents a binding of a kubernetes resource with a ClusterPropagationPolicy. // ClusterResourceBinding represents a binding of a kubernetes resource with a ClusterPropagationPolicy.
type ClusterResourceBinding struct { type ClusterResourceBinding struct {
@ -483,6 +403,7 @@ type ClusterResourceBinding struct {
Status ResourceBindingStatus `json:"status,omitempty"` Status ResourceBindingStatus `json:"status,omitempty"`
} }
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterResourceBindingList contains a list of ClusterResourceBinding. // ClusterResourceBindingList contains a list of ClusterResourceBinding.
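
Several of the ResourceBindingSpec fields that differ between the two sides (Suspension, SchedulePriority, PreserveResourcesOnDeletion) are simple optional pointers. The sketch below sets them on the API version that has them, with the github.com/karmada-io/api/work/v1alpha2 import path assumed.

package main

import (
	"fmt"

	workv1alpha2 "github.com/karmada-io/api/work/v1alpha2"
)

func main() {
	pauseScheduling := true
	preserve := true
	spec := workv1alpha2.ResourceBindingSpec{
		// Keep the binding out of the scheduler queue until a third-party
		// system flips Scheduling back to false (or nil).
		Suspension: &workv1alpha2.Suspension{
			Scheduling: &pauseScheduling,
		},
		// Higher values are scheduled first; the default is 0.
		SchedulePriority: &workv1alpha2.SchedulePriority{Priority: 10},
		// Keep the propagated resources on member clusters if this binding
		// is deleted.
		PreserveResourcesOnDeletion: &preserve,
	}
	fmt.Println(*spec.Suspension.Scheduling, spec.SchedulePriority.Priority)
}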

View File

@ -16,18 +16,13 @@ limitations under the License.
package v1alpha2 package v1alpha2
import policyv1alpha1 "github.com/karmada-io/api/policy/v1alpha1"
// TaskOptions represents options for GracefulEvictionTasks. // TaskOptions represents options for GracefulEvictionTasks.
type TaskOptions struct { type TaskOptions struct {
purgeMode policyv1alpha1.PurgeMode producer string
producer string reason string
reason string message string
message string gracePeriodSeconds *int32
gracePeriodSeconds *int32 suppressDeletion *bool
suppressDeletion *bool
preservedLabelState map[string]string
clustersBeforeFailover []string
} }
// Option configures a TaskOptions // Option configures a TaskOptions
@ -43,13 +38,6 @@ func NewTaskOptions(opts ...Option) *TaskOptions {
return &options return &options
} }
// WithPurgeMode sets the purgeMode for TaskOptions
func WithPurgeMode(purgeMode policyv1alpha1.PurgeMode) Option {
return func(o *TaskOptions) {
o.purgeMode = purgeMode
}
}
// WithProducer sets the producer for TaskOptions // WithProducer sets the producer for TaskOptions
func WithProducer(producer string) Option { func WithProducer(producer string) Option {
return func(o *TaskOptions) { return func(o *TaskOptions) {
@ -85,20 +73,6 @@ func WithSuppressDeletion(suppressDeletion *bool) Option {
}
}
// WithPreservedLabelState sets the preservedLabelState for TaskOptions
func WithPreservedLabelState(preservedLabelState map[string]string) Option {
return func(o *TaskOptions) {
o.preservedLabelState = preservedLabelState
}
}
// WithClustersBeforeFailover sets the clustersBeforeFailover for TaskOptions
func WithClustersBeforeFailover(clustersBeforeFailover []string) Option {
return func(o *TaskOptions) {
o.clustersBeforeFailover = clustersBeforeFailover
}
}
// TargetContains checks if specific cluster present on the target list.
func (s *ResourceBindingSpec) TargetContains(name string) bool {
for i := range s.Clusters {
@ -179,37 +153,15 @@ func (s *ResourceBindingSpec) GracefulEvictCluster(name string, options *TaskOpt
// build eviction task
evictingCluster := evictCluster.DeepCopy()
evictionTask := GracefulEvictionTask{
FromCluster: evictingCluster.Name,
PurgeMode: options.purgeMode, Reason: options.reason,
Reason: options.reason, Message: options.message,
Message: options.message, Producer: options.producer,
Producer: options.producer, GracePeriodSeconds: options.gracePeriodSeconds,
GracePeriodSeconds: options.gracePeriodSeconds, SuppressDeletion: options.suppressDeletion,
SuppressDeletion: options.suppressDeletion,
PreservedLabelState: options.preservedLabelState,
ClustersBeforeFailover: options.clustersBeforeFailover,
}
if evictingCluster.Replicas > 0 {
evictionTask.Replicas = &evictingCluster.Replicas
}
s.GracefulEvictionTasks = append(s.GracefulEvictionTasks, evictionTask)
}
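The `With*` constructors above follow Go's functional options pattern, and `GracefulEvictCluster` consumes the assembled `TaskOptions` when it converts a scheduled cluster into a graceful eviction task. A minimal usage sketch, assuming the `work/v1alpha2` import path of this repository and using only option constructors and constants that appear in this diff:

```go
package main

import (
	"fmt"

	workv1alpha2 "github.com/karmada-io/api/work/v1alpha2"
)

func main() {
	spec := workv1alpha2.ResourceBindingSpec{
		Clusters: []workv1alpha2.TargetCluster{
			{Name: "member1", Replicas: 2},
			{Name: "member2", Replicas: 3},
		},
	}

	// Assemble eviction options with the functional options shown above and
	// move member1 from the scheduling result into the graceful eviction tasks.
	spec.GracefulEvictCluster("member1", workv1alpha2.NewTaskOptions(
		workv1alpha2.WithProducer(workv1alpha2.EvictionProducerTaintManager),
		workv1alpha2.WithReason(workv1alpha2.EvictionReasonTaintUntolerated),
		workv1alpha2.WithMessage("node taints are no longer tolerated"),
	))

	// member1 is removed from Clusters and recorded as one eviction task.
	fmt.Println(len(spec.Clusters), len(spec.GracefulEvictionTasks)) // 1 1
}
```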
// SchedulingSuspended tells if the scheduling of ResourceBinding or
// ClusterResourceBinding is suspended.
func (s *ResourceBindingSpec) SchedulingSuspended() bool {
if s == nil || s.Suspension == nil || s.Suspension.Scheduling == nil {
return false
}
return *s.Suspension.Scheduling
}
// SchedulePriorityValue returns the scheduling priority declared
// by '.spec.SchedulePriority.Priority'.
func (s *ResourceBindingSpec) SchedulePriorityValue() int32 {
if s.SchedulePriority == nil {
return 0
}
return s.SchedulePriority.Priority
}
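On the main side of the diff, these two nil-safe accessors spare callers from guarding each optional pointer field themselves. A short illustrative sketch; the `shouldEnqueue` helper is hypothetical and not part of the API:

```go
package main

import (
	"fmt"

	workv1alpha2 "github.com/karmada-io/api/work/v1alpha2"
)

// shouldEnqueue is a hypothetical scheduler-side check: it skips bindings whose
// scheduling is suspended and reports the declared priority (0 when unset).
func shouldEnqueue(spec *workv1alpha2.ResourceBindingSpec) (bool, int32) {
	if spec.SchedulingSuspended() {
		return false, 0
	}
	return true, spec.SchedulePriorityValue()
}

func main() {
	ok, prio := shouldEnqueue(&workv1alpha2.ResourceBindingSpec{})
	fmt.Println(ok, prio) // true 0: nothing suspended, no priority declared
}
```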

View File

@ -0,0 +1,371 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"reflect"
"testing"
"k8s.io/utils/pointer"
)
func TestResourceBindingSpec_TargetContains(t *testing.T) {
tests := []struct {
Name string
Spec ResourceBindingSpec
ClusterName string
Expect bool
}{
{
Name: "cluster present in target",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
ClusterName: "m1",
Expect: true,
},
{
Name: "cluster not present in target",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
ClusterName: "m3",
Expect: false,
},
{
Name: "cluster is empty",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
ClusterName: "",
Expect: false,
},
{
Name: "target list is empty",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{}},
ClusterName: "m1",
Expect: false,
},
}
for _, test := range tests {
tc := test
t.Run(tc.Name, func(t *testing.T) {
if tc.Spec.TargetContains(tc.ClusterName) != tc.Expect {
t.Fatalf("expect: %v, but got: %v", tc.Expect, tc.Spec.TargetContains(tc.ClusterName))
}
})
}
}
func TestResourceBindingSpec_AssignedReplicasForCluster(t *testing.T) {
tests := []struct {
Name string
Spec ResourceBindingSpec
ClusterName string
ExpectReplicas int32
}{
{
Name: "returns valid replicas in case cluster present",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}}},
ClusterName: "m1",
ExpectReplicas: 1,
},
{
Name: "returns 0 in case cluster not present",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}}},
ClusterName: "non-exist",
ExpectReplicas: 0,
},
}
for _, test := range tests {
tc := test
t.Run(tc.Name, func(t *testing.T) {
got := tc.Spec.AssignedReplicasForCluster(tc.ClusterName)
if tc.ExpectReplicas != got {
t.Fatalf("expect: %d, but got: %d", tc.ExpectReplicas, got)
}
})
}
}
func TestResourceBindingSpec_RemoveCluster(t *testing.T) {
tests := []struct {
Name string
InputSpec ResourceBindingSpec
ClusterName string
ExpectSpec ResourceBindingSpec
}{
{
Name: "cluster not exist should do nothing",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
ClusterName: "no-exist",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
},
{
Name: "remove cluster from head",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
ClusterName: "m1",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m2"}, {Name: "m3"}}},
},
{
Name: "remove cluster from middle",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
ClusterName: "m2",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m3"}}},
},
{
Name: "remove cluster from tail",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
ClusterName: "m3",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
},
{
Name: "remove cluster from empty list",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{}},
ClusterName: "na",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{}},
},
}
for _, test := range tests {
tc := test
t.Run(tc.Name, func(t *testing.T) {
tc.InputSpec.RemoveCluster(tc.ClusterName)
if !reflect.DeepEqual(tc.InputSpec.Clusters, tc.ExpectSpec.Clusters) {
t.Fatalf("expect: %v, but got: %v", tc.ExpectSpec.Clusters, tc.InputSpec.Clusters)
}
})
}
}
func TestResourceBindingSpec_GracefulEvictCluster(t *testing.T) {
tests := []struct {
Name string
InputSpec ResourceBindingSpec
EvictEvent GracefulEvictionTask
ExpectSpec ResourceBindingSpec
}{
{
Name: "cluster not exist should do nothing",
InputSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}},
},
EvictEvent: GracefulEvictionTask{FromCluster: "non-exist"},
ExpectSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}},
},
},
{
Name: "evict cluster from head",
InputSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
},
EvictEvent: GracefulEvictionTask{
FromCluster: "m1",
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction",
Producer: EvictionProducerTaintManager,
},
ExpectSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
GracefulEvictionTasks: []GracefulEvictionTask{
{
FromCluster: "m1",
Replicas: pointer.Int32(1),
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction",
Producer: EvictionProducerTaintManager,
},
},
},
},
{
Name: "remove cluster from middle",
InputSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
},
EvictEvent: GracefulEvictionTask{
FromCluster: "m2",
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction",
Producer: EvictionProducerTaintManager,
},
ExpectSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m3", Replicas: 3}},
GracefulEvictionTasks: []GracefulEvictionTask{
{
FromCluster: "m2",
Replicas: pointer.Int32(2),
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction",
Producer: EvictionProducerTaintManager,
},
},
},
},
{
Name: "remove cluster from tail",
InputSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
},
EvictEvent: GracefulEvictionTask{
FromCluster: "m3",
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction",
Producer: EvictionProducerTaintManager,
},
ExpectSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
GracefulEvictionTasks: []GracefulEvictionTask{
{
FromCluster: "m3",
Replicas: pointer.Int32(3),
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction",
Producer: EvictionProducerTaintManager,
},
},
},
},
{
Name: "eviction task should be appended to non-empty tasks",
InputSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
GracefulEvictionTasks: []GracefulEvictionTask{{FromCluster: "original-cluster"}},
},
EvictEvent: GracefulEvictionTask{
FromCluster: "m3",
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction",
Producer: EvictionProducerTaintManager,
},
ExpectSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
GracefulEvictionTasks: []GracefulEvictionTask{
{
FromCluster: "original-cluster",
},
{
FromCluster: "m3",
Replicas: pointer.Int32(3),
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction",
Producer: EvictionProducerTaintManager,
},
},
},
},
{
Name: "remove cluster from empty list",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{}},
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{}},
},
{
Name: "same eviction task should not be appended multiple times",
InputSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
GracefulEvictionTasks: []GracefulEvictionTask{
{
FromCluster: "m1",
Replicas: pointer.Int32(1),
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction v1",
Producer: EvictionProducerTaintManager,
},
},
},
EvictEvent: GracefulEvictionTask{
FromCluster: "m1",
Replicas: pointer.Int32(1),
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction v2",
Producer: EvictionProducerTaintManager,
},
ExpectSpec: ResourceBindingSpec{
Clusters: []TargetCluster{{Name: "m2", Replicas: 2}},
GracefulEvictionTasks: []GracefulEvictionTask{
{
FromCluster: "m1",
Replicas: pointer.Int32(1),
Reason: EvictionReasonTaintUntolerated,
Message: "graceful eviction v1",
Producer: EvictionProducerTaintManager,
},
},
},
},
}
for _, test := range tests {
tc := test
t.Run(tc.Name, func(t *testing.T) {
tc.InputSpec.GracefulEvictCluster(tc.EvictEvent.FromCluster, NewTaskOptions(WithProducer(tc.EvictEvent.Producer), WithReason(tc.EvictEvent.Reason), WithMessage(tc.EvictEvent.Message)))
if !reflect.DeepEqual(tc.InputSpec.Clusters, tc.ExpectSpec.Clusters) {
t.Fatalf("expect clusters: %v, but got: %v", tc.ExpectSpec.Clusters, tc.InputSpec.Clusters)
}
if !reflect.DeepEqual(tc.InputSpec.GracefulEvictionTasks, tc.ExpectSpec.GracefulEvictionTasks) {
t.Fatalf("expect tasks: %v, but got: %v", tc.ExpectSpec.GracefulEvictionTasks, tc.InputSpec.GracefulEvictionTasks)
}
})
}
}
func TestResourceBindingSpec_ClusterInGracefulEvictionTasks(t *testing.T) {
gracefulEvictionTasks := []GracefulEvictionTask{
{
FromCluster: "member1",
Producer: EvictionProducerTaintManager,
Reason: EvictionReasonTaintUntolerated,
},
{
FromCluster: "member2",
Producer: EvictionProducerTaintManager,
Reason: EvictionReasonTaintUntolerated,
},
}
tests := []struct {
name string
InputSpec ResourceBindingSpec
targetCluster string
expect bool
}{
{
name: "targetCluster is in the process of eviction",
InputSpec: ResourceBindingSpec{
GracefulEvictionTasks: gracefulEvictionTasks,
},
targetCluster: "member1",
expect: true,
},
{
name: "targetCluster is not in the process of eviction",
InputSpec: ResourceBindingSpec{
GracefulEvictionTasks: gracefulEvictionTasks,
},
targetCluster: "member3",
expect: false,
},
}
for _, test := range tests {
tc := test
t.Run(tc.name, func(t *testing.T) {
result := tc.InputSpec.ClusterInGracefulEvictionTasks(tc.targetCluster)
if result != tc.expect {
t.Errorf("expected: %v, but got: %v", tc.expect, result)
}
})
}
}

View File

@ -34,12 +34,31 @@ const (
// WorkPermanentIDLabel is the ID of Work object.
WorkPermanentIDLabel = "work.karmada.io/permanent-id"
// ResourceBindingUIDLabel is the UID of ResourceBinding object.
ResourceBindingUIDLabel = "resourcebinding.karmada.io/uid"
// ClusterResourceBindingUIDLabel is the uid of ClusterResourceBinding object.
ClusterResourceBindingUIDLabel = "clusterresourcebinding.karmada.io/uid"
// WorkNamespaceAnnotation is added to objects to specify associated Work's namespace.
WorkNamespaceAnnotation = "work.karmada.io/namespace"
// WorkNameAnnotation is added to objects to specify associated Work's name.
WorkNameAnnotation = "work.karmada.io/name"
// WorkUIDLabel is the uid of Work object.
WorkUIDLabel = "work.karmada.io/uid"
// ResourceBindingReferenceKey is the key of ResourceBinding object.
// It is usually a unique hash value of ResourceBinding object's namespace and name, intended to be added to the Work object.
// It will be used to retrieve all Works objects that derived from a specific ResourceBinding object.
ResourceBindingReferenceKey = "resourcebinding.karmada.io/key"
// ClusterResourceBindingReferenceKey is the key of ClusterResourceBinding object.
// It is usually a unique hash value of ClusterResourceBinding object's namespace and name, intended to be added to the Work object.
// It will be used to retrieve all Works objects that derived by a specific ClusterResourceBinding object.
ClusterResourceBindingReferenceKey = "clusterresourcebinding.karmada.io/key"
// ResourceBindingNamespaceAnnotationKey is added to object to describe the associated ResourceBinding's namespace.
// It is added to:
// - Work object: describes the namespace of ResourceBinding which the Work derived from.
@ -58,19 +77,11 @@ const (
// - Manifest in Work object: describes the name of ClusterResourceBinding which the manifest derived from.
ClusterResourceBindingAnnotationKey = "clusterresourcebinding.karmada.io/name"
// BindingManagedByLabel is added to ResourceBinding to represent what kind of resource manages this Binding. // WorkNamespaceLabel is added to objects to specify associated Work's namespace.
BindingManagedByLabel = "binding.karmada.io/managed-by" WorkNamespaceLabel = "work.karmada.io/namespace"
// ResourceTemplateGenerationAnnotationKey records the generation of resource template in Karmada APIServer, // WorkNameLabel is added to objects to specify associated Work's name.
// It will be injected into the resource when propagating to member clusters, to denote the specific version of WorkNameLabel = "work.karmada.io/name"
// the resource template from which the resource is derived. It might be helpful in the following cases:
// 1. Facilitating observation from member clusters to ascertain if the most recent resource template has been
// completely synced.
// 2. The annotation will be synced back to Karmada during the process of syncing resource status,
// by leveraging this annotation, Karmada can infer if the most recent resource template has been completely
// synced on member clusters, then generates accurate observed generation(like Deployment's .status.observedGeneration)
// which might be required by the release system.
ResourceTemplateGenerationAnnotationKey = "resourcetemplate.karmada.io/generation"
)
// Define resource conflict resolution
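The reference-key labels and the resource-template generation annotation in the constant block above are plain string keys. A hedged sketch of how a controller might use them, with illustrative values only (the selector API is `k8s.io/apimachinery/pkg/labels`; the hash and generation numbers are made up):

```go
package main

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Hypothetical hash of the ResourceBinding's namespace/name, as described above.
	bindingKey := "687f7fb96f"

	// Select every Work derived from that binding; a client would pass
	// selector.String() as the label selector of its list call.
	selector := labels.SelectorFromSet(labels.Set{
		"resourcebinding.karmada.io/key": bindingKey,
	})
	fmt.Println(selector.String()) // resourcebinding.karmada.io/key=687f7fb96f

	// Annotations synced back from a member cluster (illustrative value):
	// comparing the recorded template generation with the current one tells
	// whether the latest resource template has been fully applied there.
	synced := map[string]string{"resourcetemplate.karmada.io/generation": "6"}
	observed, _ := strconv.ParseInt(synced["resourcetemplate.karmada.io/generation"], 10, 64)
	currentGeneration := int64(7)
	fmt.Println(observed == currentGeneration) // false: the member cluster lags one generation behind
}
```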

View File

@ -1,22 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha2
@ -149,22 +133,7 @@ func (in *GracefulEvictionTask) DeepCopyInto(out *GracefulEvictionTask) {
*out = new(bool)
**out = **in
}
if in.PreservedLabelState != nil { in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
in, out := &in.PreservedLabelState, &out.PreservedLabelState
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.CreationTimestamp != nil {
in, out := &in.CreationTimestamp, &out.CreationTimestamp
*out = (*in).DeepCopy()
}
if in.ClustersBeforeFailover != nil {
in, out := &in.ClustersBeforeFailover, &out.ClustersBeforeFailover
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -356,25 +325,6 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) {
*out = new(v1alpha1.FailoverBehavior)
(*in).DeepCopyInto(*out)
}
if in.RescheduleTriggeredAt != nil {
in, out := &in.RescheduleTriggeredAt, &out.RescheduleTriggeredAt
*out = (*in).DeepCopy()
}
if in.Suspension != nil {
in, out := &in.Suspension, &out.Suspension
*out = new(Suspension)
(*in).DeepCopyInto(*out)
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
if in.SchedulePriority != nil {
in, out := &in.SchedulePriority, &out.SchedulePriority
*out = new(SchedulePriority)
**out = **in
}
return
}
@ -391,10 +341,6 @@ func (in *ResourceBindingSpec) DeepCopy() *ResourceBindingSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceBindingStatus) DeepCopyInto(out *ResourceBindingStatus) {
*out = *in
if in.LastScheduledTime != nil {
in, out := &in.LastScheduledTime, &out.LastScheduledTime
*out = (*in).DeepCopy()
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
@ -422,44 +368,6 @@ func (in *ResourceBindingStatus) DeepCopy() *ResourceBindingStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SchedulePriority) DeepCopyInto(out *SchedulePriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePriority.
func (in *SchedulePriority) DeepCopy() *SchedulePriority {
if in == nil {
return nil
}
out := new(SchedulePriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Suspension) DeepCopyInto(out *Suspension) {
*out = *in
in.Suspension.DeepCopyInto(&out.Suspension)
if in.Scheduling != nil {
in, out := &in.Scheduling, &out.Scheduling
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Suspension.
func (in *Suspension) DeepCopy() *Suspension {
if in == nil {
return nil
}
out := new(Suspension)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetCluster) DeepCopyInto(out *TargetCluster) {
*out = *in
@ -489,18 +397,6 @@ func (in *TaskOptions) DeepCopyInto(out *TaskOptions) {
*out = new(bool)
**out = **in
}
if in.preservedLabelState != nil {
in, out := &in.preservedLabelState, &out.preservedLabelState
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.clustersBeforeFailover != nil {
in, out := &in.clustersBeforeFailover, &out.clustersBeforeFailover
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@ -1,30 +1,11 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha2
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -46,7 +27,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Deprecated: use Install instead // Depreciated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)