Compare commits

...

18 Commits
v1.6.0 ... main

Author SHA1 Message Date
karmada-bot 2a2d22adf4
Merge pull request #30 from RainbowMango/pr_sync_140
Sync APIs from karmada repo based on v1.14.0
2025-06-23 09:35:00 +08:00
RainbowMango 518c40cffc Sync APIs from karmada repo based on v1.14.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-06-20 12:36:27 +08:00
karmada-bot 4ff33322f2
Merge pull request #28 from RainbowMango/pr_add_code_of_conduct
Add Karmada code of conduct
2025-03-10 15:59:31 +08:00
RainbowMango 86df91cac5 Add Karmada code of conduct
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-03-10 15:57:31 +08:00
karmada-bot 23fd016454
Merge pull request #26 from RainbowMango/pr_sync_130
Sync APIs from karmada repo based on v1.13.0
2025-03-07 17:08:29 +08:00
RainbowMango 2dd201275c Sync APIs from karmada repo based on v1.13.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-03-07 15:43:28 +08:00
karmada-bot 0a396ea23e
Merge pull request #25 from RainbowMango/pr_sync_120
Sync APIs from karmada repo based on v1.12.0
2025-03-06 14:36:28 +08:00
RainbowMango bd61308fb9 Sync APIs from karmada repo based on v1.12.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-03-04 20:16:48 +08:00
karmada-bot 909667dcc9
Merge pull request #24 from RainbowMango/pr_sync_111
Sync APIs from karmada repo based on v1.11.0
2024-10-12 11:01:22 +08:00
RainbowMango 76d6ebe8b2 Sync APIs from karmada repo based on v1.11.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2024-10-11 11:46:54 +08:00
karmada-bot 5e54eda363
Merge pull request #23 from RainbowMango/pr_sync_110
Sync APIs from karmada repo based on v1.10.0
2024-06-27 19:01:52 +08:00
RainbowMango d7949bf20d Sync APIs from karmada repo based on v1.10.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2024-06-26 17:27:47 +08:00
karmada-bot d3650cd3a8
Merge pull request #22 from RainbowMango/pr_sync_19
Sync APIs from karmada repo based on v1.9.0
2024-05-21 10:27:20 +08:00
RainbowMango 24f3cd9e55 Sync APIs from karmada repo based on v1.9.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2024-05-15 11:24:53 +08:00
karmada-bot 81a2cd59ba
Merge pull request #21 from RainbowMango/pr_sync_18
Sync APIs from karmada repo based on v1.8.0
2023-12-09 11:57:03 +08:00
RainbowMango 215e9860d1 Sync APIs from karmada repo based on v1.8.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2023-12-08 20:40:59 +08:00
karmada-bot 1eebc5466b
Merge pull request #20 from RainbowMango/pr_sync_17
Sync APIs from karmada repo based on v1.7.0
2023-11-24 14:22:50 +08:00
RainbowMango eb44fe6db0 Sync APIs from karmada repo based on v1.7.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2023-09-05 15:27:55 +08:00
89 changed files with 4868 additions and 1416 deletions

3
CODE_OF_CONDUCT.md Normal file
View File

@ -0,0 +1,3 @@
# Karmada Community Code of Conduct
Please refer to our [Karmada Community Code of Conduct](https://github.com/karmada-io/community/blob/main/CODE_OF_CONDUCT.md).

21
apps/v1alpha1/doc.go Normal file
View File

@ -0,0 +1,21 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true
// +groupName=apps.karmada.io
package v1alpha1

View File

@ -0,0 +1,151 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
const (
	// ResourceKindWorkloadRebalancer is kind name of WorkloadRebalancer.
	ResourceKindWorkloadRebalancer = "WorkloadRebalancer"
	// ResourceSingularWorkloadRebalancer is singular name of WorkloadRebalancer.
	ResourceSingularWorkloadRebalancer = "workloadrebalancer"
	// ResourcePluralWorkloadRebalancer is plural name of WorkloadRebalancer.
	ResourcePluralWorkloadRebalancer = "workloadrebalancers"
	// ResourceNamespaceScopedWorkloadRebalancer indicates if WorkloadRebalancer is NamespaceScoped.
	// False here: WorkloadRebalancer is cluster-scoped (see the kubebuilder scope marker on the type).
	ResourceNamespaceScopedWorkloadRebalancer = false
)
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:path=workloadrebalancers,scope="Cluster"
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// WorkloadRebalancer represents the desired behavior and status of a job which enforces a resource rebalance.
type WorkloadRebalancer struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec represents the specification of the desired behavior of WorkloadRebalancer.
	// +required
	Spec WorkloadRebalancerSpec `json:"spec"`

	// Status represents the status of WorkloadRebalancer.
	// +optional
	Status WorkloadRebalancerStatus `json:"status,omitempty"`
}
// WorkloadRebalancerSpec represents the specification of the desired behavior of WorkloadRebalancer.
type WorkloadRebalancerSpec struct {
	// Workloads used to specify the list of expected resource.
	// Nil or empty list is not allowed.
	// +kubebuilder:validation:MinItems=1
	// +required
	Workloads []ObjectReference `json:"workloads"`

	// TTLSecondsAfterFinished limits the lifetime of a WorkloadRebalancer that has finished execution (means each
	// target workload is finished with result of Successful or Failed).
	// If this field is set, ttlSecondsAfterFinished after the WorkloadRebalancer finishes, it is eligible to be automatically deleted.
	// If this field is unset, the WorkloadRebalancer won't be automatically deleted.
	// If this field is set to zero, the WorkloadRebalancer becomes eligible to be deleted immediately after it finishes.
	// +optional
	TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
}
// ObjectReference identifies a target resource expected to be rebalanced.
type ObjectReference struct {
	// APIVersion represents the API version of the target resource.
	// +required
	APIVersion string `json:"apiVersion"`

	// Kind represents the Kind of the target resource.
	// +required
	Kind string `json:"kind"`

	// Name of the target resource.
	// +required
	Name string `json:"name"`

	// Namespace of the target resource.
	// Default is empty, which means it is a non-namespace-scoped (cluster-scoped) resource.
	// +optional
	Namespace string `json:"namespace,omitempty"`
}
// WorkloadRebalancerStatus contains information about the current status of a WorkloadRebalancer
// updated periodically by schedule trigger controller.
type WorkloadRebalancerStatus struct {
	// ObservedWorkloads contains information about the execution states and messages of target resources.
	// +optional
	ObservedWorkloads []ObservedWorkload `json:"observedWorkloads,omitempty"`

	// ObservedGeneration is the generation(.metadata.generation) observed by the controller.
	// If ObservedGeneration is less than the generation in metadata means the controller hasn't confirmed
	// the rebalance result or hasn't done the rebalance yet.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`

	// FinishTime represents the finish time of rebalancer.
	// +optional
	FinishTime *metav1.Time `json:"finishTime,omitempty"`
}
// ObservedWorkload records the rebalance outcome observed for a single target resource.
type ObservedWorkload struct {
	// Workload is the reference to the observed resource.
	// +required
	Workload ObjectReference `json:"workload"`

	// Result is the observed rebalance result of the resource.
	// +optional
	Result RebalanceResult `json:"result,omitempty"`

	// Reason represents a machine-readable description of why this resource rebalanced failed.
	// +optional
	Reason RebalanceFailedReason `json:"reason,omitempty"`
}
// RebalanceResult is the specific extent to which the resource has been rebalanced.
type RebalanceResult string

const (
	// RebalanceFailed means the rebalance of the resource failed.
	RebalanceFailed RebalanceResult = "Failed"
	// RebalanceSuccessful means the resource has been successfully rebalanced.
	RebalanceSuccessful RebalanceResult = "Successful"
)
// RebalanceFailedReason represents a machine-readable description of why this resource rebalanced failed.
type RebalanceFailedReason string

const (
	// RebalanceObjectNotFound means the binding referenced by the resource was not found.
	RebalanceObjectNotFound RebalanceFailedReason = "ReferencedBindingNotFound"
)
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// WorkloadRebalancerList contains a list of WorkloadRebalancer
type WorkloadRebalancerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	// Items holds a list of WorkloadRebalancer.
	Items []WorkloadRebalancer `json:"items"`
}

View File

@ -0,0 +1,171 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// ObjectReference holds only string fields, so plain struct assignment is already a full deep copy.
func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
func (in *ObjectReference) DeepCopy() *ObjectReference {
	if in == nil {
		return nil
	}
	out := new(ObjectReference)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Workload (ObjectReference) contains no pointer fields, so assignment is sufficient.
func (in *ObservedWorkload) DeepCopyInto(out *ObservedWorkload) {
	*out = *in
	out.Workload = in.Workload
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservedWorkload.
func (in *ObservedWorkload) DeepCopy() *ObservedWorkload {
	if in == nil {
		return nil
	}
	out := new(ObservedWorkload)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// ObjectMeta, Spec and Status carry pointer/slice fields, so their own DeepCopyInto is used instead of assignment.
func (in *WorkloadRebalancer) DeepCopyInto(out *WorkloadRebalancer) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadRebalancer.
func (in *WorkloadRebalancer) DeepCopy() *WorkloadRebalancer {
	if in == nil {
		return nil
	}
	out := new(WorkloadRebalancer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadRebalancer) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Items is copied element by element because WorkloadRebalancer itself needs a deep copy.
func (in *WorkloadRebalancerList) DeepCopyInto(out *WorkloadRebalancerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]WorkloadRebalancer, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadRebalancerList.
func (in *WorkloadRebalancerList) DeepCopy() *WorkloadRebalancerList {
	if in == nil {
		return nil
	}
	out := new(WorkloadRebalancerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadRebalancerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Workloads uses the builtin copy because ObjectReference has no pointer fields;
// the TTL pointer is duplicated so in and out never alias the same int32.
func (in *WorkloadRebalancerSpec) DeepCopyInto(out *WorkloadRebalancerSpec) {
	*out = *in
	if in.Workloads != nil {
		in, out := &in.Workloads, &out.Workloads
		*out = make([]ObjectReference, len(*in))
		copy(*out, *in)
	}
	if in.TTLSecondsAfterFinished != nil {
		in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadRebalancerSpec.
func (in *WorkloadRebalancerSpec) DeepCopy() *WorkloadRebalancerSpec {
	if in == nil {
		return nil
	}
	out := new(WorkloadRebalancerSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// ObservedWorkloads uses the builtin copy because ObservedWorkload has no pointer fields;
// FinishTime is duplicated via metav1.Time's DeepCopy.
func (in *WorkloadRebalancerStatus) DeepCopyInto(out *WorkloadRebalancerStatus) {
	*out = *in
	if in.ObservedWorkloads != nil {
		in, out := &in.ObservedWorkloads, &out.ObservedWorkloads
		*out = make([]ObservedWorkload, len(*in))
		copy(*out, *in)
	}
	if in.FinishTime != nil {
		in, out := &in.FinishTime, &out.FinishTime
		*out = (*in).DeepCopy()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadRebalancerStatus.
func (in *WorkloadRebalancerStatus) DeepCopy() *WorkloadRebalancerStatus {
	if in == nil {
		return nil
	}
	out := new(WorkloadRebalancerStatus)
	in.DeepCopyInto(out)
	return out
}

View File

@ -0,0 +1,70 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
const GroupName = "apps.karmada.io"

// GroupVersion specifies the group and the version used to register the objects.
var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"}

// SchemeGroupVersion is group version used to register these objects
// Deprecated: use GroupVersion instead.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
// (i.e. the resource name qualified with "apps.karmada.io").
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
	// SchemeBuilder collects the functions that register this group/version's types into a Scheme.
	SchemeBuilder runtime.SchemeBuilder
	// localSchemeBuilder points at SchemeBuilder so registrations via it (see init) are visible to callers of AddToScheme/Install.
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme applies all stored registration functions to a Scheme.
	// Deprecated: use Install instead
	AddToScheme = localSchemeBuilder.AddToScheme
	// Install applies all stored registration functions to a Scheme.
	Install = localSchemeBuilder.AddToScheme
)
// init wires addKnownTypes into the scheme builder at package load time.
func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes)
}
// addKnownTypes adds the list of known types to the given Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&WorkloadRebalancer{},
		&WorkloadRebalancerList{},
	)
	// AddToGroupVersion allows the serialization of client types like ListOptions.
	v1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}

View File

@ -0,0 +1,221 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
autoscalingv2 "k8s.io/api/autoscaling/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=cronfederatedhpas,scope=Namespaced,shortName=cronfhpa,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.scaleTargetRef.kind`,name=`REFERENCE-KIND`,type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.scaleTargetRef.name`,name=`REFERENCE-NAME`,type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name=`AGE`,type=date
// CronFederatedHPA represents a collection of repeating schedule to scale
// replica number of a specific workload. It can scale any resource implementing
// the scale subresource as well as FederatedHPA.
type CronFederatedHPA struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the specification of the CronFederatedHPA.
	// +required
	Spec CronFederatedHPASpec `json:"spec"`

	// Status is the current status of the CronFederatedHPA.
	// NOTE(review): json tag has no omitempty although the field is +optional — verify intended serialization.
	// +optional
	Status CronFederatedHPAStatus `json:"status"`
}
// CronFederatedHPASpec is the specification of the CronFederatedHPA.
type CronFederatedHPASpec struct {
	// ScaleTargetRef points to the target resource to scale.
	// Target resource could be any resource that implementing the scale
	// subresource like Deployment, or FederatedHPA.
	// +required
	ScaleTargetRef autoscalingv2.CrossVersionObjectReference `json:"scaleTargetRef"`

	// Rules contains a collection of schedules that declares when and how
	// the referencing target resource should be scaled.
	// +required
	Rules []CronFederatedHPARule `json:"rules"`
}
// CronFederatedHPARule declares a schedule as well as scale actions.
type CronFederatedHPARule struct {
	// Name of the rule.
	// Each rule in a CronFederatedHPA must have a unique name.
	//
	// Note: the name will be used as an identifier to record its execution
	// history. Changing the name will be considered as deleting the old rule
	// and adding a new rule, that means the original execution history will be
	// discarded.
	//
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:MaxLength=32
	// +required
	Name string `json:"name"`

	// Schedule is the cron expression that represents a periodical time.
	// The syntax follows https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax.
	// +required
	Schedule string `json:"schedule"`

	// TargetReplicas is the target replicas to be scaled for resources
	// referencing by ScaleTargetRef of this CronFederatedHPA.
	// Only needed when referencing resource is not FederatedHPA.
	// +optional
	TargetReplicas *int32 `json:"targetReplicas,omitempty"`

	// TargetMinReplicas is the target MinReplicas to be set for FederatedHPA.
	// Only needed when referencing resource is FederatedHPA.
	// TargetMinReplicas and TargetMaxReplicas can be specified together or
	// either one can be specified alone.
	// nil means the MinReplicas(.spec.minReplicas) of the referencing FederatedHPA
	// will not be updated.
	// +optional
	TargetMinReplicas *int32 `json:"targetMinReplicas,omitempty"`

	// TargetMaxReplicas is the target MaxReplicas to be set for FederatedHPA.
	// Only needed when referencing resource is FederatedHPA.
	// TargetMinReplicas and TargetMaxReplicas can be specified together or
	// either one can be specified alone.
	// nil means the MaxReplicas(.spec.maxReplicas) of the referencing FederatedHPA
	// will not be updated.
	// +optional
	TargetMaxReplicas *int32 `json:"targetMaxReplicas,omitempty"`

	// Suspend tells the controller to suspend subsequent executions.
	// Defaults to false.
	//
	// +kubebuilder:default=false
	// +optional
	Suspend *bool `json:"suspend,omitempty"`

	// TimeZone for the giving schedule.
	// If not specified, this will default to the time zone of the
	// karmada-controller-manager process.
	// Invalid TimeZone will be rejected when applying by karmada-webhook.
	// see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones for the
	// all timezones.
	// +optional
	TimeZone *string `json:"timeZone,omitempty"`

	// SuccessfulHistoryLimit represents the count of successful execution items
	// for each rule.
	// The value must be a positive integer. It defaults to 3.
	//
	// +kubebuilder:default=3
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=32
	// +optional
	SuccessfulHistoryLimit *int32 `json:"successfulHistoryLimit,omitempty"`

	// FailedHistoryLimit represents the count of failed execution items for
	// each rule.
	// The value must be a positive integer. It defaults to 3.
	//
	// +kubebuilder:default=3
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=32
	// +optional
	FailedHistoryLimit *int32 `json:"failedHistoryLimit,omitempty"`
}
// CronFederatedHPAStatus represents the current status of a CronFederatedHPA.
type CronFederatedHPAStatus struct {
	// ExecutionHistories record the execution histories of CronFederatedHPARule.
	// +optional
	ExecutionHistories []ExecutionHistory `json:"executionHistories,omitempty"`
}
// ExecutionHistory records the execution history of specific CronFederatedHPARule.
type ExecutionHistory struct {
	// RuleName is the name of the CronFederatedHPARule.
	// +required
	RuleName string `json:"ruleName"`

	// NextExecutionTime is the next time to execute.
	// Nil means the rule has been suspended.
	// +optional
	NextExecutionTime *metav1.Time `json:"nextExecutionTime,omitempty"`

	// SuccessfulExecutions records successful executions.
	// +optional
	SuccessfulExecutions []SuccessfulExecution `json:"successfulExecutions,omitempty"`

	// FailedExecutions records failed executions.
	// +optional
	FailedExecutions []FailedExecution `json:"failedExecutions,omitempty"`
}
// SuccessfulExecution records a successful execution.
type SuccessfulExecution struct {
	// ScheduleTime is the expected execution time declared in CronFederatedHPARule.
	// +required
	ScheduleTime *metav1.Time `json:"scheduleTime"`

	// ExecutionTime is the actual execution time of CronFederatedHPARule.
	// Tasks may not always be executed at ScheduleTime. ExecutionTime is used
	// to evaluate the efficiency of the controller's execution.
	// +required
	ExecutionTime *metav1.Time `json:"executionTime"`

	// AppliedReplicas is the replicas that have been applied.
	// It is required if .spec.rules[*].targetReplicas is not empty.
	// +optional
	AppliedReplicas *int32 `json:"appliedReplicas,omitempty"`

	// AppliedMaxReplicas is the MaxReplicas that has been applied.
	// It is required if .spec.rules[*].targetMaxReplicas is not empty.
	// +optional
	AppliedMaxReplicas *int32 `json:"appliedMaxReplicas,omitempty"`

	// AppliedMinReplicas is the MinReplicas that has been applied.
	// It is required if .spec.rules[*].targetMinReplicas is not empty.
	// +optional
	AppliedMinReplicas *int32 `json:"appliedMinReplicas,omitempty"`
}
// FailedExecution records a failed execution.
type FailedExecution struct {
	// ScheduleTime is the expected execution time declared in CronFederatedHPARule.
	// +required
	ScheduleTime *metav1.Time `json:"scheduleTime"`

	// ExecutionTime is the actual execution time of CronFederatedHPARule.
	// Tasks may not always be executed at ScheduleTime. ExecutionTime is used
	// to evaluate the efficiency of the controller's execution.
	// +required
	ExecutionTime *metav1.Time `json:"executionTime"`

	// Message is the human-readable message indicating details about the failure.
	// +required
	Message string `json:"message"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CronFederatedHPAList contains a list of CronFederatedHPA.
type CronFederatedHPAList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	// Items holds a list of CronFederatedHPA.
	Items []CronFederatedHPA `json:"items"`
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true

View File

@ -1,3 +1,19 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -8,7 +24,13 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=fhpa,categories={karmada-io}
// +kubebuilder:resource:path=federatedhpas,scope=Namespaced,shortName=fhpa,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.scaleTargetRef.kind`,name=`REFERENCE-KIND`,type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.scaleTargetRef.name`,name=`REFERENCE-NAME`,type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.minReplicas`,name=`MINPODS`,type=integer
// +kubebuilder:printcolumn:JSONPath=`.spec.maxReplicas`,name=`MAXPODS`,type=integer
// +kubebuilder:printcolumn:JSONPath=`.status.currentReplicas`,name=`REPLICAS`,type=integer
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name=`AGE`,type=date
// FederatedHPA is centralized HPA that can aggregate the metrics in multiple clusters.
// When the system load increases, it will query the metrics from multiple clusters and scales up the replicas.
@ -18,7 +40,7 @@ type FederatedHPA struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec is the specification of the FederatedHPASpec.
// Spec is the specification of the FederatedHPA.
// +required
Spec FederatedHPASpec `json:"spec"`
@ -35,17 +57,16 @@ type FederatedHPASpec struct {
// +required
ScaleTargetRef autoscalingv2.CrossVersionObjectReference `json:"scaleTargetRef"`
// minReplicas is the lower limit for the number of replicas to which the autoscaler
// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
// metric is configured. Scaling is active as long as at least one metric value is
// available.
// MinReplicas is the lower limit for the number of replicas to which the
// autoscaler can scale down.
// It defaults to 1 pod.
// +optional
MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
MinReplicas *int32 `json:"minReplicas,omitempty"`
// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
// MaxReplicas is the upper limit for the number of replicas to which the
// autoscaler can scale up.
// It cannot be less that minReplicas.
MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
MaxReplicas int32 `json:"maxReplicas"`
// Metrics contains the specifications for which to use to calculate the
// desired replica count (the maximum replica count across all metrics will

View File

@ -0,0 +1,42 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
	// FederatedHPAKind is the kind of FederatedHPA in group autoscaling.karmada.io.
	FederatedHPAKind = "FederatedHPA"

	// QuerySourceAnnotationKey is the annotation used in karmada-metrics-adapter to
	// record the query source cluster.
	QuerySourceAnnotationKey = "resource.karmada.io/query-from-cluster"

	// ResourceSingularFederatedHPA is singular name of FederatedHPA.
	ResourceSingularFederatedHPA = "federatedhpa"
	// ResourcePluralFederatedHPA is plural name of FederatedHPA.
	ResourcePluralFederatedHPA = "federatedhpas"
	// ResourceNamespaceScopedFederatedHPA indicates whether FederatedHPA is namespace-scoped (true).
	ResourceNamespaceScopedFederatedHPA = true

	// ResourceKindCronFederatedHPA is kind name of CronFederatedHPA.
	ResourceKindCronFederatedHPA = "CronFederatedHPA"
	// ResourceSingularCronFederatedHPA is singular name of CronFederatedHPA.
	ResourceSingularCronFederatedHPA = "cronfederatedhpa"
	// ResourcePluralCronFederatedHPA is plural name of CronFederatedHPA.
	ResourcePluralCronFederatedHPA = "cronfederatedhpas"
	// ResourceNamespaceScopedCronFederatedHPA indicates whether CronFederatedHPA is namespace-scoped (true).
	ResourceNamespaceScopedCronFederatedHPA = true
)

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -10,6 +26,223 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronFederatedHPA) DeepCopyInto(out *CronFederatedHPA) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy returns a new, independent copy of the receiver, or nil for a nil receiver.
func (in *CronFederatedHPA) DeepCopy() *CronFederatedHPA {
	if in == nil {
		return nil
	}
	clone := new(CronFederatedHPA)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyObject implements runtime.Object by returning a deep copy of the
// receiver. It returns an untyped nil for a nil receiver so callers never see
// a non-nil interface wrapping a nil pointer.
func (in *CronFederatedHPA) DeepCopyObject() runtime.Object {
	if in == nil {
		return nil
	}
	return in.DeepCopy()
}
// DeepCopyInto writes a deep copy of the receiver into out, duplicating the
// Items slice element by element. in must be non-nil.
func (in *CronFederatedHPAList) DeepCopyInto(out *CronFederatedHPAList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		out.Items = make([]CronFederatedHPA, len(in.Items))
		for i := range in.Items {
			in.Items[i].DeepCopyInto(&out.Items[i])
		}
	}
}
// DeepCopy returns a new, independent copy of the receiver, or nil for a nil receiver.
func (in *CronFederatedHPAList) DeepCopy() *CronFederatedHPAList {
	if in == nil {
		return nil
	}
	clone := new(CronFederatedHPAList)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyObject implements runtime.Object by returning a deep copy of the
// receiver, or an untyped nil for a nil receiver.
func (in *CronFederatedHPAList) DeepCopyObject() runtime.Object {
	if in == nil {
		return nil
	}
	return in.DeepCopy()
}
// DeepCopyInto writes a deep copy of the receiver into out, allocating a fresh
// value for every non-nil pointer field so the two rules share no memory.
// in must be non-nil.
func (in *CronFederatedHPARule) DeepCopyInto(out *CronFederatedHPARule) {
	*out = *in
	if in.TargetReplicas != nil {
		out.TargetReplicas = new(int32)
		*out.TargetReplicas = *in.TargetReplicas
	}
	if in.TargetMinReplicas != nil {
		out.TargetMinReplicas = new(int32)
		*out.TargetMinReplicas = *in.TargetMinReplicas
	}
	if in.TargetMaxReplicas != nil {
		out.TargetMaxReplicas = new(int32)
		*out.TargetMaxReplicas = *in.TargetMaxReplicas
	}
	if in.Suspend != nil {
		out.Suspend = new(bool)
		*out.Suspend = *in.Suspend
	}
	if in.TimeZone != nil {
		out.TimeZone = new(string)
		*out.TimeZone = *in.TimeZone
	}
	if in.SuccessfulHistoryLimit != nil {
		out.SuccessfulHistoryLimit = new(int32)
		*out.SuccessfulHistoryLimit = *in.SuccessfulHistoryLimit
	}
	if in.FailedHistoryLimit != nil {
		out.FailedHistoryLimit = new(int32)
		*out.FailedHistoryLimit = *in.FailedHistoryLimit
	}
}
// DeepCopy returns a new, independent copy of the receiver, or nil for a nil receiver.
func (in *CronFederatedHPARule) DeepCopy() *CronFederatedHPARule {
	if in == nil {
		return nil
	}
	clone := new(CronFederatedHPARule)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto writes a deep copy of the receiver into out, duplicating the
// Rules slice element by element. in must be non-nil.
func (in *CronFederatedHPASpec) DeepCopyInto(out *CronFederatedHPASpec) {
	*out = *in
	out.ScaleTargetRef = in.ScaleTargetRef
	if in.Rules != nil {
		out.Rules = make([]CronFederatedHPARule, len(in.Rules))
		for i := range in.Rules {
			in.Rules[i].DeepCopyInto(&out.Rules[i])
		}
	}
}
// DeepCopy returns a new, independent copy of the receiver, or nil for a nil receiver.
func (in *CronFederatedHPASpec) DeepCopy() *CronFederatedHPASpec {
	if in == nil {
		return nil
	}
	clone := new(CronFederatedHPASpec)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto writes a deep copy of the receiver into out, duplicating the
// ExecutionHistories slice element by element. in must be non-nil.
func (in *CronFederatedHPAStatus) DeepCopyInto(out *CronFederatedHPAStatus) {
	*out = *in
	if in.ExecutionHistories != nil {
		out.ExecutionHistories = make([]ExecutionHistory, len(in.ExecutionHistories))
		for i := range in.ExecutionHistories {
			in.ExecutionHistories[i].DeepCopyInto(&out.ExecutionHistories[i])
		}
	}
}
// DeepCopy returns a new, independent copy of the receiver, or nil for a nil receiver.
func (in *CronFederatedHPAStatus) DeepCopy() *CronFederatedHPAStatus {
	if in == nil {
		return nil
	}
	clone := new(CronFederatedHPAStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto writes a deep copy of the receiver into out: the timestamp
// pointer and both execution-record slices are duplicated so that no memory
// is shared. in must be non-nil.
func (in *ExecutionHistory) DeepCopyInto(out *ExecutionHistory) {
	*out = *in
	if in.NextExecutionTime != nil {
		out.NextExecutionTime = in.NextExecutionTime.DeepCopy()
	}
	if in.SuccessfulExecutions != nil {
		out.SuccessfulExecutions = make([]SuccessfulExecution, len(in.SuccessfulExecutions))
		for i := range in.SuccessfulExecutions {
			in.SuccessfulExecutions[i].DeepCopyInto(&out.SuccessfulExecutions[i])
		}
	}
	if in.FailedExecutions != nil {
		out.FailedExecutions = make([]FailedExecution, len(in.FailedExecutions))
		for i := range in.FailedExecutions {
			in.FailedExecutions[i].DeepCopyInto(&out.FailedExecutions[i])
		}
	}
}
// DeepCopy returns a new, independent copy of the receiver, or nil for a nil receiver.
func (in *ExecutionHistory) DeepCopy() *ExecutionHistory {
	if in == nil {
		return nil
	}
	clone := new(ExecutionHistory)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto writes a deep copy of the receiver into out, duplicating both
// timestamp pointers. in must be non-nil.
func (in *FailedExecution) DeepCopyInto(out *FailedExecution) {
	*out = *in
	if in.ScheduleTime != nil {
		out.ScheduleTime = in.ScheduleTime.DeepCopy()
	}
	if in.ExecutionTime != nil {
		out.ExecutionTime = in.ExecutionTime.DeepCopy()
	}
}
// DeepCopy returns a new, independent copy of the receiver, or nil for a nil receiver.
func (in *FailedExecution) DeepCopy() *FailedExecution {
	if in == nil {
		return nil
	}
	clone := new(FailedExecution)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FederatedHPA) DeepCopyInto(out *FederatedHPA) {
*out = *in
@ -104,3 +337,42 @@ func (in *FederatedHPASpec) DeepCopy() *FederatedHPASpec {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto writes a deep copy of the receiver into out, duplicating the
// timestamp pointers and the applied-replica count pointers so that no memory
// is shared. in must be non-nil.
func (in *SuccessfulExecution) DeepCopyInto(out *SuccessfulExecution) {
	*out = *in
	if in.ScheduleTime != nil {
		out.ScheduleTime = in.ScheduleTime.DeepCopy()
	}
	if in.ExecutionTime != nil {
		out.ExecutionTime = in.ExecutionTime.DeepCopy()
	}
	if in.AppliedReplicas != nil {
		out.AppliedReplicas = new(int32)
		*out.AppliedReplicas = *in.AppliedReplicas
	}
	if in.AppliedMaxReplicas != nil {
		out.AppliedMaxReplicas = new(int32)
		*out.AppliedMaxReplicas = *in.AppliedMaxReplicas
	}
	if in.AppliedMinReplicas != nil {
		out.AppliedMinReplicas = new(int32)
		*out.AppliedMinReplicas = *in.AppliedMinReplicas
	}
}
// DeepCopy returns a new, independent copy of the receiver, or nil for a nil receiver.
func (in *SuccessfulExecution) DeepCopy() *SuccessfulExecution {
	if in == nil {
		return nil
	}
	clone := new(SuccessfulExecution)
	in.DeepCopyInto(clone)
	return clone
}

View File

@ -1,11 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -27,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)
@ -42,6 +61,8 @@ func init() {
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&CronFederatedHPA{},
&CronFederatedHPAList{},
&FederatedHPA{},
&FederatedHPAList{},
)

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cluster is the internal version of the API.
// +k8s:deepcopy-gen=package
// +groupName=cluster.karmada.io

View File

@ -1,7 +1,24 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/karmada-io/api/cluster"
@ -11,6 +28,6 @@ import (
// Install registers the API group and adds types to a scheme.
func Install(scheme *runtime.Scheme) {
utilruntime.Must(cluster.AddToScheme(scheme))
utilruntime.Must(clusterv1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(clusterv1alpha1.SchemeGroupVersion))
utilruntime.Must(clusterv1alpha1.Install(scheme))
utilruntime.Must(scheme.SetVersionPriority(schema.GroupVersion{Group: clusterv1alpha1.GroupVersion.Group, Version: clusterv1alpha1.GroupVersion.Version}))
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mutation
import (
@ -18,11 +34,20 @@ const (
// MutateCluster mutates required fields of the Cluster.
func MutateCluster(cluster *clusterapis.Cluster) {
MutateClusterTaints(cluster.Spec.Taints)
mutateClusterTaints(cluster.Spec.Taints)
migrateZoneToZones(cluster)
}
// MutateClusterTaints add TimeAdded field for cluster NoExecute taints only if TimeAdded not set.
func MutateClusterTaints(taints []corev1.Taint) {
// migrateZoneToZones populates the Zones field from the deprecated Zone field
// when only Zone is set, then clears Zone so the value lives in one place.
func migrateZoneToZones(cluster *clusterapis.Cluster) {
	spec := &cluster.Spec
	if spec.Zone == "" || len(spec.Zones) != 0 {
		return
	}
	spec.Zones = append(spec.Zones, spec.Zone)
	spec.Zone = ""
}
// mutateClusterTaints add TimeAdded field for cluster NoExecute taints only if TimeAdded not set.
func mutateClusterTaints(taints []corev1.Taint) {
for i := range taints {
if taints[i].Effect == corev1.TaintEffectNoExecute && taints[i].TimeAdded == nil {
now := metav1.Now()
@ -59,12 +84,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 0,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(1, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(0, resource.BinarySI),
Max: *resource.NewQuantity(4*GB, resource.BinarySI),
},
@ -74,12 +99,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 1,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(1, resource.DecimalSI),
Max: *resource.NewQuantity(2, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(4*GB, resource.BinarySI),
Max: *resource.NewQuantity(16*GB, resource.BinarySI),
},
@ -89,12 +114,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 2,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(2, resource.DecimalSI),
Max: *resource.NewQuantity(4, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(16*GB, resource.BinarySI),
Max: *resource.NewQuantity(32*GB, resource.BinarySI),
},
@ -104,12 +129,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 3,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(4, resource.DecimalSI),
Max: *resource.NewQuantity(8, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(32*GB, resource.BinarySI),
Max: *resource.NewQuantity(64*GB, resource.BinarySI),
},
@ -119,12 +144,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 4,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(8, resource.DecimalSI),
Max: *resource.NewQuantity(16, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(64*GB, resource.BinarySI),
Max: *resource.NewQuantity(128*GB, resource.BinarySI),
},
@ -134,12 +159,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 5,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(16, resource.DecimalSI),
Max: *resource.NewQuantity(32, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(128*GB, resource.BinarySI),
Max: *resource.NewQuantity(256*GB, resource.BinarySI),
},
@ -149,12 +174,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 6,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(32, resource.DecimalSI),
Max: *resource.NewQuantity(64, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(256*GB, resource.BinarySI),
Max: *resource.NewQuantity(512*GB, resource.BinarySI),
},
@ -164,12 +189,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 7,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(64, resource.DecimalSI),
Max: *resource.NewQuantity(128, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(512*GB, resource.BinarySI),
Max: *resource.NewQuantity(1024*GB, resource.BinarySI),
},
@ -179,12 +204,12 @@ func SetDefaultClusterResourceModels(cluster *clusterapis.Cluster) {
Grade: 8,
Ranges: []clusterapis.ResourceModelRange{
{
Name: clusterapis.ResourceCPU,
Name: corev1.ResourceCPU,
Min: *resource.NewQuantity(128, resource.DecimalSI),
Max: *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
},
{
Name: clusterapis.ResourceMemory,
Name: corev1.ResourceMemory,
Min: *resource.NewQuantity(1024*GB, resource.BinarySI),
Max: *resource.NewQuantity(math.MaxInt64, resource.BinarySI),
},

View File

@ -1,331 +0,0 @@
package mutation
import (
"math"
"reflect"
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clusterapis "github.com/karmada-io/api/cluster"
)
// TestMutateCluster verifies that MutateCluster stamps TimeAdded on every
// NoExecute taint of the cluster.
func TestMutateCluster(t *testing.T) {
	cluster := &clusterapis.Cluster{
		Spec: clusterapis.ClusterSpec{
			Taints: []corev1.Taint{
				{Key: "foo", Value: "abc", Effect: corev1.TaintEffectNoSchedule},
				{Key: "bar", Effect: corev1.TaintEffectNoExecute},
			},
		},
	}
	t.Run("test mutate cluster", func(t *testing.T) {
		MutateCluster(cluster)
		for i := range cluster.Spec.Taints {
			taint := cluster.Spec.Taints[i]
			if taint.Effect == corev1.TaintEffectNoExecute && taint.TimeAdded == nil {
				t.Error("failed to mutate cluster, taints TimeAdded should not be nil")
			}
		}
	})
}
// TestStandardizeClusterResourceModels verifies that StandardizeClusterResourceModels
// sorts resource models by grade and normalizes the boundaries: the first grade's
// minimum is forced to 0 and the last grade's maximum to MaxInt64.
func TestStandardizeClusterResourceModels(t *testing.T) {
	testCases := map[string]struct {
		models         []clusterapis.ResourceModel
		expectedModels []clusterapis.ResourceModel
	}{
		"sort models": {
			models: []clusterapis.ResourceModel{
				{
					Grade: 2,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: clusterapis.ResourceCPU,
							Min:  *resource.NewQuantity(2, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: clusterapis.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(2, resource.DecimalSI),
						},
					},
				},
			},
			expectedModels: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: clusterapis.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(2, resource.DecimalSI),
						},
					},
				},
				{
					Grade: 2,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: clusterapis.ResourceCPU,
							Min:  *resource.NewQuantity(2, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
			},
		},
		"start with 0": {
			models: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: clusterapis.ResourceCPU,
							Min:  *resource.NewQuantity(1, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
			},
			expectedModels: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: clusterapis.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
			},
		},
		"end with MaxInt64": {
			models: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: clusterapis.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(2, resource.DecimalSI),
						},
					},
				},
			},
			expectedModels: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: clusterapis.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
			},
		},
	}
	// Run every case as a subtest so one failure does not hide the others
	// and failures are attributed to the case name.
	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			StandardizeClusterResourceModels(testCase.models)
			if !reflect.DeepEqual(testCase.models, testCase.expectedModels) {
				t.Errorf("expected resource models %+v, but got %+v", testCase.expectedModels, testCase.models)
			}
		})
	}
}
// TestSetDefaultClusterResourceModels verifies that SetDefaultClusterResourceModels
// installs the nine default CPU/memory grades (0-8) on a cluster.
//
// Fixes over the previous version: the DeepEqual assertion now runs inside the
// t.Run closure (it previously sat after the subtest, so a failure was not
// attributed to the subtest), and the error message typo "bud get" is corrected.
func TestSetDefaultClusterResourceModels(t *testing.T) {
	type args struct {
		cluster *clusterapis.Cluster
	}
	tests := []struct {
		name       string
		args       args
		wantModels []clusterapis.ResourceModel
	}{
		{
			name: "test set default Cluster",
			args: args{
				cluster: &clusterapis.Cluster{},
			},
			wantModels: []clusterapis.ResourceModel{
				{
					Grade: 0,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(0, resource.DecimalSI), Max: *resource.NewQuantity(1, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(0, resource.BinarySI), Max: *resource.NewQuantity(4*GB, resource.BinarySI)},
					},
				},
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(1, resource.DecimalSI), Max: *resource.NewQuantity(2, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(4*GB, resource.BinarySI), Max: *resource.NewQuantity(16*GB, resource.BinarySI)},
					},
				},
				{
					Grade: 2,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(2, resource.DecimalSI), Max: *resource.NewQuantity(4, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(16*GB, resource.BinarySI), Max: *resource.NewQuantity(32*GB, resource.BinarySI)},
					},
				},
				{
					Grade: 3,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(4, resource.DecimalSI), Max: *resource.NewQuantity(8, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(32*GB, resource.BinarySI), Max: *resource.NewQuantity(64*GB, resource.BinarySI)},
					},
				},
				{
					Grade: 4,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(8, resource.DecimalSI), Max: *resource.NewQuantity(16, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(64*GB, resource.BinarySI), Max: *resource.NewQuantity(128*GB, resource.BinarySI)},
					},
				},
				{
					Grade: 5,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(16, resource.DecimalSI), Max: *resource.NewQuantity(32, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(128*GB, resource.BinarySI), Max: *resource.NewQuantity(256*GB, resource.BinarySI)},
					},
				},
				{
					Grade: 6,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(32, resource.DecimalSI), Max: *resource.NewQuantity(64, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(256*GB, resource.BinarySI), Max: *resource.NewQuantity(512*GB, resource.BinarySI)},
					},
				},
				{
					Grade: 7,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(64, resource.DecimalSI), Max: *resource.NewQuantity(128, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(512*GB, resource.BinarySI), Max: *resource.NewQuantity(1024*GB, resource.BinarySI)},
					},
				},
				{
					Grade: 8,
					Ranges: []clusterapis.ResourceModelRange{
						{Name: clusterapis.ResourceCPU, Min: *resource.NewQuantity(128, resource.DecimalSI), Max: *resource.NewQuantity(math.MaxInt64, resource.DecimalSI)},
						{Name: clusterapis.ResourceMemory, Min: *resource.NewQuantity(1024*GB, resource.BinarySI), Max: *resource.NewQuantity(math.MaxInt64, resource.BinarySI)},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			SetDefaultClusterResourceModels(tt.args.cluster)
			if !reflect.DeepEqual(tt.args.cluster.Spec.ResourceModels, tt.wantModels) {
				t.Errorf("SetDefaultClusterResourceModels expected resourceModels %+v, but got %+v", tt.wantModels, tt.args.cluster.Spec.ResourceModels)
			}
		})
	}
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
@ -6,33 +22,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string
// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
// ResourceCPU in cores. (e,g. 500m = .5 cores)
ResourceCPU ResourceName = "cpu"
// ResourceMemory in bytes. (e,g. 500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceMemory ResourceName = "memory"
// ResourceStorage is volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
ResourceStorage ResourceName = "storage"
// ResourceEphemeralStorage is local ephemeral storage, in bytes. (e,g. 500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
ResourceEphemeralStorage ResourceName = "ephemeral-storage"
)
//revive:disable:exported
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Cluster represents the desire state and status of a member cluster.
// Cluster represents the desired state and status of a member cluster.
type Cluster struct {
metav1.TypeMeta
metav1.ObjectMeta
@ -48,8 +44,8 @@ type Cluster struct {
// ClusterSpec defines the desired state of a member cluster.
type ClusterSpec struct {
// ID is the unique identifier for the cluster.
// It is different from the object uid(.metadata.uid) and typically collected automatically
// from member cluster during the progress of registration.
// It is different from the object uid(.metadata.uid) and is typically collected automatically
// from each member cluster during the process of registration.
//
// The value is collected in order:
// 1. If the registering cluster enabled ClusterProperty API and defined the cluster ID by
@ -67,7 +63,7 @@ type ClusterSpec struct {
// +kubebuilder:validation:Maxlength=128000
ID string `json:"id,omitempty"`
// SyncMode describes how a cluster sync resources from karmada control plane.
// SyncMode describes how a cluster syncs resources from karmada control plane.
// +required
SyncMode ClusterSyncMode
@ -76,14 +72,14 @@ type ClusterSpec struct {
// +optional
APIEndpoint string
// SecretRef represents the secret contains mandatory credentials to access the member cluster.
// SecretRef represents the secret that contains mandatory credentials to access the member cluster.
// The secret should hold credentials as follows:
// - secret.data.token
// - secret.data.caBundle
// +optional
SecretRef *LocalSecretReference
// ImpersonatorSecretRef represents the secret contains the token of impersonator.
// ImpersonatorSecretRef represents the secret that contains the token of impersonator.
// The secret should hold credentials as follows:
// - secret.data.token
// +optional
@ -98,12 +94,12 @@ type ClusterSpec struct {
// ProxyURL is the proxy URL for the cluster.
// If not empty, the karmada control plane will use this proxy to talk to the cluster.
// More details please refer to: https://github.com/kubernetes/client-go/issues/351
// For more details please refer to: https://github.com/kubernetes/client-go/issues/351
// +optional
ProxyURL string
// ProxyHeader is the HTTP header required by proxy server.
// The key in the key-value pair is HTTP header key and value is the associated header payloads.
// The key in the key-value pair is HTTP header key and the value is the associated header payloads.
// For the header with multiple values, the values should be separated by comma(e.g. 'k1': 'v1,v2,v3').
// +optional
ProxyHeader map[string]string
@ -112,15 +108,25 @@ type ClusterSpec struct {
// +optional
Provider string
// Region represents the region of the member cluster locate in.
// Region represents the region in which the member cluster is located.
// +optional
Region string
// Zone represents the zone of the member cluster locate in.
// Zone represents the zone in which the member cluster is located.
// Deprecated: This field was never used by Karmada, and it will not be
// removed from v1alpha1 for backward compatibility, use Zones instead.
// +optional
Zone string
// Taints attached to the member cluster.
// Zones represents the failure zones(also called availability zones) of the
// member cluster. The zones are presented as a slice to support the case
// that cluster runs across multiple failure zones.
// Refer https://kubernetes.io/docs/setup/best-practices/multiple-zones/ for
// more details about running Kubernetes in multiple zones.
// +optional
Zones []string `json:"zones,omitempty"`
// Taints are attached to the member cluster.
// Taints on the cluster have the "effect" on
// any resource that does not tolerate the Taint.
// +optional
@ -198,8 +204,8 @@ type ResourceModel struct {
// ResourceModelRange describes the detail of each modeling quota that ranges from min to max.
// Please pay attention, by default, the value of min can be inclusive, and the value of max cannot be inclusive.
// E.g. in an interval, min = 2, max =10 is set, which means the interval [2,10).
// This rule ensure that all intervals have the same meaning. If the last interval is infinite,
// E.g. in an interval, min = 2, max = 10 is set, which means the interval [2,10).
// This rule ensures that all intervals have the same meaning. If the last interval is infinite,
// it is definitely unreachable. Therefore, we define the right interval as the open interval.
// For a valid interval, the value on the right is greater than the value on the left,
// in other words, max must be greater than min.
@ -207,7 +213,7 @@ type ResourceModel struct {
type ResourceModelRange struct {
// Name is the name for the resource that you want to categorize.
// +required
Name ResourceName
Name corev1.ResourceName
// Min is the minimum amount of this resource represented by resource name.
// Note: The Min value of first grade(usually 0) always acts as zero.
@ -236,13 +242,13 @@ const (
type ClusterSyncMode string
const (
// Push means that the controller on the karmada control plane will in charge of synchronization.
// The controller watches resources change on karmada control plane then pushes them to member cluster.
// Push means that the controller on the karmada control plane will be in charge of synchronization.
// The controller watches resources change on karmada control plane and then pushes them to member cluster.
Push ClusterSyncMode = "Push"
// Pull means that the controller running on the member cluster will in charge of synchronization.
// The controller, as well known as 'agent', watches resources change on karmada control plane then fetches them
// and applies locally on the member cluster.
// Pull means that the controller running on the member cluster will be in charge of synchronization.
// The controller, also known as 'agent', watches resources change on karmada control plane, then fetches them
// and applies them locally on the member cluster.
Pull ClusterSyncMode = "Pull"
)
@ -252,7 +258,7 @@ type LocalSecretReference struct {
// Namespace is the namespace for the resource being referenced.
Namespace string
// Name is the name of resource being referenced.
// Name is the name of the resource being referenced.
Name string
}
@ -260,6 +266,9 @@ type LocalSecretReference struct {
const (
// ClusterConditionReady means the cluster is healthy and ready to accept workloads.
ClusterConditionReady = "Ready"
// ClusterConditionCompleteAPIEnablements indicates whether the cluster's API enablements(.status.apiEnablements) are complete.
ClusterConditionCompleteAPIEnablements = "CompleteAPIEnablements"
)
// ClusterStatus contains information about the current status of a
@ -269,7 +278,7 @@ type ClusterStatus struct {
// +optional
KubernetesVersion string
// APIEnablements represents the list of APIs installed in the member cluster.
// APIEnablements represents the list of APIs installed on the member cluster.
// +optional
APIEnablements []APIEnablement
@ -284,6 +293,11 @@ type ClusterStatus struct {
// ResourceSummary represents the summary of resources in the member cluster.
// +optional
ResourceSummary *ResourceSummary
// RemedyActions represents the remedy actions that need to be performed
// on the cluster.
// +optional
RemedyActions []string
}
// APIEnablement is a list of API resources; it is used to expose the name of the
@ -355,7 +369,7 @@ type AllocatableModeling struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterList contains a list of member cluster
// ClusterList contains a list of member clusters
type ClusterList struct {
metav1.TypeMeta
metav1.ListMeta

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// String returns a well-formatted string for the Cluster object.

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -17,33 +33,13 @@ const (
ResourceNamespaceScopedCluster = false
)
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string
// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
// ResourceCPU in cores. (e,g. 500m = .5 cores)
ResourceCPU ResourceName = "cpu"
// ResourceMemory in bytes. (e,g. 500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceMemory ResourceName = "memory"
// ResourceStorage is volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
ResourceStorage ResourceName = "storage"
// ResourceEphemeralStorage is local ephemeral storage, in bytes. (e,g. 500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
ResourceEphemeralStorage ResourceName = "ephemeral-storage"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:subresource:status
// Cluster represents the desire state and status of a member cluster.
// Cluster represents the desired state and status of a member cluster.
type Cluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@ -59,8 +55,8 @@ type Cluster struct {
// ClusterSpec defines the desired state of a member cluster.
type ClusterSpec struct {
// ID is the unique identifier for the cluster.
// It is different from the object uid(.metadata.uid) and typically collected automatically
// from member cluster during the progress of registration.
// It is different from the object uid(.metadata.uid) and is typically collected automatically
// from each member cluster during the process of registration.
//
// The value is collected in order:
// 1. If the registering cluster enabled ClusterProperty API and defined the cluster ID by
@ -78,7 +74,7 @@ type ClusterSpec struct {
// +kubebuilder:validation:Maxlength=128000
ID string `json:"id,omitempty"`
// SyncMode describes how a cluster sync resources from karmada control plane.
// SyncMode describes how a cluster syncs resources from karmada control plane.
// +kubebuilder:validation:Enum=Push;Pull
// +required
SyncMode ClusterSyncMode `json:"syncMode"`
@ -88,14 +84,14 @@ type ClusterSpec struct {
// +optional
APIEndpoint string `json:"apiEndpoint,omitempty"`
// SecretRef represents the secret contains mandatory credentials to access the member cluster.
// SecretRef represents the secret that contains mandatory credentials to access the member cluster.
// The secret should hold credentials as follows:
// - secret.data.token
// - secret.data.caBundle
// +optional
SecretRef *LocalSecretReference `json:"secretRef,omitempty"`
// ImpersonatorSecretRef represents the secret contains the token of impersonator.
// ImpersonatorSecretRef represents the secret that contains the token of impersonator.
// The secret should hold credentials as follows:
// - secret.data.token
// +optional
@ -110,12 +106,12 @@ type ClusterSpec struct {
// ProxyURL is the proxy URL for the cluster.
// If not empty, the karmada control plane will use this proxy to talk to the cluster.
// More details please refer to: https://github.com/kubernetes/client-go/issues/351
// For more details please refer to: https://github.com/kubernetes/client-go/issues/351
// +optional
ProxyURL string `json:"proxyURL,omitempty"`
// ProxyHeader is the HTTP header required by proxy server.
// The key in the key-value pair is HTTP header key and value is the associated header payloads.
// The key in the key-value pair is HTTP header key and the value is the associated header payloads.
// For the header with multiple values, the values should be separated by comma(e.g. 'k1': 'v1,v2,v3').
// +optional
ProxyHeader map[string]string `json:"proxyHeader,omitempty"`
@ -124,15 +120,25 @@ type ClusterSpec struct {
// +optional
Provider string `json:"provider,omitempty"`
// Region represents the region of the member cluster locate in.
// Region represents the region in which the member cluster is located.
// +optional
Region string `json:"region,omitempty"`
// Zone represents the zone of the member cluster locate in.
// Zone represents the zone in which the member cluster is located.
// Deprecated: This field was never used by Karmada, and it will not be
// removed from v1alpha1 for backward compatibility, use Zones instead.
// +optional
Zone string `json:"zone,omitempty"`
// Taints attached to the member cluster.
// Zones represents the failure zones(also called availability zones) of the
// member cluster. The zones are presented as a slice to support the case
// that a cluster runs across multiple failure zones.
// Refer https://kubernetes.io/docs/setup/best-practices/multiple-zones/ for
// more details about running Kubernetes in multiple zones.
// +optional
Zones []string `json:"zones,omitempty"`
// Taints are attached to the member cluster.
// Taints on the cluster have the "effect" on
// any resource that does not tolerate the Taint.
// +optional
@ -210,8 +216,8 @@ type ResourceModel struct {
// ResourceModelRange describes the detail of each modeling quota that ranges from min to max.
// Please pay attention, by default, the value of min can be inclusive, and the value of max cannot be inclusive.
// E.g. in an interval, min = 2, max =10 is set, which means the interval [2,10).
// This rule ensure that all intervals have the same meaning. If the last interval is infinite,
// E.g. in an interval, min = 2, max = 10 is set, which means the interval [2,10).
// This rule ensures that all intervals have the same meaning. If the last interval is infinite,
// it is definitely unreachable. Therefore, we define the right interval as the open interval.
// For a valid interval, the value on the right is greater than the value on the left,
// in other words, max must be greater than min.
@ -219,7 +225,7 @@ type ResourceModel struct {
type ResourceModelRange struct {
// Name is the name for the resource that you want to categorize.
// +required
Name ResourceName `json:"name"`
Name corev1.ResourceName `json:"name"`
// Min is the minimum amount of this resource represented by resource name.
// Note: The Min value of first grade(usually 0) always acts as zero.
@ -248,13 +254,13 @@ const (
type ClusterSyncMode string
const (
// Push means that the controller on the karmada control plane will in charge of synchronization.
// The controller watches resources change on karmada control plane then pushes them to member cluster.
// Push means that the controller on the karmada control plane will be in charge of synchronization.
// The controller watches resources change on karmada control plane and then pushes them to member cluster.
Push ClusterSyncMode = "Push"
// Pull means that the controller running on the member cluster will in charge of synchronization.
// The controller, as well known as 'agent', watches resources change on karmada control plane then fetches them
// and applies locally on the member cluster.
// Pull means that the controller running on the member cluster will be in charge of synchronization.
// The controller, also known as 'agent', watches resources change on karmada control plane, then fetches them
// and applies them locally on the member cluster.
Pull ClusterSyncMode = "Pull"
)
@ -264,7 +270,7 @@ type LocalSecretReference struct {
// Namespace is the namespace for the resource being referenced.
Namespace string `json:"namespace"`
// Name is the name of resource being referenced.
// Name is the name of the resource being referenced.
Name string `json:"name"`
}
@ -272,6 +278,9 @@ type LocalSecretReference struct {
const (
// ClusterConditionReady means the cluster is healthy and ready to accept workloads.
ClusterConditionReady = "Ready"
// ClusterConditionCompleteAPIEnablements indicates whether the cluster's API enablements(.status.apiEnablements) are complete.
ClusterConditionCompleteAPIEnablements = "CompleteAPIEnablements"
)
// ClusterStatus contains information about the current status of a
@ -281,7 +290,7 @@ type ClusterStatus struct {
// +optional
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
// APIEnablements represents the list of APIs installed in the member cluster.
// APIEnablements represents the list of APIs installed on the member cluster.
// +optional
APIEnablements []APIEnablement `json:"apiEnablements,omitempty"`
@ -296,6 +305,11 @@ type ClusterStatus struct {
// ResourceSummary represents the summary of resources in the member cluster.
// +optional
ResourceSummary *ResourceSummary `json:"resourceSummary,omitempty"`
// RemedyActions represents the remedy actions that need to be performed
// on the cluster.
// +optional
RemedyActions []string `json:"remedyActions,omitempty"`
}
// APIEnablement is a list of API resources; it is used to expose the name of the
@ -364,7 +378,7 @@ type AllocatableModeling struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterList contains a list of member cluster
// ClusterList contains a list of member clusters
type ClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`

View File

@ -1,8 +1,24 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
// TaintClusterUnscheduler will be added when cluster becomes unschedulable
// and removed when cluster becomes scheduable.
// and removed when cluster becomes schedulable.
TaintClusterUnscheduler = "cluster.karmada.io/unschedulable"
// TaintClusterNotReady will be added when cluster is not ready
// and removed when cluster becomes ready.
@ -11,8 +27,6 @@ const (
// (corresponding to ClusterConditionReady status ConditionUnknown)
// and removed when cluster becomes reachable (ClusterConditionReady status ConditionTrue).
TaintClusterUnreachable = "cluster.karmada.io/unreachable"
// TaintClusterTerminating will be added when cluster is terminating.
TaintClusterTerminating = "cluster.karmada.io/terminating"
// CacheSourceAnnotationKey is the annotation that is added to a resource to
// represent which cluster it was cached from.

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
@ -331,6 +347,7 @@ func autoConvert_v1alpha1_ClusterSpec_To_cluster_ClusterSpec(in *ClusterSpec, ou
out.Provider = in.Provider
out.Region = in.Region
out.Zone = in.Zone
out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones))
out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints))
out.ResourceModels = *(*[]cluster.ResourceModel)(unsafe.Pointer(&in.ResourceModels))
return nil
@ -353,6 +370,7 @@ func autoConvert_cluster_ClusterSpec_To_v1alpha1_ClusterSpec(in *cluster.Cluster
out.Provider = in.Provider
out.Region = in.Region
out.Zone = in.Zone
out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones))
out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints))
out.ResourceModels = *(*[]ResourceModel)(unsafe.Pointer(&in.ResourceModels))
return nil
@ -369,6 +387,7 @@ func autoConvert_v1alpha1_ClusterStatus_To_cluster_ClusterStatus(in *ClusterStat
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
out.NodeSummary = (*cluster.NodeSummary)(unsafe.Pointer(in.NodeSummary))
out.ResourceSummary = (*cluster.ResourceSummary)(unsafe.Pointer(in.ResourceSummary))
out.RemedyActions = *(*[]string)(unsafe.Pointer(&in.RemedyActions))
return nil
}
@ -383,6 +402,7 @@ func autoConvert_cluster_ClusterStatus_To_v1alpha1_ClusterStatus(in *cluster.Clu
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
out.NodeSummary = (*NodeSummary)(unsafe.Pointer(in.NodeSummary))
out.ResourceSummary = (*ResourceSummary)(unsafe.Pointer(in.ResourceSummary))
out.RemedyActions = *(*[]string)(unsafe.Pointer(&in.RemedyActions))
return nil
}
@ -458,7 +478,7 @@ func Convert_cluster_ResourceModel_To_v1alpha1_ResourceModel(in *cluster.Resourc
}
func autoConvert_v1alpha1_ResourceModelRange_To_cluster_ResourceModelRange(in *ResourceModelRange, out *cluster.ResourceModelRange, s conversion.Scope) error {
out.Name = cluster.ResourceName(in.Name)
out.Name = v1.ResourceName(in.Name)
out.Min = in.Min
out.Max = in.Max
return nil
@ -470,7 +490,7 @@ func Convert_v1alpha1_ResourceModelRange_To_cluster_ResourceModelRange(in *Resou
}
func autoConvert_cluster_ResourceModelRange_To_v1alpha1_ResourceModelRange(in *cluster.ResourceModelRange, out *ResourceModelRange, s conversion.Scope) error {
out.Name = ResourceName(in.Name)
out.Name = v1.ResourceName(in.Name)
out.Min = in.Min
out.Max = in.Max
return nil

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -170,6 +186,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val
}
}
if in.Zones != nil {
in, out := &in.Zones, &out.Zones
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Taints != nil {
in, out := &in.Taints, &out.Taints
*out = make([]v1.Taint, len(*in))
@ -224,6 +245,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = new(ResourceSummary)
(*in).DeepCopyInto(*out)
}
if in.RemedyActions != nil {
in, out := &in.RemedyActions, &out.RemedyActions
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@ -1,11 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -27,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package cluster
@ -170,6 +186,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val
}
}
if in.Zones != nil {
in, out := &in.Zones, &out.Zones
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Taints != nil {
in, out := &in.Taints, &out.Taints
*out = make([]v1.Taint, len(*in))
@ -224,6 +245,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = new(ResourceSummary)
(*in).DeepCopyInto(*out)
}
if in.RemedyActions != nil {
in, out := &in.RemedyActions, &out.RemedyActions
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -153,6 +169,13 @@ type DependentObjectReference struct {
Namespace string `json:"namespace,omitempty"`
// Name represents the name of the referent.
// +required
Name string `json:"name"`
// Name and LabelSelector cannot be empty at the same time.
// +optional
Name string `json:"name,omitempty"`
// LabelSelector represents a label query over a set of resources.
// If name is not empty, labelSelector will be ignored.
// Name and LabelSelector cannot be empty at the same time.
// +optional
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
}

View File

@ -1,14 +1,44 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// ResourceKindResourceInterpreterCustomization is kind name of ResourceInterpreterCustomization.
ResourceKindResourceInterpreterCustomization = "ResourceInterpreterCustomization"
// ResourceSingularResourceInterpreterCustomization is singular name of ResourceInterpreterCustomization.
ResourceSingularResourceInterpreterCustomization = "resourceinterpretercustomization"
// ResourcePluralResourceInterpreterCustomization is plural name of ResourceInterpreterCustomization.
ResourcePluralResourceInterpreterCustomization = "resourceinterpretercustomizations"
// ResourceNamespaceScopedResourceInterpreterCustomization indicates if ResourceInterpreterCustomization is NamespaceScoped.
ResourceNamespaceScopedResourceInterpreterCustomization = false
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:resource:path=resourceinterpretercustomizations,scope="Cluster",shortName=ric,categories={karmada-io}
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.spec.target.apiVersion`,name="TARGET-API-VERSION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.target.kind`,name="TARGET-KIND",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ResourceInterpreterCustomization describes the configuration of a specific
// resource for Karmada to get the structure.
@ -112,11 +142,14 @@ type LocalValueRetention struct {
// to the desired specification.
//
// The script should implement a function as follows:
// luaScript: >
// function Retain(desiredObj, observedObj)
// desiredObj.spec.fieldFoo = observedObj.spec.fieldFoo
// return desiredObj
// end
//
// ```
// luaScript: >
// function Retain(desiredObj, observedObj)
// desiredObj.spec.fieldFoo = observedObj.spec.fieldFoo
// return desiredObj
// end
// ```
//
// The content of the LuaScript needs to be a whole function including both
// declaration and implementation.
@ -140,16 +173,19 @@ type ReplicaResourceRequirement struct {
// replica as well as resource requirements
//
// The script should implement a function as follows:
// luaScript: >
// function GetReplicas(desiredObj)
// replica = desiredObj.spec.replicas
// requirement = {}
// requirement.nodeClaim = {}
// requirement.nodeClaim.nodeSelector = desiredObj.spec.template.spec.nodeSelector
// requirement.nodeClaim.tolerations = desiredObj.spec.template.spec.tolerations
// requirement.resourceRequest = desiredObj.spec.template.spec.containers[1].resources.limits
// return replica, requirement
// end
//
// ```
// luaScript: >
// function GetReplicas(desiredObj)
// replica = desiredObj.spec.replicas
// requirement = {}
// requirement.nodeClaim = {}
// requirement.nodeClaim.nodeSelector = desiredObj.spec.template.spec.nodeSelector
// requirement.nodeClaim.tolerations = desiredObj.spec.template.spec.tolerations
// requirement.resourceRequest = desiredObj.spec.template.spec.containers[1].resources.limits
// return replica, requirement
// end
// ```
//
// The content of the LuaScript needs to be a whole function including both
// declaration and implementation.
@ -171,11 +207,14 @@ type ReplicaResourceRequirement struct {
type ReplicaRevision struct {
// LuaScript holds the Lua script that is used to revise replicas in the desired specification.
// The script should implement a function as follows:
// luaScript: >
// function ReviseReplica(desiredObj, desiredReplica)
// desiredObj.spec.replicas = desiredReplica
// return desiredObj
// end
//
// ```
// luaScript: >
// function ReviseReplica(desiredObj, desiredReplica)
// desiredObj.spec.replicas = desiredReplica
// return desiredObj
// end
// ```
//
// The content of the LuaScript needs to be a whole function including both
// declaration and implementation.
@ -195,12 +234,15 @@ type ReplicaRevision struct {
type StatusReflection struct {
// LuaScript holds the Lua script that is used to get the status from the observed specification.
// The script should implement a function as follows:
// luaScript: >
// function ReflectStatus(observedObj)
// status = {}
// status.readyReplicas = observedObj.status.observedObj
// return status
// end
//
// ```
// luaScript: >
// function ReflectStatus(observedObj)
// status = {}
// status.readyReplicas = observedObj.status.observedObj
// return status
// end
// ```
//
// The content of the LuaScript needs to be a whole function including both
// declaration and implementation.
@ -220,13 +262,16 @@ type StatusAggregation struct {
// LuaScript holds the Lua script that is used to aggregate decentralized statuses
// to the desired specification.
// The script should implement a function as follows:
// luaScript: >
// function AggregateStatus(desiredObj, statusItems)
// for i = 1, #statusItems do
// desiredObj.status.readyReplicas = desiredObj.status.readyReplicas + items[i].readyReplicas
// end
// return desiredObj
// end
//
// ```
// luaScript: >
// function AggregateStatus(desiredObj, statusItems)
// for i = 1, #statusItems do
// desiredObj.status.readyReplicas = desiredObj.status.readyReplicas + items[i].readyReplicas
// end
// return desiredObj
// end
// ```
//
// The content of the LuaScript needs to be a whole function including both
// declaration and implementation.
@ -246,12 +291,15 @@ type HealthInterpretation struct {
// LuaScript holds the Lua script that is used to assess the health state of
// a specific resource.
// The script should implement a function as follows:
// luaScript: >
// function InterpretHealth(observedObj)
// if observedObj.status.readyReplicas == observedObj.spec.replicas then
// return true
// end
// end
//
// ```
// luaScript: >
// function InterpretHealth(observedObj)
// if observedObj.status.readyReplicas == observedObj.spec.replicas then
// return true
// end
// end
// ```
//
// The content of the LuaScript needs to be a whole function including both
// declaration and implementation.
@ -272,20 +320,23 @@ type DependencyInterpretation struct {
// LuaScript holds the Lua script that is used to interpret the dependencies of
// a specific resource.
// The script should implement a function as follows:
// luaScript: >
// function GetDependencies(desiredObj)
// dependencies = {}
// if desiredObj.spec.serviceAccountName ~= "" and desiredObj.spec.serviceAccountName ~= "default" then
// dependency = {}
// dependency.apiVersion = "v1"
// dependency.kind = "ServiceAccount"
// dependency.name = desiredObj.spec.serviceAccountName
// dependency.namespace = desiredObj.namespace
// dependencies[1] = {}
// dependencies[1] = dependency
// end
// return dependencies
// end
//
// ```
// luaScript: >
// function GetDependencies(desiredObj)
// dependencies = {}
// serviceAccountName = desiredObj.spec.template.spec.serviceAccountName
// if serviceAccountName ~= nil and serviceAccountName ~= "default" then
// dependency = {}
// dependency.apiVersion = "v1"
// dependency.kind = "ServiceAccount"
// dependency.name = serviceAccountName
// dependency.namespace = desiredObj.metadata.namespace
// dependencies[1] = dependency
// end
// return dependencies
// end
// ```
//
// The content of the LuaScript needs to be a whole function including both
// declaration and implementation.

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -19,7 +35,7 @@ const (
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:resource:path=resourceinterpreterwebhookconfigurations,scope="Cluster",categories={karmada-io}
// +kubebuilder:storageversion
// ResourceInterpreterWebhookConfiguration describes the configuration of webhooks which take the responsibility to
@ -40,6 +56,24 @@ type ResourceInterpreterWebhook struct {
Name string `json:"name"`
// ClientConfig defines how to communicate with the hook.
// It supports two mutually exclusive configuration modes:
//
// 1. URL - Directly specify the webhook URL with format `scheme://host:port/path`.
// Example: https://webhook.example.com:8443/my-interpreter
//
// 2. Service - Reference a Kubernetes Service that exposes the webhook.
// When using Service reference, Karmada resolves the endpoint through the following steps:
// a) First attempts to locate the Service in karmada-apiserver
// b) If found, constructs URL based on Service type:
// - ClusterIP/LoadBalancer/NodePort: Uses ClusterIP with port from Service spec
// (Note: Services with ClusterIP "None" are rejected), Example:
// `https://<cluster ip>:<port>`
// - ExternalName: Uses external DNS name format: `https://<external name>:<port>`
// c) If NOT found in karmada-apiserver, falls back to standard Kubernetes
// service DNS name format: `https://<service>.<namespace>.svc:<port>`
//
// Note: When both URL and Service are specified, the Service reference takes precedence
// and the URL configuration will be ignored.
// +required
ClientConfig admissionregistrationv1.WebhookClientConfig `json:"clientConfig"`
@ -83,7 +117,7 @@ type RuleWithOperations struct {
type InterpreterOperation string
const (
// InterpreterOperationAll indicates math all InterpreterOperation.
// InterpreterOperationAll indicates matching all InterpreterOperation.
InterpreterOperationAll InterpreterOperation = "*"
// InterpreterOperationInterpretReplica indicates that karmada want to figure out the replica declaration of a specific object.
@ -126,7 +160,7 @@ type Rule struct {
// ["apps", "batch", "example.io"] means matches 3 groups.
// ["*"] means matches all group
//
// Note: The group cloud be empty, e.g the 'core' group of kubernetes, in that case use [""].
// Note: The group could be empty, e.g. the 'core' group of Kubernetes, in that case use [""].
// +required
APIGroups []string `json:"apiGroups"`

View File

@ -1,12 +1,29 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha2 "github.com/karmada-io/api/work/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -96,6 +113,11 @@ func (in *DependencyInterpretation) DeepCopy() *DependencyInterpretation {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DependentObjectReference) DeepCopyInto(out *DependentObjectReference) {
*out = *in
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
@ -368,7 +390,9 @@ func (in *ResourceInterpreterResponse) DeepCopyInto(out *ResourceInterpreterResp
if in.Dependencies != nil {
in, out := &in.Dependencies, &out.Dependencies
*out = make([]DependentObjectReference, len(*in))
copy(*out, *in)
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RawStatus != nil {
in, out := &in.RawStatus, &out.RawStatus

View File

@ -1,11 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -27,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

30
go.mod
View File

@ -1,28 +1,30 @@
module github.com/karmada-io/api
go 1.19
go 1.23.8
require (
k8s.io/api v0.26.1
k8s.io/apiextensions-apiserver v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/controller-runtime v0.14.2
k8s.io/api v0.32.3
k8s.io/apiextensions-apiserver v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.20.4
)
require (
github.com/go-logr/logr v1.2.3 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
golang.org/x/text v0.5.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/text v0.24.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

81
go.sum
View File

@ -1,12 +1,17 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -14,19 +19,28 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@ -38,8 +52,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc=
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -48,8 +62,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@ -59,28 +73,27 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg=
k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI=
k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM=
k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.14.2 h1:P6IwDhbsRWsBClt/8/h8Zy36bCuGuW5Op7MHpFrN/60=
sigs.k8s.io/controller-runtime v0.14.2/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY=
k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss=
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -19,7 +35,7 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=mci,categories={karmada-io}
// +kubebuilder:resource:path=multiclusteringresses,scope=Namespaced,shortName=mci,categories={karmada-io}
// MultiClusterIngress is a collection of rules that allow inbound connections to reach the
// endpoints defined by a backend. The structure of MultiClusterIngress is same as Ingress,
@ -34,7 +50,35 @@ type MultiClusterIngress struct {
// Status is the current state of the MultiClusterIngress.
// +optional
Status networkingv1.IngressStatus `json:"status,omitempty"`
Status MultiClusterIngressStatus `json:"status,omitempty"`
}
// MultiClusterIngressStatus is the current state of the MultiClusterIngress.
type MultiClusterIngressStatus struct {
networkingv1.IngressStatus `json:",inline"`
// TrafficBlockClusters records the names of clusters for which traffic should be blocked.
// When the cloud provider implements its multicluster-cloud-provider and refreshes
// the service backend address to the LoadBalancer Service, it needs to filter out
// the backend addresses in these clusters.
// +optional
TrafficBlockClusters []string `json:"trafficBlockClusters,omitempty"`
// ServiceLocations records the locations of the MultiClusterIngress's backend
// Service resources. It will be set by the system controller.
// +optional
ServiceLocations []ServiceLocation `json:"serviceLocations,omitempty"`
}
// ServiceLocation records the locations of the MultiClusterIngress's backend Service resources.
type ServiceLocation struct {
// Name is the name of the referenced Service. The Service must exist in
// the same namespace as the MultiClusterService object.
// NOTE(review): this mentions MultiClusterService, but ServiceLocation belongs to
// MultiClusterIngress — looks like a copy-paste slip; confirm the intended owner.
Name string `json:"name"`
// Clusters records the cluster list where the Service is located.
// +optional
Clusters []string `json:"clusters,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View File

@ -0,0 +1,160 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// ResourceKindMultiClusterService is kind name of MultiClusterService.
ResourceKindMultiClusterService = "MultiClusterService"
// ResourceSingularMultiClusterService is singular name of MultiClusterService.
ResourceSingularMultiClusterService = "multiclusterservice"
// ResourcePluralMultiClusterService is plural name of MultiClusterService.
ResourcePluralMultiClusterService = "multiclusterservices"
// ResourceNamespaceScopedMultiClusterService indicates if MultiClusterService is NamespaceScoped.
ResourceNamespaceScopedMultiClusterService = true
// MCSServiceAppliedConditionType indicates the condition type of MCS service applied.
MCSServiceAppliedConditionType = "ServiceApplied"
// EndpointSliceDispatched indicates whether the EndpointSlice is dispatched to consumption clusters.
EndpointSliceDispatched string = "EndpointSliceDispatched"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=multiclusterservices,scope=Namespaced,shortName=mcs,categories={karmada-io}
// MultiClusterService is a named abstraction of a multi-cluster software service.
// The name of a MultiClusterService is the same as the name of the Service it
// abstracts. Services with the same name in different clusters are regarded as
// the same service and are associated with the same MultiClusterService.
// MultiClusterService can control the exposure of services to outside multiple
// clusters, and also enable service discovery between clusters.
type MultiClusterService struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec is the desired state of the MultiClusterService.
Spec MultiClusterServiceSpec `json:"spec"`
// Status is the current state of the MultiClusterService.
// +optional
Status corev1.ServiceStatus `json:"status,omitempty"`
}
// MultiClusterServiceSpec is the desired state of the MultiClusterService.
type MultiClusterServiceSpec struct {
// Types specifies how to expose the service referenced by this
// MultiClusterService.
// +required
Types []ExposureType `json:"types"`
// Ports is the list of ports that are exposed by this MultiClusterService.
// Ports that are not specified here will be filtered out during the service
// exposure and discovery process.
// All ports in the referencing service will be exposed by default.
// +optional
Ports []ExposurePort `json:"ports,omitempty"`
// Range specifies the ranges where the referencing service should
// be exposed.
// Only valid, and optional, when Types contains CrossCluster.
// If not set and Types contains CrossCluster, all clusters will
// be selected, which means the referencing service will be exposed
// across all registered clusters.
// Deprecated: in favor of ProviderClusters/ConsumerClusters.
// +optional
Range ExposureRange `json:"range,omitempty"`
// ServiceProvisionClusters specifies the clusters which will provision the service backend.
// If left empty, we will collect the backend endpoints from all clusters and sync
// them to the ServiceConsumptionClusters.
// Deprecated: in favor of ProviderClusters/ConsumerClusters.
// +optional
ServiceProvisionClusters []string `json:"serviceProvisionClusters,omitempty"`
// ServiceConsumptionClusters specifies the clusters where the service will be exposed, for clients.
// If left empty, the service will be exposed to all clusters.
// Deprecated: in favor of ProviderClusters/ConsumerClusters.
// +optional
ServiceConsumptionClusters []string `json:"serviceConsumptionClusters,omitempty"`
// ProviderClusters specifies the clusters which will provide the service backend.
// If left empty, we will collect the backend endpoints from all clusters and sync
// them to the ConsumerClusters.
// +optional
ProviderClusters []ClusterSelector `json:"providerClusters,omitempty"`
// ConsumerClusters specifies the clusters where the service will be exposed, for clients.
// If left empty, the service will be exposed to all clusters.
// +optional
ConsumerClusters []ClusterSelector `json:"consumerClusters,omitempty"`
}
// ClusterSelector specifies the cluster to be selected.
type ClusterSelector struct {
// Name is the name of the cluster to be selected.
// NOTE(review): the field is marked +required but its JSON tag carries
// omitempty — these usually conflict; confirm which is intended.
// +required
Name string `json:"name,omitempty"`
}
// ExposureType describes how to expose the service.
type ExposureType string
const (
// ExposureTypeCrossCluster means a service will be accessible across clusters.
ExposureTypeCrossCluster ExposureType = "CrossCluster"
// ExposureTypeLoadBalancer means a service will be exposed via an external
// load balancer.
ExposureTypeLoadBalancer ExposureType = "LoadBalancer"
)
// ExposurePort describes which port will be exposed.
type ExposurePort struct {
// Name is the name of the port that needs to be exposed within the service.
// The port name must be the same as that defined in the service.
// +optional
Name string `json:"name,omitempty"`
// Port specifies the exposed service port.
// +required
Port int32 `json:"port"`
}
// ExposureRange describes a list of clusters where the service is exposed.
// It currently supports selecting clusters by name only; this leaves room to
// extend with more methods, such as a label selector.
type ExposureRange struct {
// ClusterNames is the list of clusters to be selected.
// +optional
ClusterNames []string `json:"clusterNames,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MultiClusterServiceList is a collection of MultiClusterService.
type MultiClusterServiceList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
// Items is the list of MultiClusterService.
Items []MultiClusterService `json:"items"`
}

View File

@ -0,0 +1,34 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
// MultiClusterServicePermanentIDLabel is the identifier of a MultiClusterService object.
// Karmada generates a unique identifier, such as metadata.UUID, for each MultiClusterService object.
// This identifier will be used as a label selector to locate the corresponding Work of the service.
// The reason for generating a new unique identifier instead of simply using metadata.UUID is because:
// In backup scenarios, when applying the backup resource manifest in a new cluster, the UUID may change.
MultiClusterServicePermanentIDLabel = "multiclusterservice.karmada.io/permanent-id"
// MultiClusterServiceNameAnnotation is the name of a MultiClusterService object.
// This annotation will be added to the resource template and ResourceBinding.
MultiClusterServiceNameAnnotation = "multiclusterservice.karmada.io/name"
// MultiClusterServiceNamespaceAnnotation is the namespace of a MultiClusterService object.
// This annotation will be added to the resource template and ResourceBinding.
MultiClusterServiceNamespaceAnnotation = "multiclusterservice.karmada.io/namespace"
)

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -9,6 +25,59 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSelector) DeepCopyInto(out *ClusterSelector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSelector.
func (in *ClusterSelector) DeepCopy() *ClusterSelector {
if in == nil {
return nil
}
out := new(ClusterSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExposurePort) DeepCopyInto(out *ExposurePort) {
	// ExposurePort is copied by value; no pointer/slice/map fields to clone.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposurePort.
func (in *ExposurePort) DeepCopy() *ExposurePort {
	if in == nil {
		return nil
	}
	out := new(ExposurePort)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExposureRange) DeepCopyInto(out *ExposureRange) {
	*out = *in
	// ClusterNames is a []string: allocate a fresh backing array so the copy
	// does not alias the receiver's slice.
	if in.ClusterNames != nil {
		in, out := &in.ClusterNames, &out.ClusterNames
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposureRange.
func (in *ExposureRange) DeepCopy() *ExposureRange {
	if in == nil {
		return nil
	}
	out := new(ExposureRange)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MultiClusterIngress) DeepCopyInto(out *MultiClusterIngress) {
*out = *in
@ -69,3 +138,161 @@ func (in *MultiClusterIngressList) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MultiClusterIngressStatus) DeepCopyInto(out *MultiClusterIngressStatus) {
	*out = *in
	in.IngressStatus.DeepCopyInto(&out.IngressStatus)
	if in.TrafficBlockClusters != nil {
		in, out := &in.TrafficBlockClusters, &out.TrafficBlockClusters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	// ServiceLocation itself contains a slice, so each element needs its own
	// DeepCopyInto rather than a shallow copy().
	if in.ServiceLocations != nil {
		in, out := &in.ServiceLocations, &out.ServiceLocations
		*out = make([]ServiceLocation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterIngressStatus.
func (in *MultiClusterIngressStatus) DeepCopy() *MultiClusterIngressStatus {
	if in == nil {
		return nil
	}
	out := new(MultiClusterIngressStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MultiClusterService) DeepCopyInto(out *MultiClusterService) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterService.
func (in *MultiClusterService) DeepCopy() *MultiClusterService {
	if in == nil {
		return nil
	}
	out := new(MultiClusterService)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// It satisfies k8s.io/apimachinery/pkg/runtime.Object for this API type.
func (in *MultiClusterService) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MultiClusterServiceList) DeepCopyInto(out *MultiClusterServiceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Each item is deep-copied element-wise; MultiClusterService contains
	// nested reference types (ObjectMeta, Spec, Status).
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]MultiClusterService, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterServiceList.
func (in *MultiClusterServiceList) DeepCopy() *MultiClusterServiceList {
	if in == nil {
		return nil
	}
	out := new(MultiClusterServiceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// It satisfies k8s.io/apimachinery/pkg/runtime.Object for this API type.
func (in *MultiClusterServiceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MultiClusterServiceSpec) DeepCopyInto(out *MultiClusterServiceSpec) {
	*out = *in
	if in.Types != nil {
		in, out := &in.Types, &out.Types
		*out = make([]ExposureType, len(*in))
		copy(*out, *in)
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]ExposurePort, len(*in))
		copy(*out, *in)
	}
	in.Range.DeepCopyInto(&out.Range)
	if in.ServiceProvisionClusters != nil {
		in, out := &in.ServiceProvisionClusters, &out.ServiceProvisionClusters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.ServiceConsumptionClusters != nil {
		in, out := &in.ServiceConsumptionClusters, &out.ServiceConsumptionClusters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	// copy() suffices for []ClusterSelector: ClusterSelector is copied by
	// plain value assignment (see its DeepCopyInto above).
	if in.ProviderClusters != nil {
		in, out := &in.ProviderClusters, &out.ProviderClusters
		*out = make([]ClusterSelector, len(*in))
		copy(*out, *in)
	}
	if in.ConsumerClusters != nil {
		in, out := &in.ConsumerClusters, &out.ConsumerClusters
		*out = make([]ClusterSelector, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterServiceSpec.
func (in *MultiClusterServiceSpec) DeepCopy() *MultiClusterServiceSpec {
	if in == nil {
		return nil
	}
	out := new(MultiClusterServiceSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceLocation) DeepCopyInto(out *ServiceLocation) {
	*out = *in
	// Clusters is a []string: allocate fresh storage so the copy does not
	// alias the receiver's slice.
	if in.Clusters != nil {
		in, out := &in.Clusters, &out.Clusters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceLocation.
func (in *ServiceLocation) DeepCopy() *ServiceLocation {
	if in == nil {
		return nil
	}
	out := new(ServiceLocation)
	in.DeepCopyInto(out)
	return out
}

View File

@ -1,11 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -27,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)
@ -44,6 +63,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&MultiClusterIngress{},
&MultiClusterIngressList{},
&MultiClusterService{},
&MultiClusterServiceList{},
)
// AddToGroupVersion allows the serialization of client types like ListOptions.
v1.AddToGroupVersion(scheme, SchemeGroupVersion)

View File

@ -1,107 +0,0 @@
package constants
import (
"time"
"k8s.io/apimachinery/pkg/labels"
)
const (
	// KubeDefaultRepository defines the default of the k8s image repository
	KubeDefaultRepository = "registry.k8s.io"
	// KarmadaDefaultRepository defines the default of the karmada image repository
	KarmadaDefaultRepository = "docker.io/karmada"
	// EtcdDefaultVersion defines the default of the karmada etcd image tag
	EtcdDefaultVersion = "3.5.3-0"
	// KarmadaDefaultVersion defines the default of the karmada components image tag
	KarmadaDefaultVersion = "v1.4.0"
	// KubeDefaultVersion defines the default of the karmada apiserver and kubeControllerManager image tag
	KubeDefaultVersion = "v1.25.4"
	// KarmadaDefaultServiceSubnet defines the default of the subnet used by k8s services.
	KarmadaDefaultServiceSubnet = "10.96.0.0/12"
	// KarmadaDefaultDNSDomain defines the default of the DNSDomain
	KarmadaDefaultDNSDomain = "cluster.local"

	// KarmadaOperator defines the name of the karmada operator.
	KarmadaOperator = "karmada-operator"

	// Etcd defines the name of the built-in etcd cluster component
	Etcd = "etcd"
	// KarmadaAPIServer defines the name of the karmada-apiserver component
	KarmadaAPIServer = "karmada-apiserver"
	// KubeAPIServer defines the repository name of the kube apiserver
	KubeAPIServer = "kube-apiserver"
	// KarmadaAggregatedAPIServer defines the name of the karmada-aggregated-apiserver component
	KarmadaAggregatedAPIServer = "karmada-aggregated-apiserver"
	// KubeControllerManager defines the name of the kube-controller-manager component
	KubeControllerManager = "kube-controller-manager"
	// KarmadaControllerManager defines the name of the karmada-controller-manager component
	KarmadaControllerManager = "karmada-controller-manager"
	// KarmadaScheduler defines the name of the karmada-scheduler component
	KarmadaScheduler = "karmada-scheduler"
	// KarmadaWebhook defines the name of the karmada-webhook component
	KarmadaWebhook = "karmada-webhook"
	// KarmadaDescheduler defines the name of the karmada-descheduler component
	KarmadaDescheduler = "karmada-descheduler"

	// KarmadaSystemNamespace defines the leader selection namespace for karmada components
	KarmadaSystemNamespace = "karmada-system"
	// KarmadaDataDir defines the karmada data dir
	KarmadaDataDir = "/var/lib/karmada"

	// EtcdListenClientPort defines the port etcd listen on for client traffic
	EtcdListenClientPort = 2379
	// EtcdMetricsPort is the port at which to obtain etcd metrics and health status
	EtcdMetricsPort = 2381
	// EtcdListenPeerPort defines the port etcd listen on for peer traffic
	EtcdListenPeerPort = 2380
	// KarmadaAPIserverListenClientPort defines the port karmada apiserver listen on for client traffic
	KarmadaAPIserverListenClientPort = 5443
	// EtcdDataVolumeName defines the name of the etcd data volume
	EtcdDataVolumeName = "etcd-data"

	// CertificateValidity defines the validity period of generated certificates (one year).
	CertificateValidity = time.Hour * 24 * 365
	// CaCertAndKeyName defines the CA certificate key name
	CaCertAndKeyName = "ca"
	// EtcdCaCertAndKeyName defines the etcd CA certificate key name
	EtcdCaCertAndKeyName = "etcd-ca"
	// EtcdServerCertAndKeyName defines the etcd server certificate key name
	EtcdServerCertAndKeyName = "etcd-server"
	// EtcdClientCertAndKeyName defines the etcd client certificate key name
	EtcdClientCertAndKeyName = "etcd-client"
	// KarmadaCertAndKeyName defines the karmada certificate key name
	KarmadaCertAndKeyName = "karmada"
	// ApiserverCertAndKeyName defines the karmada apiserver certificate key name
	ApiserverCertAndKeyName = "apiserver"
	// FrontProxyCaCertAndKeyName defines the front-proxy CA certificate key name
	FrontProxyCaCertAndKeyName = "front-proxy-ca"
	// FrontProxyClientCertAndKeyName defines the front-proxy-client certificate key name
	FrontProxyClientCertAndKeyName = "front-proxy-client"

	// ClusterName defines the karmada cluster name used in the generated kubeconfig
	ClusterName = "karmada-apiserver"
	// UserName defines the karmada cluster user name used in the generated kubeconfig
	UserName = "karmada-admin"

	// KarmadaAPIserverComponent defines the name of karmada-apiserver component
	KarmadaAPIserverComponent = "KarmadaAPIServer"
	// KarmadaAggregatedAPIServerComponent defines the name of karmada-aggregated-apiserver component
	KarmadaAggregatedAPIServerComponent = "KarmadaAggregatedAPIServer"
	// KubeControllerManagerComponent defines the name of kube-controller-manager-component
	KubeControllerManagerComponent = "KubeControllerManager"
	// KarmadaControllerManagerComponent defines the name of karmada-controller-manager component
	KarmadaControllerManagerComponent = "KarmadaControllerManager"
	// KarmadaSchedulerComponent defines the name of karmada-scheduler component
	KarmadaSchedulerComponent = "KarmadaScheduler"
	// KarmadaWebhookComponent defines the name of the karmada-webhook component
	KarmadaWebhookComponent = "KarmadaWebhook"
	// KarmadaDeschedulerComponent defines the name of the karmada-descheduler component
	KarmadaDeschedulerComponent = "KarmadaDescheduler"

	// KarmadaOperatorLabelKeyName defines a label key used by all resources created by the karmada operator
	KarmadaOperatorLabelKeyName = "app.kubernetes.io/managed-by"
)
var (
	// KarmadaOperatorLabel defines the default labels applied to resources created by the karmada operator
	KarmadaOperatorLabel = labels.Set{KarmadaOperatorLabelKeyName: KarmadaOperator}
)

View File

@ -1,229 +0,0 @@
package v1alpha1
import (
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
"github.com/karmada-io/api/operator/constants"
)
var (
	// Default image repositories for each control-plane component, composed
	// as "<registry>/<component-name>". Kubernetes-native components (etcd,
	// kube-apiserver, kube-controller-manager) are pulled from
	// constants.KubeDefaultRepository; Karmada's own components from
	// constants.KarmadaDefaultRepository.
	etcdImageRepository                       = fmt.Sprintf("%s/%s", constants.KubeDefaultRepository, constants.Etcd)
	karmadaAPIServiceImageRepository          = fmt.Sprintf("%s/%s", constants.KubeDefaultRepository, constants.KubeAPIServer)
	karmadaAggregatedAPIServerImageRepository = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaAggregatedAPIServer)
	kubeControllerManagerImageRepository      = fmt.Sprintf("%s/%s", constants.KubeDefaultRepository, constants.KubeControllerManager)
	karmadaControllerManagerImageRepository   = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaControllerManager)
	karmadaSchedulerImageRepository           = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaScheduler)
	karmadaWebhookImageRepository             = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaWebhook)
	karmadaDeschedulerImageRepository         = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaDescheduler)
)
// addDefaultingFuncs registers this package's defaulting functions with the
// given scheme; it is the hook wired into the scheme builder.
func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	defaultKarmada := func(obj interface{}) {
		SetObjectDefaultsKarmada(obj.(*Karmada))
	}
	scheme.AddTypeDefaultingFunc(&Karmada{}, defaultKarmada)
	return nil
}
// SetObjectDefaultsKarmada sets defaults for a Karmada object; it is the
// entry point registered with the scheme's type defaulting machinery.
func SetObjectDefaultsKarmada(in *Karmada) {
	setDefaultsKarmada(in)
}

// setDefaultsKarmada applies all defaulting steps: host-cluster settings
// first, then per-component settings.
func setDefaultsKarmada(obj *Karmada) {
	setDefaultsHostCluster(obj)
	setDefaultsKarmadaComponents(obj)
}
// setDefaultsKarmadaComponents ensures the Components section exists and
// applies each component's defaulter in a fixed order; the descheduler is an
// optional addon and is defaulted last.
func setDefaultsKarmadaComponents(obj *Karmada) {
	if obj.Spec.Components == nil {
		obj.Spec.Components = &KarmadaComponents{}
	}
	components := obj.Spec.Components
	for _, apply := range []func(*KarmadaComponents){
		setDefaultsEtcd,
		setDefaultsKarmadaAPIServer,
		setDefaultsKarmadaAggregatedAPIServer,
		setDefaultsKubeControllerManager,
		setDefaultsKarmadaControllerManager,
		setDefaultsKarmadaScheduler,
		setDefaultsKarmadaWebhook,
		setDefaultsKarmadaDescheduler,
	} {
		apply(components)
	}
}
// setDefaultsHostCluster ensures the HostCluster section and its Networking
// sub-section exist, and defaults the DNS domain when unset.
func setDefaultsHostCluster(obj *Karmada) {
	if obj.Spec.HostCluster == nil {
		obj.Spec.HostCluster = &HostCluster{}
	}
	host := obj.Spec.HostCluster
	if host.Networking == nil {
		host.Networking = &Networking{}
	}
	if host.Networking.DNSDomain == nil {
		host.Networking.DNSDomain = pointer.String(constants.KarmadaDefaultDNSDomain)
	}
}
// setDefaultsEtcd defaults the built-in (local) etcd: one replica, the
// default image, and an emptyDir volume when no storage mode is chosen.
// When an external etcd is configured, nothing else is defaulted.
func setDefaultsEtcd(obj *KarmadaComponents) {
	if obj.Etcd == nil {
		obj.Etcd = &Etcd{}
	}
	if obj.Etcd.External != nil {
		return
	}
	if obj.Etcd.Local == nil {
		obj.Etcd.Local = &LocalEtcd{}
	}
	local := obj.Etcd.Local
	if local.Replicas == nil {
		local.Replicas = pointer.Int32(1)
	}
	if local.Image.ImageRepository == "" {
		local.Image.ImageRepository = etcdImageRepository
	}
	if local.Image.ImageTag == "" {
		local.Image.ImageTag = constants.EtcdDefaultVersion
	}
	if local.VolumeData == nil {
		local.VolumeData = &VolumeData{}
	}
	volume := local.VolumeData
	if volume.EmptyDir == nil && volume.HostPath == nil && volume.VolumeClaim == nil {
		volume.EmptyDir = &corev1.EmptyDirVolumeSource{}
	}
}
// setDefaultsKarmadaAPIServer fills in defaults for the karmada-apiserver
// component: image, a single replica, the default service subnet and a
// ClusterIP service type.
func setDefaultsKarmadaAPIServer(obj *KarmadaComponents) {
	if obj.KarmadaAPIServer == nil {
		obj.KarmadaAPIServer = &KarmadaAPIServer{}
	}
	server := obj.KarmadaAPIServer
	if server.Image.ImageRepository == "" {
		server.Image.ImageRepository = karmadaAPIServiceImageRepository
	}
	if server.Image.ImageTag == "" {
		server.Image.ImageTag = constants.KubeDefaultVersion
	}
	if server.Replicas == nil {
		server.Replicas = pointer.Int32(1)
	}
	if server.ServiceSubnet == nil {
		server.ServiceSubnet = pointer.String(constants.KarmadaDefaultServiceSubnet)
	}
	if server.ServiceType == "" {
		server.ServiceType = corev1.ServiceTypeClusterIP
	}
}
// setDefaultsKarmadaAggregatedAPIServer fills in defaults for the
// karmada-aggregated-apiserver component: image and a single replica.
func setDefaultsKarmadaAggregatedAPIServer(obj *KarmadaComponents) {
	if obj.KarmadaAggregatedAPIServer == nil {
		obj.KarmadaAggregatedAPIServer = &KarmadaAggregatedAPIServer{}
	}
	agg := obj.KarmadaAggregatedAPIServer
	if agg.Image.ImageRepository == "" {
		agg.Image.ImageRepository = karmadaAggregatedAPIServerImageRepository
	}
	if agg.Image.ImageTag == "" {
		agg.Image.ImageTag = constants.KarmadaDefaultVersion
	}
	if agg.Replicas == nil {
		agg.Replicas = pointer.Int32(1)
	}
}
// setDefaultsKubeControllerManager fills in defaults for the
// kube-controller-manager component: image and a single replica.
func setDefaultsKubeControllerManager(obj *KarmadaComponents) {
	if obj.KubeControllerManager == nil {
		obj.KubeControllerManager = &KubeControllerManager{}
	}
	kcm := obj.KubeControllerManager
	if kcm.Image.ImageRepository == "" {
		kcm.Image.ImageRepository = kubeControllerManagerImageRepository
	}
	if kcm.Image.ImageTag == "" {
		kcm.Image.ImageTag = constants.KubeDefaultVersion
	}
	if kcm.Replicas == nil {
		kcm.Replicas = pointer.Int32(1)
	}
}
// setDefaultsKarmadaControllerManager fills in defaults for the
// karmada-controller-manager component: image and a single replica.
func setDefaultsKarmadaControllerManager(obj *KarmadaComponents) {
	if obj.KarmadaControllerManager == nil {
		obj.KarmadaControllerManager = &KarmadaControllerManager{}
	}
	cm := obj.KarmadaControllerManager
	if cm.Image.ImageRepository == "" {
		cm.Image.ImageRepository = karmadaControllerManagerImageRepository
	}
	if cm.Image.ImageTag == "" {
		cm.Image.ImageTag = constants.KarmadaDefaultVersion
	}
	if cm.Replicas == nil {
		cm.Replicas = pointer.Int32(1)
	}
}
// setDefaultsKarmadaScheduler fills in defaults for the karmada-scheduler
// component: image and a single replica.
func setDefaultsKarmadaScheduler(obj *KarmadaComponents) {
	if obj.KarmadaScheduler == nil {
		obj.KarmadaScheduler = &KarmadaScheduler{}
	}
	sched := obj.KarmadaScheduler
	if sched.Image.ImageRepository == "" {
		sched.Image.ImageRepository = karmadaSchedulerImageRepository
	}
	if sched.Image.ImageTag == "" {
		sched.Image.ImageTag = constants.KarmadaDefaultVersion
	}
	if sched.Replicas == nil {
		sched.Replicas = pointer.Int32(1)
	}
}
// setDefaultsKarmadaWebhook fills in defaults for the karmada-webhook
// component: image and a single replica.
func setDefaultsKarmadaWebhook(obj *KarmadaComponents) {
	if obj.KarmadaWebhook == nil {
		obj.KarmadaWebhook = &KarmadaWebhook{}
	}
	wh := obj.KarmadaWebhook
	if wh.Image.ImageRepository == "" {
		wh.Image.ImageRepository = karmadaWebhookImageRepository
	}
	if wh.Image.ImageTag == "" {
		wh.Image.ImageTag = constants.KarmadaDefaultVersion
	}
	if wh.Replicas == nil {
		wh.Replicas = pointer.Int32(1)
	}
}
// setDefaultsKarmadaDescheduler fills in defaults for the optional
// karmada-descheduler addon. Unlike the other components it is NOT created
// when absent: a nil section means the addon is disabled.
func setDefaultsKarmadaDescheduler(obj *KarmadaComponents) {
	if obj.KarmadaDescheduler == nil {
		return
	}
	ds := obj.KarmadaDescheduler
	if ds.Image.ImageRepository == "" {
		ds.Image.ImageRepository = karmadaDeschedulerImageRepository
	}
	if ds.Image.ImageTag == "" {
		ds.Image.ImageTag = constants.KarmadaDefaultVersion
	}
	if ds.Replicas == nil {
		ds.Replicas = pointer.Int32(1)
	}
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:defaulter-gen=ObjectMeta
// +groupName=operator.karmada.io

View File

@ -1,10 +1,68 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Name returns the full image reference in "<repository>:<tag>" form,
// built from the ImageRepository and ImageTag fields.
func (image *Image) Name() string {
	return fmt.Sprintf("%s:%s", image.ImageRepository, image.ImageTag)
}
// KarmadaInProgressing sets the Karmada condition to Progressing.
func KarmadaInProgressing(karmada *Karmada, conditionType ConditionType, message string) {
	// Discard any previously recorded conditions so the status reflects only
	// the current state, then record the Progressing condition.
	karmada.Status.Conditions = []metav1.Condition{}
	apimeta.SetStatusCondition(&karmada.Status.Conditions, metav1.Condition{
		Type:    string(conditionType),
		Status:  metav1.ConditionFalse,
		Reason:  "Progressing",
		Message: message,
	})
}
// KarmadaCompleted sets the Karmada condition to Completed.
func KarmadaCompleted(karmada *Karmada, conditionType ConditionType, message string) {
	// Discard any previously recorded conditions so the status reflects only
	// the current state, then record the Completed condition.
	karmada.Status.Conditions = []metav1.Condition{}
	apimeta.SetStatusCondition(&karmada.Status.Conditions, metav1.Condition{
		Type:    string(conditionType),
		Status:  metav1.ConditionTrue,
		Reason:  "Completed",
		Message: message,
	})
}
// KarmadaFailed sets the Karmada condition to Failed.
func KarmadaFailed(karmada *Karmada, conditionType ConditionType, message string) {
	// Discard any previously recorded conditions so the status reflects only
	// the current state, then record the Failed condition.
	karmada.Status.Conditions = []metav1.Condition{}
	apimeta.SetStatusCondition(&karmada.Status.Conditions, metav1.Condition{
		Type:    string(conditionType),
		Status:  metav1.ConditionFalse,
		Reason:  "Failed",
		Message: message,
	})
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (

View File

@ -24,9 +24,9 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:path=karmadas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.status.controlPlaneReady`,name="Status",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:resource:path=karmadas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="READY",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// Karmada enables declarative installation of karmada.
type Karmada struct {
@ -44,6 +44,38 @@ type Karmada struct {
Status KarmadaStatus `json:"status,omitempty"`
}
// CRDDownloadPolicy specifies a policy for how the operator will download the Karmada CRD tarball.
// Valid values are DownloadAlways and DownloadIfNotPresent.
type CRDDownloadPolicy string

const (
	// DownloadAlways instructs the Karmada operator to always download the CRD tarball from a remote location.
	DownloadAlways CRDDownloadPolicy = "Always"

	// DownloadIfNotPresent instructs the Karmada operator to download the CRDs tarball from a remote location only if it is not yet present in the local cache.
	DownloadIfNotPresent CRDDownloadPolicy = "IfNotPresent"
)
// HTTPSource specifies how to download the CRD tarball via either HTTP or HTTPS protocol.
type HTTPSource struct {
	// URL specifies the URL of the CRD tarball resource.
	URL string `json:"url,omitempty"`
}
// CRDTarball specifies the source from which the Karmada CRD tarball should be downloaded, along with the download policy to use.
type CRDTarball struct {
	// HTTPSource specifies how to download the CRD tarball via either HTTP or HTTPS protocol.
	// +optional
	HTTPSource *HTTPSource `json:"httpSource,omitempty"`

	// CRDDownloadPolicy specifies a policy that should be used to download the CRD tarball.
	// Valid values are "Always" and "IfNotPresent".
	// Defaults to "IfNotPresent".
	// +kubebuilder:validation:Enum=Always;IfNotPresent
	// +kubebuilder:default=IfNotPresent
	// +optional
	CRDDownloadPolicy *CRDDownloadPolicy `json:"crdDownloadPolicy,omitempty"`
}
// KarmadaSpec is the specification of the desired behavior of the Karmada.
type KarmadaSpec struct {
// HostCluster represents the cluster where to install the Karmada control plane.
@ -66,12 +98,53 @@ type KarmadaSpec struct {
// FeatureGates enabled by the user.
// - Failover: https://karmada.io/docs/userguide/failover/#failover
// - GragscefulEviction: https://karmada.io/docs/userguide/failover/#graceful-eviction-feature
// - GracefulEviction: https://karmada.io/docs/userguide/failover/#graceful-eviction-feature
// - PropagateDeps: https://karmada.io/docs/userguide/scheduling/propagate-dependencies
// - CustomizedClusterResourceModeling: https://karmada.io/docs/userguide/scheduling/cluster-resources#start-to-use-cluster-resource-models
// More info: https://github.com/karmada-io/karmada/blob/master/pkg/features/features.go
// +optional
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// CRDTarball specifies the source from which the Karmada CRD tarball should be downloaded, along with the download policy to use.
// If not set, the operator will download the tarball from a GitHub release.
// By default, it will download the tarball of the same version as the operator itself.
// For instance, if the operator's version is v1.10.0, the tarball will be downloaded from the following location:
// https://github.com/karmada-io/karmada/releases/download/v1.10.0/crds.tar.gz
// By default, the operator will only attempt to download the tarball if it's not yet present in the local cache.
// +optional
CRDTarball *CRDTarball `json:"crdTarball,omitempty"`
// CustomCertificate specifies the configuration to customize the certificates
// for Karmada components or control the certificate generation process, such as
// the algorithm, validity period, etc.
// Currently, it only supports customizing the CA certificate for limited components.
// +optional
CustomCertificate *CustomCertificate `json:"customCertificate,omitempty"`
// Suspend indicates that the operator should suspend reconciliation
// for this Karmada control plane and all its managed resources.
// Karmada instances for which this field is not explicitly set to `true` will continue to be reconciled as usual.
// +optional
Suspend *bool `json:"suspend,omitempty"`
}
// CustomCertificate holds the configuration for generating the certificate.
type CustomCertificate struct {
	// APIServerCACert references a Kubernetes secret containing the CA certificate
	// for component karmada-apiserver.
	// The secret must contain the following data keys:
	// - tls.crt: The TLS certificate.
	// - tls.key: The TLS private key.
	// If specified, this CA will be used to issue client certificates for
	// all components that access the APIServer as clients.
	// +optional
	APIServerCACert *LocalSecretReference `json:"apiServerCACert,omitempty"`

	// LeafCertValidityDays specifies the validity period of leaf certificates (e.g., API Server certificate) in days.
	// If not specified, the default validity period of 1 year will be used.
	// +kubebuilder:validation:Minimum=1
	// +optional
	LeafCertValidityDays *int32 `json:"leafCertValidityDays,omitempty"`
}
// ImageRegistry represents an image registry as well as the
@ -112,17 +185,21 @@ type KarmadaComponents struct {
// +optional
KarmadaScheduler *KarmadaScheduler `json:"karmadaScheduler,omitempty"`
// KarmadaWebhook holds settings to karmada-webook component of the karmada.
// KarmadaWebhook holds settings to karmada-webhook component of the karmada.
// +optional
KarmadaWebhook *KarmadaWebhook `json:"karmadaWebhook,omitempty"`
// KarmadaDescheduler holds settings to karmada-descheduler component of the karmada.
// +optional
KarmadaDescheduler *KarmadaDescheduler `json:"KarmadaDescheduler,omitempty"`
KarmadaDescheduler *KarmadaDescheduler `json:"karmadaDescheduler,omitempty"`
// KarmadaSearch holds settings to karmada search component of the karmada.
// +optional
KarmadaSearch *KarmadaSearch `json:"karmadaSearch,omitempty"`
// KarmadaMetricsAdapter holds settings to karmada metrics adapter component of the karmada.
// +optional
KarmadaMetricsAdapter *KarmadaMetricsAdapter `json:"karmadaMetricsAdapter,omitempty"`
}
// Networking contains elements describing cluster's networking configuration
@ -151,7 +228,7 @@ type LocalEtcd struct {
CommonSettings `json:",inline"`
// VolumeData describes the settings of etcd data store.
// We will support 3 modes: emtydir, hostPath, PVC. default by hostPath.
// We will support 3 modes: emptyDir, hostPath, PVC. default by hostPath.
// +optional
VolumeData *VolumeData `json:"volumeData,omitempty"`
@ -191,26 +268,39 @@ type VolumeData struct {
}
// ExternalEtcd describes an external etcd cluster.
// operator has no knowledge of where certificate files live and they must be supplied.
// operator has no knowledge of where certificate files live, and they must be supplied.
type ExternalEtcd struct {
// Endpoints of etcd members. Required for ExternalEtcd.
// +required
Endpoints []string `json:"endpoints"`
// CAData is an SSL Certificate Authority file used to secure etcd communication.
// Required if using a TLS connection.
CAData []byte `json:"caData"`
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
CAData []byte `json:"caData,omitempty"`
// CertData is an SSL certification file used to secure etcd communication.
// Required if using a TLS connection.
CertData []byte `json:"certData"`
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
CertData []byte `json:"certData,omitempty"`
// KeyData is an SSL key file used to secure etcd communication.
// Required if using a TLS connection.
KeyData []byte `json:"keyData"`
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
KeyData []byte `json:"keyData,omitempty"`
// SecretRef references a Kubernetes secret containing the etcd connection credentials.
// The secret must contain the following data keys:
// ca.crt: The Certificate Authority (CA) certificate data.
// tls.crt: The TLS certificate data used for verifying the etcd server's certificate.
// tls.key: The TLS private key.
// Required to configure the connection to an external etcd cluster.
// +required
SecretRef LocalSecretReference `json:"secretRef"`
}
// KarmadaAPIServer holds settings to kube-apiserver component of the kubernetes.
// Karmada uses it as it's own apiserver in order to provide Kubernetes-native APIs.
// Karmada uses it as its own apiserver in order to provide Kubernetes-native APIs.
type KarmadaAPIServer struct {
// CommonSettings holds common settings to kubernetes api server.
CommonSettings `json:",inline"`
@ -219,11 +309,31 @@ type KarmadaAPIServer struct {
// +optional
ServiceSubnet *string `json:"serviceSubnet,omitempty"`
// ServiceType represents the service type of karmada apiserver.
// it is Nodeport by default.
// ServiceType represents the service type of Karmada API server.
// Valid options are: "ClusterIP", "NodePort", "LoadBalancer".
// Defaults to "ClusterIP".
//
// +kubebuilder:default="ClusterIP"
// +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer
// +optional
ServiceType corev1.ServiceType `json:"serviceType,omitempty"`
// LoadBalancerClass specifies the load balancer implementation class for the Karmada API server.
// This field is applicable only when ServiceType is set to LoadBalancer.
// If specified, the service will be processed by the load balancer implementation that matches the specified class.
// By default, this is not set and the LoadBalancer type of Service uses the cloud provider's default load balancer
// implementation.
// Once set, it cannot be changed. The value must be a label-style identifier, with an optional prefix such as
// "internal-vip" or "example.com/internal-vip".
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
// +optional
LoadBalancerClass *string `json:"loadBalancerClass,omitempty"`
// ServiceAnnotations is an extra set of annotations for service of karmada apiserver.
// more info: https://github.com/karmada-io/karmada/issues/4634
// +optional
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
// ExtraArgs is an extra set of flags to pass to the kube-apiserver component or
// override. A key in this map is the flag name as it appears on the command line except
// without leading dash(es).
@ -231,7 +341,7 @@ type KarmadaAPIServer struct {
// Note: This is a temporary solution to allow for the configuration of the
// kube-apiserver component. In the future, we will provide a more structured way
// to configure the component. Once that is done, this field will be discouraged to be used.
// Incorrect settings on this feild maybe lead to the corresponding component in an unhealthy
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
@ -240,6 +350,24 @@ type KarmadaAPIServer struct {
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
// ExtraVolumes specifies a list of extra volumes for the API server's pod
// To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance,
// the operator will automatically attach volumes for the API server pod needed to configure things such as TLS,
// SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability,
// there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumes, in conjunction
// with ExtraArgs and ExtraVolumeMounts can be used to fulfil those use cases.
// +optional
ExtraVolumes []corev1.Volume `json:"extraVolumes,omitempty"`
// ExtraVolumeMounts specifies a list of extra volume mounts to be mounted into the API server's container
// To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance,
// the operator will automatically mount volumes into the API server container needed to configure things such as TLS,
// SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability,
// there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumeMounts, in conjunction
// with ExtraArgs and ExtraVolumes can be used to fulfil those use cases.
// +optional
ExtraVolumeMounts []corev1.VolumeMount `json:"extraVolumeMounts,omitempty"`
// CertSANs sets extra Subject Alternative Names for the API Server signing cert.
// +optional
CertSANs []string `json:"certSANs,omitempty"`
@ -248,6 +376,12 @@ type KarmadaAPIServer struct {
// More info: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
// +optional
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// SidecarContainers specifies a list of sidecar containers to be deployed
// within the Karmada API server pod.
// This enables users to integrate auxiliary services such as KMS plugins for configuring encryption at rest.
// +optional
SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"`
}
// KarmadaAggregatedAPIServer holds settings to karmada-aggregated-apiserver component of the karmada.
@ -262,11 +396,11 @@ type KarmadaAggregatedAPIServer struct {
// Note: This is a temporary solution to allow for the configuration of the
// karmada-aggregated-apiserver component. In the future, we will provide a more structured way
// to configure the component. Once that is done, this field will be discouraged to be used.
// Incorrect settings on this feild maybe lead to the corresponding component in an unhealthy
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
// https://github.com/karmada-io/karmada/blob/master/cmd/aggregated-apiserver/app/options/options.go
// https://karmada.io/docs/reference/components/karmada-aggregated-apiserver
// for details.
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
@ -321,7 +455,7 @@ type KubeControllerManager struct {
// https://karmada.io/docs/administrator/configuration/configure-controllers#kubernetes-controllers
//
// Others are disabled by default. If you want to enable or disable other controllers, you
// have to explicitly specify all the controllers that kube-controller-manager shoud enable
// have to explicitly specify all the controllers that kube-controller-manager should enable
// at startup phase.
// +optional
Controllers []string `json:"controllers,omitempty"`
@ -333,7 +467,7 @@ type KubeControllerManager struct {
// Note: This is a temporary solution to allow for the configuration of the
// kube-controller-manager component. In the future, we will provide a more structured way
// to configure the component. Once that is done, this field will be discouraged to be used.
// Incorrect settings on this feild maybe lead to the corresponding component in an unhealthy
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
@ -375,11 +509,11 @@ type KarmadaControllerManager struct {
// Note: This is a temporary solution to allow for the configuration of the
// karmada-controller-manager component. In the future, we will provide a more structured way
// to configure the component. Once that is done, this field will be discouraged to be used.
// Incorrect settings on this feild maybe lead to the corresponding component in an unhealthy
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
// https://github.com/karmada-io/karmada/blob/master/cmd/controller-manager/app/options/options.go
// https://karmada.io/docs/reference/components/karmada-controller-manager
// for details.
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
@ -406,11 +540,11 @@ type KarmadaScheduler struct {
// Note: This is a temporary solution to allow for the configuration of the karmada-scheduler
// component. In the future, we will provide a more structured way to configure the component.
// Once that is done, this field will be discouraged to be used.
// Incorrect settings on this feild maybe lead to the corresponding component in an unhealthy
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
// https://github.com/karmada-io/karmada/blob/master/cmd/scheduler/app/options/options.go
// https://karmada.io/docs/reference/components/karmada-scheduler
// for details.
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
@ -434,11 +568,11 @@ type KarmadaDescheduler struct {
// Note: This is a temporary solution to allow for the configuration of the karmada-descheduler
// component. In the future, we will provide a more structured way to configure the component.
// Once that is done, this field will be discouraged to be used.
// Incorrect settings on this feild maybe lead to the corresponding component in an unhealthy
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
// https://github.com/karmada-io/karmada/blob/master/cmd/descheduler/app/options/options.go
// https://karmada.io/docs/reference/components/karmada-descheduler
// for details.
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
@ -449,18 +583,40 @@ type KarmadaSearch struct {
// CommonSettings holds common settings to karmada search.
CommonSettings `json:",inline"`
// ExtraArgs is an extra set of flags to pass to the karmada-descheduler component or override.
// ExtraArgs is an extra set of flags to pass to the karmada-search component or override.
// A key in this map is the flag name as it appears on the command line except without
// leading dash(es).
//
// Note: This is a temporary solution to allow for the configuration of the karmada-descheduler
// Note: This is a temporary solution to allow for the configuration of the karmada-search
// component. In the future, we will provide a more structured way to configure the component.
// Once that is done, this field will be discouraged to be used.
// Incorrect settings on this feild maybe lead to the corresponding component in an unhealthy
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
// https://github.com/karmada-io/karmada/blob/master/cmd/descheduler/app/options/options.go
// https://karmada.io/docs/reference/components/karmada-search
// for details.
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
}
// KarmadaMetricsAdapter holds settings to karmada-metrics-adapter component of the karmada.
type KarmadaMetricsAdapter struct {
// CommonSettings holds common settings to karmada metrics adapter.
CommonSettings `json:",inline"`
// ExtraArgs is an extra set of flags to pass to the karmada-metrics-adapter component or override.
// A key in this map is the flag name as it appears on the command line except without
// leading dash(es).
//
// Note: This is a temporary solution to allow for the configuration of the karmada-metrics-adapter
// component. In the future, we will provide a more structured way to configure the component.
// Once that is done, this field will be discouraged to be used.
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
// https://karmada.io/docs/reference/components/karmada-metrics-adapter
// for details.
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
@ -478,11 +634,11 @@ type KarmadaWebhook struct {
// Note: This is a temporary solution to allow for the configuration of the
// karmada-webhook component. In the future, we will provide a more structured way
// to configure the component. Once that is done, this field will be discouraged to be used.
// Incorrect settings on this feild maybe lead to the corresponding component in an unhealthy
// Incorrect settings on this field may cause the corresponding component to be in an unhealthy
// state. Before you do it, please confirm that you understand the risks of this configuration.
//
// For supported flags, please see
// https://github.com/karmada-io/karmada/blob/master/cmd/webhook/app/options/options.go
// https://karmada.io/docs/reference/components/karmada-webhook
// for details.
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
@ -493,6 +649,11 @@ type CommonSettings struct {
// Image allows to customize the image used for the component.
Image `json:",inline"`
// ImagePullPolicy defines the policy for pulling the container image.
// If not specified, it defaults to IfNotPresent.
// +optional
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// Number of desired pods. This is a pointer to distinguish between explicit
// zero and not specified. Defaults to 1.
// +optional
@ -516,6 +677,12 @@ type CommonSettings struct {
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
// +optional
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// PriorityClassName specifies the priority class name for the component.
// If not specified, it defaults to "system-node-critical".
// +kubebuilder:default="system-node-critical"
// +optional
PriorityClassName string `json:"priorityClassName,omitempty"`
}
// Image allows to customize the image used for components.
@ -553,19 +720,15 @@ type HostCluster struct {
Networking *Networking `json:"networking,omitempty"`
}
// ConditionType declarative karmada condition type of karmada installtion.
// ConditionType declarative karmada condition type of karmada installation.
type ConditionType string
const (
// Unknown represents a condition type where the karmada has not been reconciled by the operator,
// or its condition is unpredictable.
Unknown ConditionType = "Unknown"
// Ready represent a condition type the all installtion process to karmada have compaleted.
// Ready represents a condition type indicating that the whole installation process for karmada has completed.
Ready ConditionType = "Ready"
)
// KarmadaStatus difine the most recently observed status of the Karmada.
// KarmadaStatus defines the most recently observed status of the Karmada.
type KarmadaStatus struct {
// ObservedGeneration is the last observed generation.
// +optional
@ -575,17 +738,32 @@ type KarmadaStatus struct {
// +optional
SecretRef *LocalSecretReference `json:"secretRef,omitempty"`
// KarmadaVersion represente the karmada version.
// KarmadaVersion represents the karmada version.
// +optional
KarmadaVersion string `json:"karmadaVersion,omitempty"`
// KubernetesVersion represente the karmada-apiserver version.
// KubernetesVersion represents the karmada-apiserver version.
// +optional
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
// Conditions represents the latest available observations of a karmada's current state.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// APIServerService reports the location of the Karmada API server service which
// can be used by third-party applications to discover the Karmada Service, e.g.
// expose the service outside the cluster by Ingress.
// +optional
APIServerService *APIServerService `json:"apiServerService,omitempty"`
}
// APIServerService tells the location of Karmada API server service.
// Currently, it only includes the name of the service. The namespace
// of the service is the same as the namespace of the current Karmada object.
type APIServerService struct {
// Name represents the name of the Karmada API Server service.
// +required
Name string `json:"name"`
}
// LocalSecretReference is a reference to a secret within the enclosing

View File

@ -1,16 +1,74 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIServerService) DeepCopyInto(out *APIServerService) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerService.
func (in *APIServerService) DeepCopy() *APIServerService {
if in == nil {
return nil
}
out := new(APIServerService)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CRDTarball) DeepCopyInto(out *CRDTarball) {
*out = *in
if in.HTTPSource != nil {
in, out := &in.HTTPSource, &out.HTTPSource
*out = new(HTTPSource)
**out = **in
}
if in.CRDDownloadPolicy != nil {
in, out := &in.CRDDownloadPolicy, &out.CRDDownloadPolicy
*out = new(CRDDownloadPolicy)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDTarball.
func (in *CRDTarball) DeepCopy() *CRDTarball {
if in == nil {
return nil
}
out := new(CRDTarball)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonSettings) DeepCopyInto(out *CommonSettings) {
*out = *in
@ -48,6 +106,32 @@ func (in *CommonSettings) DeepCopy() *CommonSettings {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomCertificate) DeepCopyInto(out *CustomCertificate) {
*out = *in
if in.APIServerCACert != nil {
in, out := &in.APIServerCACert, &out.APIServerCACert
*out = new(LocalSecretReference)
**out = **in
}
if in.LeafCertValidityDays != nil {
in, out := &in.LeafCertValidityDays, &out.LeafCertValidityDays
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCertificate.
func (in *CustomCertificate) DeepCopy() *CustomCertificate {
if in == nil {
return nil
}
out := new(CustomCertificate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Etcd) DeepCopyInto(out *Etcd) {
*out = *in
@ -97,6 +181,7 @@ func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) {
*out = make([]byte, len(*in))
copy(*out, *in)
}
out.SecretRef = in.SecretRef
return
}
@ -110,6 +195,22 @@ func (in *ExternalEtcd) DeepCopy() *ExternalEtcd {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPSource) DeepCopyInto(out *HTTPSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSource.
func (in *HTTPSource) DeepCopy() *HTTPSource {
if in == nil {
return nil
}
out := new(HTTPSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostCluster) DeepCopyInto(out *HostCluster) {
*out = *in
@ -205,6 +306,18 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
*out = new(string)
**out = **in
}
if in.LoadBalancerClass != nil {
in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
*out = new(string)
**out = **in
}
if in.ServiceAnnotations != nil {
in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ExtraArgs != nil {
in, out := &in.ExtraArgs, &out.ExtraArgs
*out = make(map[string]string, len(*in))
@ -212,6 +325,20 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
(*out)[key] = val
}
}
if in.ExtraVolumes != nil {
in, out := &in.ExtraVolumes, &out.ExtraVolumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtraVolumeMounts != nil {
in, out := &in.ExtraVolumeMounts, &out.ExtraVolumeMounts
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CertSANs != nil {
in, out := &in.CertSANs, &out.CertSANs
*out = make([]string, len(*in))
@ -224,6 +351,13 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
(*out)[key] = val
}
}
if in.SidecarContainers != nil {
in, out := &in.SidecarContainers, &out.SidecarContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -321,6 +455,11 @@ func (in *KarmadaComponents) DeepCopyInto(out *KarmadaComponents) {
*out = new(KarmadaSearch)
(*in).DeepCopyInto(*out)
}
if in.KarmadaMetricsAdapter != nil {
in, out := &in.KarmadaMetricsAdapter, &out.KarmadaMetricsAdapter
*out = new(KarmadaMetricsAdapter)
(*in).DeepCopyInto(*out)
}
return
}
@ -427,6 +566,30 @@ func (in *KarmadaList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KarmadaMetricsAdapter) DeepCopyInto(out *KarmadaMetricsAdapter) {
*out = *in
in.CommonSettings.DeepCopyInto(&out.CommonSettings)
if in.ExtraArgs != nil {
in, out := &in.ExtraArgs, &out.ExtraArgs
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KarmadaMetricsAdapter.
func (in *KarmadaMetricsAdapter) DeepCopy() *KarmadaMetricsAdapter {
if in == nil {
return nil
}
out := new(KarmadaMetricsAdapter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KarmadaScheduler) DeepCopyInto(out *KarmadaScheduler) {
*out = *in
@ -507,6 +670,21 @@ func (in *KarmadaSpec) DeepCopyInto(out *KarmadaSpec) {
(*out)[key] = val
}
}
if in.CRDTarball != nil {
in, out := &in.CRDTarball, &out.CRDTarball
*out = new(CRDTarball)
(*in).DeepCopyInto(*out)
}
if in.CustomCertificate != nil {
in, out := &in.CustomCertificate, &out.CustomCertificate
*out = new(CustomCertificate)
(*in).DeepCopyInto(*out)
}
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
return
}
@ -530,11 +708,16 @@ func (in *KarmadaStatus) DeepCopyInto(out *KarmadaStatus) {
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.APIServerService != nil {
in, out := &in.APIServerService, &out.APIServerService
*out = new(APIServerService)
**out = **in
}
return
}
@ -682,17 +865,17 @@ func (in *VolumeData) DeepCopyInto(out *VolumeData) {
*out = *in
if in.VolumeClaim != nil {
in, out := &in.VolumeClaim, &out.VolumeClaim
*out = new(corev1.PersistentVolumeClaimTemplate)
*out = new(v1.PersistentVolumeClaimTemplate)
(*in).DeepCopyInto(*out)
}
if in.HostPath != nil {
in, out := &in.HostPath, &out.HostPath
*out = new(corev1.HostPathVolumeSource)
*out = new(v1.HostPathVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.EmptyDir != nil {
in, out := &in.EmptyDir, &out.EmptyDir
*out = new(corev1.EmptyDirVolumeSource)
*out = new(v1.EmptyDirVolumeSource)
(*in).DeepCopyInto(*out)
}
return

View File

@ -0,0 +1,129 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:path=clustertaintpolicies,scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterTaintPolicy automates taint management on Cluster objects based
// on declarative conditions.
// The system evaluates AddOnConditions to determine when to add taints,
// and RemoveOnConditions to determine when to remove taints.
// AddOnConditions are evaluated before RemoveOnConditions.
// Taints are NEVER automatically removed when the ClusterTaintPolicy is deleted.
type ClusterTaintPolicy struct {
	// Standard Kubernetes type metadata (apiVersion/kind).
	metav1.TypeMeta `json:",inline"`
	// Standard object metadata (name, labels, annotations, ...).
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec represents the desired behavior of ClusterTaintPolicy.
	// +required
	Spec ClusterTaintPolicySpec `json:"spec"`
}
// ClusterTaintPolicySpec represents the desired behavior of ClusterTaintPolicy.
type ClusterTaintPolicySpec struct {
	// TargetClusters specifies the clusters that ClusterTaintPolicy needs
	// to pay attention to.
	// For clusters that no longer match the TargetClusters, the taints
	// will be kept unchanged.
	// If targetClusters is not set, any cluster can be selected.
	// +optional
	TargetClusters *ClusterAffinity `json:"targetClusters,omitempty"`

	// AddOnConditions defines the conditions to match for triggering
	// the controller to add taints on the cluster object.
	// The match conditions are ANDed: every listed condition must match
	// before the taints are added.
	// If AddOnConditions is empty, no taints will be added.
	// +optional
	AddOnConditions []MatchCondition `json:"addOnConditions,omitempty"`

	// RemoveOnConditions defines the conditions to match for triggering
	// the controller to remove taints from the cluster object.
	// The match conditions are ANDed: every listed condition must match
	// before the taints are removed.
	// If RemoveOnConditions is empty, no taints will be removed.
	// +optional
	RemoveOnConditions []MatchCondition `json:"removeOnConditions,omitempty"`

	// Taints specifies the taints that need to be added or removed on
	// the cluster object which match with TargetClusters.
	// If the Taints is modified, the system will process the taints based on
	// the latest value of Taints during the next condition-triggered execution,
	// regardless of whether the taint has been added or removed.
	// +kubebuilder:validation:MinItems=1
	// +required
	Taints []Taint `json:"taints"`
}
// MatchCondition represents a single condition to evaluate against a
// cluster's status conditions when deciding whether to add or remove the
// failover-relevant taints on target clusters.
type MatchCondition struct {
	// ConditionType specifies the ClusterStatus condition type to evaluate.
	// +required
	ConditionType string `json:"conditionType"`

	// Operator represents a relationship to a set of values.
	// Valid operators are In, NotIn.
	// +required
	Operator MatchConditionOperator `json:"operator"`

	// StatusValues is an array of metav1.ConditionStatus values
	// (e.g. True/False/Unknown). Each item specifies a ClusterStatus
	// condition status to compare against, per the Operator.
	// +required
	StatusValues []metav1.ConditionStatus `json:"statusValues"`
}
// MatchConditionOperator is an operator that can be used in a match
// condition. Valid operators are In and NotIn.
type MatchConditionOperator string

const (
	// MatchConditionOpIn represents the operator In.
	MatchConditionOpIn MatchConditionOperator = "In"

	// MatchConditionOpNotIn represents the operator NotIn.
	MatchConditionOpNotIn MatchConditionOperator = "NotIn"
)
// Taint describes the taint that needs to be applied to the cluster.
type Taint struct {
	// Key represents the taint key to be applied to a cluster.
	// +required
	Key string `json:"key"`

	// Effect represents the taint effect to be applied to a cluster
	// (e.g. NoSchedule, NoExecute).
	// +required
	Effect corev1.TaintEffect `json:"effect"`

	// Value represents the taint value corresponding to the taint key.
	// +optional
	Value string `json:"value,omitempty"`
}
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterTaintPolicyList contains a list of ClusterTaintPolicy
type ClusterTaintPolicyList struct {
	// Standard Kubernetes type metadata (apiVersion/kind).
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata (resourceVersion, continue token, ...).
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the ClusterTaintPolicy objects in this list.
	Items []ClusterTaintPolicy `json:"items"`
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -18,9 +34,11 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={karmada-io}
// +kubebuilder:resource:path=federatedresourcequotas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.overall`,name=`OVERALL`,type=string
// +kubebuilder:printcolumn:JSONPath=`.status.overallUsed`,name=`OVERALL_USED`,type=string
// FederatedResourceQuota sets aggregate quota restrictions enforced per namespace across all clusters.
type FederatedResourceQuota struct {
@ -42,9 +60,16 @@ type FederatedResourceQuotaSpec struct {
// +required
Overall corev1.ResourceList `json:"overall"`
// StaticAssignments represents the subset of desired hard limits for each cluster.
// Note: for clusters not present in this list, Karmada will set an empty ResourceQuota to them, which means these
// clusters will have no quotas in the referencing namespace.
// StaticAssignments specifies ResourceQuota settings for specific clusters.
// If non-empty, Karmada will create ResourceQuotas in the corresponding clusters.
// Clusters not listed here or when StaticAssignments is empty will have no ResourceQuotas created.
//
// This field addresses multi-cluster configuration management challenges by allowing centralized
// control over ResourceQuotas across clusters.
//
// Note: The Karmada scheduler currently does NOT use this configuration for scheduling decisions.
// Future updates may integrate it into the scheduling logic.
//
// +optional
StaticAssignments []StaticClusterAssignment `json:"staticAssignments,omitempty"`

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -27,7 +43,7 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:shortName=op,categories={karmada-io}
// +kubebuilder:resource:path=overridepolicies,scope=Namespaced,shortName=op,categories={karmada-io}
// OverridePolicy represents the policy that overrides a group of resources to one or more clusters.
type OverridePolicy struct {
@ -85,6 +101,7 @@ type RuleWithCluster struct {
// - ArgsOverrider
// - LabelsOverrider
// - AnnotationsOverrider
// - FieldOverrider
// - Plaintext
type Overriders struct {
// Plaintext represents override rules defined with plaintext overriders.
@ -110,6 +127,13 @@ type Overriders struct {
// AnnotationsOverrider represents the rules dedicated to handling workload annotations
// +optional
AnnotationsOverrider []LabelAnnotationOverrider `json:"annotationsOverrider,omitempty"`
// FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource.
// This allows changing a single field within the resource with multiple operations.
// It is designed to handle structured field values such as those found in ConfigMaps or Secrets.
// The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.
// +optional
FieldOverrider []FieldOverrider `json:"fieldOverrider,omitempty"`
}
// LabelAnnotationOverrider represents the rules dedicated to handling workload labels/annotations
@ -239,6 +263,65 @@ const (
OverriderOpReplace OverriderOperator = "replace"
)
// FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource.
// This allows changing a single field within the resource with multiple operations.
// It is designed to handle structured field values such as those found in ConfigMaps or Secrets.
// The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.
// Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously.
type FieldOverrider struct {
	// FieldPath specifies the initial location in the instance document where the operation should take place.
	// The path uses RFC 6901 (JSON Pointer) syntax to navigate into nested structures. For example, the path
	// "/data/db-config.yaml" addresses the data key named "db-config.yaml" in a ConfigMap.
	// +required
	FieldPath string `json:"fieldPath"`

	// JSON represents the operations performed on the JSON document specified by the FieldPath.
	// Mutually exclusive with YAML within a single FieldOverrider instance (see the type-level note).
	// +optional
	JSON []JSONPatchOperation `json:"json,omitempty"`

	// YAML represents the operations performed on the YAML document specified by the FieldPath.
	// Mutually exclusive with JSON within a single FieldOverrider instance (see the type-level note).
	// +optional
	YAML []YAMLPatchOperation `json:"yaml,omitempty"`
}
// JSONPatchOperation represents a single field modification operation for JSON format.
type JSONPatchOperation struct {
	// SubPath specifies the relative location within the initial FieldPath where the operation should take place.
	// The path uses RFC 6901 (JSON Pointer) syntax to navigate into nested structures.
	// +required
	SubPath string `json:"subPath"`

	// Operator indicates the operation on the target field.
	// Available operators are: "add", "remove", and "replace".
	// +kubebuilder:validation:Enum=add;remove;replace
	// +required
	Operator OverriderOperator `json:"operator"`

	// Value is the new value to set for the specified field if the operation is "add" or "replace".
	// For the "remove" operation, this field is ignored.
	// +optional
	Value apiextensionsv1.JSON `json:"value,omitempty"`
}
// YAMLPatchOperation represents a single field modification operation for YAML format.
type YAMLPatchOperation struct {
	// SubPath specifies the relative location within the initial FieldPath where the operation should take place.
	// The path uses RFC 6901 (JSON Pointer) syntax to navigate into nested structures.
	// +required
	SubPath string `json:"subPath"`

	// Operator indicates the operation on the target field.
	// Available operators are: "add", "remove", and "replace".
	// +kubebuilder:validation:Enum=add;remove;replace
	// +required
	Operator OverriderOperator `json:"operator"`

	// Value is the new value to set for the specified field if the operation is "add" or "replace".
	// For the "remove" operation, this field is ignored.
	// +optional
	Value apiextensionsv1.JSON `json:"value,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// OverridePolicyList is a collection of OverridePolicy.
@ -252,7 +335,7 @@ type OverridePolicyList struct {
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:scope="Cluster",shortName=cop,categories={karmada-io}
// +kubebuilder:resource:path=clusteroverridepolicies,scope="Cluster",shortName=cop,categories={karmada-io}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterOverridePolicy represents the cluster-wide policy that overrides a group of resources to one or more clusters.

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// GetName returns the name of OverridePolicy

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// ExplicitPriority returns the explicit priority declared

View File

@ -1,9 +1,25 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"testing"
"k8s.io/utils/pointer"
"k8s.io/utils/ptr"
)
func TestPropagationPolicy_ExplicitPriority(t *testing.T) {
@ -18,7 +34,7 @@ func TestPropagationPolicy_ExplicitPriority(t *testing.T) {
},
{
name: "expected to be declared priority in pp",
declaredPriority: pointer.Int32(20),
declaredPriority: ptr.To[int32](20),
expectedPriority: 20,
},
}
@ -46,7 +62,7 @@ func TestClusterPropagationPolicy_ExplicitPriority(t *testing.T) {
},
{
name: "expected to be declared priority in cpp",
declaredPriority: pointer.Int32(20),
declaredPriority: ptr.To[int32](20),
expectedPriority: 20,
},
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -27,7 +43,10 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:shortName=pp,categories={karmada-io}
// +kubebuilder:resource:path=propagationpolicies,scope=Namespaced,shortName=pp,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.conflictResolution`,name="CONFLICT-RESOLUTION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.priority`,name="PRIORITY",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// PropagationPolicy represents the policy that propagates a group of resources to one or more clusters.
type PropagationPolicy struct {
@ -76,6 +95,7 @@ type PropagationSpec struct {
// template be processed.
// Once a resource template has been claimed by a policy, by default it will
// not be preempted by following policies even with a higher priority.
// See Preemption for more details.
//
// In case of two policies have the same priority, the one with a more precise
// matching rules in ResourceSelectors wins:
@ -91,6 +111,14 @@ type PropagationSpec struct {
// +kubebuilder:default=0
Priority *int32 `json:"priority,omitempty"`
// Preemption declares the behaviors for preempting.
// Valid options are "Always" and "Never".
//
// +kubebuilder:default="Never"
// +kubebuilder:validation:Enum=Always;Never
// +optional
Preemption PreemptionBehavior `json:"preemption,omitempty"`
// DependentOverrides represents the list of overrides(OverridePolicy)
// which must present before the current PropagationPolicy takes effect.
//
@ -114,6 +142,81 @@ type PropagationSpec struct {
// If this value is nil, failover is disabled.
// +optional
Failover *FailoverBehavior `json:"failover,omitempty"`
// ConflictResolution declares how potential conflict should be handled when
// a resource that is being propagated already exists in the target cluster.
//
// It defaults to "Abort" which means stop propagating to avoid unexpected
// overwrites. The "Overwrite" might be useful when migrating legacy cluster
// resources to Karmada, in which case conflict is predictable and can be
// instructed to Karmada take over the resource by overwriting.
//
// +kubebuilder:default="Abort"
// +kubebuilder:validation:Enum=Abort;Overwrite
// +optional
ConflictResolution ConflictResolution `json:"conflictResolution,omitempty"`
// ActivationPreference indicates how the referencing resource template will
// be propagated, in case of policy changes.
//
// If empty, the resource template will respond to policy changes
// immediately, in other words, any policy changes will drive the resource
// template to be propagated immediately as per the current propagation rules.
//
// If the value is 'Lazy' means the policy changes will not take effect for now
// but defer to the resource template changes, in other words, the resource
// template will not be propagated as per the current propagation rules until
// there is an update on it.
// This is an experimental feature that might help in a scenario where a policy
// manages huge amount of resource templates, changes to a policy typically
// affect numerous applications simultaneously. A minor misconfiguration
// could lead to widespread failures. With this feature, the change can be
// gradually rolled out through iterative modifications of resource templates.
//
// +kubebuilder:validation:Enum=Lazy
// +optional
ActivationPreference ActivationPreference `json:"activationPreference,omitempty"`
// Suspension declares the policy for suspending different aspects of propagation.
// nil means no suspension. no default values.
// +optional
Suspension *Suspension `json:"suspension,omitempty"`
// PreserveResourcesOnDeletion controls whether resources should be preserved on the
// member clusters when the resource template is deleted.
// If set to true, resources will be preserved on the member clusters.
// Default is false, which means resources will be deleted along with the resource template.
//
// This setting is particularly useful during workload migration scenarios to ensure
// that rollback can occur quickly without affecting the workloads running on the
// member clusters.
//
// Additionally, this setting applies uniformly across all member clusters and will not
// selectively control preservation on only some clusters.
//
// Note: This setting does not apply to the deletion of the policy itself.
// When the policy is deleted, the resource templates and their corresponding
// propagated resources in member clusters will remain unchanged unless explicitly deleted.
//
// +optional
PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
// SchedulePriority defines how Karmada should resolve the priority and preemption policy
// for workload scheduling.
//
// This setting is useful for controlling the scheduling behavior of offline workloads.
// By setting a higher or lower priority, users can control which workloads are scheduled first.
// Additionally, it allows specifying a preemption policy where higher-priority workloads can
// preempt lower-priority ones in scenarios of resource contention.
//
// Note: This feature is currently in the alpha stage. The priority-based scheduling functionality is
// controlled by the PriorityBasedScheduling feature gate, and preemption is controlled by the
// PriorityBasedPreemptiveScheduling feature gate. Currently, only priority-based scheduling is
// supported. Preemption functionality is not yet available and will be introduced in future
// releases as the feature matures.
//
// +optional
SchedulePriority *SchedulePriority `json:"schedulePriority,omitempty"`
}
// ResourceSelector the resources will be selected.
@ -148,13 +251,44 @@ type FieldSelector struct {
MatchExpressions []corev1.NodeSelectorRequirement `json:"matchExpressions,omitempty"`
}
// PurgeMode represents that how to deal with the legacy applications on the
// Suspension defines the policy for suspending different aspects of propagation.
type Suspension struct {
	// Dispatching controls whether dispatching should be suspended.
	// nil means not suspended; there is no default value, and only 'true' is accepted.
	// Note: true means stop propagating to all clusters. It can not co-exist
	// with DispatchingOnClusters, which is used to suspend particular clusters.
	// +optional
	Dispatching *bool `json:"dispatching,omitempty"`

	// DispatchingOnClusters declares a list of clusters to which the dispatching
	// should be suspended.
	// Note: It can not co-exist with Dispatching, which is used to suspend all.
	// +optional
	DispatchingOnClusters *SuspendClusters `json:"dispatchingOnClusters,omitempty"`
}
// SuspendClusters represents a group of clusters that should be suspended from propagating.
// Note: There is no plan to introduce a label selector or field selector to select clusters,
// as doing so would make the system unpredictable.
type SuspendClusters struct {
	// ClusterNames is the list of clusters to be selected.
	// +optional
	ClusterNames []string `json:"clusterNames,omitempty"`
}
// PurgeMode represents how to deal with the legacy application on the
// cluster from which the application is migrated.
// See the PurgeMode constants below for the valid options.
type PurgeMode string
const (
// Immediately represents that Karmada will immediately evict the legacy
// application.
// application. This is useful in scenarios where an application can not
// tolerate two instances running simultaneously.
// For example, the Flink application supports exactly-once state consistency,
// which means it requires that no two instances of the application are running
// at the same time. During a failover, it is crucial to ensure that the old
// application is removed before creating a new one to avoid duplicate
// processing and maintaining state consistency.
Immediately PurgeMode = "Immediately"
// Graciously represents that Karmada will wait for the application to
// come back to healthy on the new cluster or after a timeout is reached
@ -194,6 +328,7 @@ type ApplicationFailoverBehavior struct {
// cluster from which the application is migrated.
// Valid options are "Immediately", "Graciously" and "Never".
// Defaults to "Graciously".
// +kubebuilder:validation:Enum=Immediately;Graciously;Never
// +kubebuilder:default=Graciously
// +optional
PurgeMode PurgeMode `json:"purgeMode,omitempty"`
@ -206,6 +341,23 @@ type ApplicationFailoverBehavior struct {
// Value must be positive integer.
// +optional
GracePeriodSeconds *int32 `json:"gracePeriodSeconds,omitempty"`
// StatePreservation defines the policy for preserving and restoring state data
// during failover events for stateful applications.
//
// When an application fails over from one cluster to another, this policy enables
// the extraction of critical data from the original resource configuration.
// Upon successful migration, the extracted data is then re-injected into the new
// resource, ensuring that the application can resume operation with its previous
// state intact.
// This is particularly useful for stateful applications where maintaining data
// consistency across failover events is crucial.
// If not specified, means no state data will be preserved.
//
// Note: This requires the StatefulFailoverInjection feature gate to be enabled,
// which is alpha.
// +optional
StatePreservation *StatePreservation `json:"statePreservation,omitempty"`
}
// DecisionConditions represents the decision conditions of performing the failover process.
@ -219,6 +371,41 @@ type DecisionConditions struct {
TolerationSeconds *int32 `json:"tolerationSeconds,omitempty"`
}
// StatePreservation defines the policy for preserving state during failover events.
type StatePreservation struct {
	// Rules contains a list of StatePreservationRule configurations.
	// Each rule specifies a JSONPath expression targeting specific pieces of
	// state data to be preserved during failover events. An AliasLabelName is
	// associated with each rule, serving as a label key when the preserved data
	// is passed to the new cluster.
	// +required
	Rules []StatePreservationRule `json:"rules"`
}
// StatePreservationRule defines a single rule for state preservation.
// It includes a JSONPath expression and an alias name that will be used
// as a label key when passing state information to the new cluster.
type StatePreservationRule struct {
	// AliasLabelName is the name that will be used as a label key when the preserved
	// data is passed to the new cluster. This facilitates the injection of the
	// preserved state back into the application resources during recovery.
	// +required
	AliasLabelName string `json:"aliasLabelName"`

	// JSONPath is the JSONPath template used to identify the state data
	// to be preserved from the original resource configuration.
	// The JSONPath syntax follows the Kubernetes specification:
	// https://kubernetes.io/docs/reference/kubectl/jsonpath/
	//
	// Note: The JSONPath expression will start searching from the "status" field of
	// the API resource object by default. For example, to extract the "availableReplicas"
	// from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
	// "{.status.availableReplicas}".
	//
	// +required
	JSONPath string `json:"jsonPath"`
}
// Placement represents the rule for select clusters.
type Placement struct {
// ClusterAffinity represents scheduling restrictions to a certain set of clusters.
@ -443,6 +630,101 @@ const (
DynamicWeightByAvailableReplicas DynamicWeightFactor = "AvailableReplicas"
)
// PreemptionBehavior describes whether and how to preempt resources that are
// claimed by lower-priority PropagationPolicy(ClusterPropagationPolicy).
// +enum
type PreemptionBehavior string

const (
	// PreemptAlways means that preemption is allowed.
	//
	// If it is applied to a PropagationPolicy, it can preempt any resource as
	// per Priority, regardless of whether it has been claimed by a PropagationPolicy
	// or a ClusterPropagationPolicy, as long as it can match the rules defined
	// in ResourceSelector. In addition, if a resource has already been claimed
	// by a ClusterPropagationPolicy, the PropagationPolicy can still preempt it
	// without considering Priority.
	//
	// If it is applied to a ClusterPropagationPolicy, it can only preempt from
	// another ClusterPropagationPolicy; preempting from a PropagationPolicy is
	// not allowed.
	PreemptAlways PreemptionBehavior = "Always"

	// PreemptNever means that a PropagationPolicy(ClusterPropagationPolicy) never
	// preempts resources.
	PreemptNever PreemptionBehavior = "Never"
)
// ConflictResolution describes how to resolve a conflict encountered during
// propagation, especially when the resource already exists in a member cluster.
type ConflictResolution string

const (
	// ConflictOverwrite means the conflict is resolved by overwriting the
	// existing resource with the propagating resource template.
	ConflictOverwrite ConflictResolution = "Overwrite"

	// ConflictAbort means the conflict is not resolved and propagation stops.
	ConflictAbort ConflictResolution = "Abort"
)
// ActivationPreference indicates how the referencing resource template will be propagated, in case of policy changes.
// An empty value means policy changes take effect immediately.
type ActivationPreference string

const (
	// LazyActivation means the policy changes will not take effect for now but defer to the resource template changes,
	// in other words, the resource template will not be propagated as per the current propagation rules until
	// there is an update on it.
	LazyActivation ActivationPreference = "Lazy"
)
// SchedulePriority defines how Karmada should resolve the priority and preemption policy
// for workload scheduling.
type SchedulePriority struct {
	// PriorityClassSource specifies where Karmada should look for the PriorityClass definition.
	// Available options:
	//   - KubePriorityClass: Uses Kubernetes PriorityClass (scheduling.k8s.io/v1)
	//   - PodPriorityClass: Uses PriorityClassName from PodTemplate: PodSpec.PriorityClassName (not yet implemented)
	//   - FederatedPriorityClass: Uses Karmada FederatedPriorityClass (not yet implemented)
	//
	// Note: only KubePriorityClass is accepted by validation at present.
	// +kubebuilder:validation:Enum=KubePriorityClass
	// +required
	PriorityClassSource PriorityClassSource `json:"priorityClassSource"`

	// PriorityClassName specifies which PriorityClass to use. Its behavior depends on PriorityClassSource:
	//
	// Behavior of PriorityClassName:
	//
	// For KubePriorityClass:
	//   - When specified: Uses the named Kubernetes PriorityClass.
	//
	// For PodPriorityClass:
	//   - Uses PriorityClassName from the PodTemplate.
	//   - Not yet implemented.
	//
	// For FederatedPriorityClass:
	//   - Not yet implemented.
	//
	// +required
	PriorityClassName string `json:"priorityClassName"`
}
// PriorityClassSource defines the type for the PriorityClassSource field of SchedulePriority.
type PriorityClassSource string

const (
	// FederatedPriorityClass specifies to use Karmada FederatedPriorityClass for priority resolution.
	// This feature is planned for future releases and is currently not implemented.
	FederatedPriorityClass PriorityClassSource = "FederatedPriorityClass"

	// KubePriorityClass specifies to use Kubernetes native PriorityClass (scheduling.k8s.io/v1)
	// for priority resolution. This is the default source.
	KubePriorityClass PriorityClassSource = "KubePriorityClass"

	// PodPriorityClass specifies to use the PriorityClassName defined in the workload's
	// PodTemplate for priority resolution.
	PodPriorityClass PriorityClassSource = "PodPriorityClass"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PropagationPolicyList contains a list of PropagationPolicy.
@ -454,8 +736,11 @@ type PropagationPolicyList struct {
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:scope="Cluster",shortName=cpp,categories={karmada-io}
// +kubebuilder:resource:path=clusterpropagationpolicies,scope="Cluster",shortName=cpp,categories={karmada-io}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:printcolumn:JSONPath=`.spec.conflictResolution`,name="CONFLICT-RESOLUTION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.priority`,name="PRIORITY",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ClusterPropagationPolicy represents the cluster-wide policy that propagates a group of resources to one or more clusters.
// Different with PropagationPolicy that could only propagate resources in its own namespace, ClusterPropagationPolicy

View File

@ -1,14 +1,36 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// The well-known label key constant.
const (
// PropagationPolicyNamespaceLabel is added to objects to specify associated PropagationPolicy namespace.
PropagationPolicyNamespaceLabel = "propagationpolicy.karmada.io/namespace"
// PropagationPolicyPermanentIDLabel is the identifier of a PropagationPolicy object.
// Karmada generates a unique identifier, such as metadata.UUID, for each PropagationPolicy object.
// This identifier will be used as a label selector to locate corresponding resources, such as ResourceBinding.
// The reason for generating a new unique identifier instead of simply using metadata.UUID is because:
// In backup scenarios, when applying the backup resource manifest in a new cluster, the UUID may change.
PropagationPolicyPermanentIDLabel = "propagationpolicy.karmada.io/permanent-id"
// PropagationPolicyNameLabel is added to objects to specify associated PropagationPolicy's name.
PropagationPolicyNameLabel = "propagationpolicy.karmada.io/name"
// ClusterPropagationPolicyLabel is added to objects to specify associated ClusterPropagationPolicy.
ClusterPropagationPolicyLabel = "clusterpropagationpolicy.karmada.io/name"
// ClusterPropagationPolicyPermanentIDLabel is the identifier of a ClusterPropagationPolicy object.
// Karmada generates a unique identifier, such as metadata.UUID, for each ClusterPropagationPolicy object.
// This identifier will be used as a label selector to locate corresponding resources, such as ResourceBinding.
// The reason for generating a new unique identifier instead of simply using metadata.UUID is because:
// In backup scenarios, when applying the backup resource manifest in a new cluster, the UUID may change.
ClusterPropagationPolicyPermanentIDLabel = "clusterpropagationpolicy.karmada.io/permanent-id"
// NamespaceSkipAutoPropagationLabel is added to namespace objects to indicate if
// the namespace should be skipped from propagating by the namespace controller.
@ -20,3 +42,15 @@ const (
// synced to new member clusters, but old member clusters still have it.
NamespaceSkipAutoPropagationLabel = "namespace.karmada.io/skip-auto-propagation"
)
// The well-known annotation key constants.
const (
	// PropagationPolicyNamespaceAnnotation is added to objects to specify the associated PropagationPolicy's namespace.
	PropagationPolicyNamespaceAnnotation = "propagationpolicy.karmada.io/namespace"

	// PropagationPolicyNameAnnotation is added to objects to specify the associated PropagationPolicy's name.
	PropagationPolicyNameAnnotation = "propagationpolicy.karmada.io/name"

	// ClusterPropagationPolicyAnnotation is added to objects to specify the associated ClusterPropagationPolicy's name.
	ClusterPropagationPolicyAnnotation = "clusterpropagationpolicy.karmada.io/name"
)

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -20,6 +36,11 @@ func (in *ApplicationFailoverBehavior) DeepCopyInto(out *ApplicationFailoverBeha
*out = new(int32)
**out = **in
}
if in.StatePreservation != nil {
in, out := &in.StatePreservation, &out.StatePreservation
*out = new(StatePreservation)
(*in).DeepCopyInto(*out)
}
return
}
@ -246,6 +267,106 @@ func (in *ClusterQuotaStatus) DeepCopy() *ClusterQuotaStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicy) DeepCopyInto(out *ClusterTaintPolicy) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// ObjectMeta and Spec hold reference-typed fields, so they are copied
	// recursively rather than by plain assignment.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicy.
// A nil receiver yields a nil result.
func (in *ClusterTaintPolicy) DeepCopy() *ClusterTaintPolicy {
	if in == nil {
		return nil
	}
	// Allocate a fresh value and delegate the field-wise copy to DeepCopyInto.
	out := new(ClusterTaintPolicy)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// It lets ClusterTaintPolicy satisfy the runtime.Object interface.
func (in *ClusterTaintPolicy) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicyList) DeepCopyInto(out *ClusterTaintPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Deep-copy each item so the copy shares no memory with the receiver's slice.
		in, out := &in.Items, &out.Items
		*out = make([]ClusterTaintPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicyList.
// A nil receiver yields a nil result.
func (in *ClusterTaintPolicyList) DeepCopy() *ClusterTaintPolicyList {
	if in == nil {
		return nil
	}
	// Allocate a fresh value and delegate the field-wise copy to DeepCopyInto.
	out := new(ClusterTaintPolicyList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// It lets ClusterTaintPolicyList satisfy the runtime.Object interface.
func (in *ClusterTaintPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicySpec) DeepCopyInto(out *ClusterTaintPolicySpec) {
	*out = *in
	if in.TargetClusters != nil {
		// Pointer field: allocate a new ClusterAffinity and copy into it.
		in, out := &in.TargetClusters, &out.TargetClusters
		*out = new(ClusterAffinity)
		(*in).DeepCopyInto(*out)
	}
	if in.AddOnConditions != nil {
		in, out := &in.AddOnConditions, &out.AddOnConditions
		*out = make([]MatchCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RemoveOnConditions != nil {
		in, out := &in.RemoveOnConditions, &out.RemoveOnConditions
		*out = make([]MatchCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Taints != nil {
		in, out := &in.Taints, &out.Taints
		*out = make([]Taint, len(*in))
		// deepcopy-gen determined Taint elements need no recursive copy,
		// so a shallow element copy is sufficient here.
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicySpec.
func (in *ClusterTaintPolicySpec) DeepCopy() *ClusterTaintPolicySpec {
if in == nil {
return nil
}
out := new(ClusterTaintPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommandArgsOverrider) DeepCopyInto(out *CommandArgsOverrider) {
*out = *in
@ -437,6 +558,36 @@ func (in *FederatedResourceQuotaStatus) DeepCopy() *FederatedResourceQuotaStatus
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldOverrider) DeepCopyInto(out *FieldOverrider) {
*out = *in
// JSON and YAML patch-operation slices are cloned element-wise; each
// operation's Value needs its own recursive copy. Nil slices stay nil.
if in.JSON != nil {
in, out := &in.JSON, &out.JSON
*out = make([]JSONPatchOperation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.YAML != nil {
in, out := &in.YAML, &out.YAML
*out = make([]YAMLPatchOperation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldOverrider.
// Returns nil when the receiver is nil.
func (in *FieldOverrider) DeepCopy() *FieldOverrider {
if in == nil {
return nil
}
out := new(FieldOverrider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldSelector) DeepCopyInto(out *FieldSelector) {
*out = *in
@ -497,6 +648,23 @@ func (in *ImagePredicate) DeepCopy() *ImagePredicate {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONPatchOperation) DeepCopyInto(out *JSONPatchOperation) {
*out = *in
// Value is copied recursively; its concrete type is declared elsewhere in
// the package and presumably holds reference data — hence no plain assignment.
in.Value.DeepCopyInto(&out.Value)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatchOperation.
// Returns nil when the receiver is nil.
func (in *JSONPatchOperation) DeepCopy() *JSONPatchOperation {
if in == nil {
return nil
}
out := new(JSONPatchOperation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelAnnotationOverrider) DeepCopyInto(out *LabelAnnotationOverrider) {
*out = *in
@ -520,6 +688,27 @@ func (in *LabelAnnotationOverrider) DeepCopy() *LabelAnnotationOverrider {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
*out = *in
// ConditionStatus values are flat (string-based), so a bulk copy into a
// freshly allocated slice is a deep copy; nil stays nil.
if in.StatusValues != nil {
in, out := &in.StatusValues, &out.StatusValues
*out = make([]v1.ConditionStatus, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition.
// Returns nil when the receiver is nil.
func (in *MatchCondition) DeepCopy() *MatchCondition {
if in == nil {
return nil
}
out := new(MatchCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OverridePolicy) DeepCopyInto(out *OverridePolicy) {
*out = *in
@ -661,6 +850,13 @@ func (in *Overriders) DeepCopyInto(out *Overriders) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FieldOverrider != nil {
in, out := &in.FieldOverrider, &out.FieldOverrider
*out = make([]FieldOverrider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -822,6 +1018,21 @@ func (in *PropagationSpec) DeepCopyInto(out *PropagationSpec) {
*out = new(FailoverBehavior)
(*in).DeepCopyInto(*out)
}
if in.Suspension != nil {
in, out := &in.Suspension, &out.Suspension
*out = new(Suspension)
(*in).DeepCopyInto(*out)
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
if in.SchedulePriority != nil {
in, out := &in.SchedulePriority, &out.SchedulePriority
*out = new(SchedulePriority)
**out = **in
}
return
}
@ -899,6 +1110,22 @@ func (in *RuleWithCluster) DeepCopy() *RuleWithCluster {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// SchedulePriority contains no reference types, so shallow assignment is sufficient.
func (in *SchedulePriority) DeepCopyInto(out *SchedulePriority) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePriority.
// Returns nil when the receiver is nil.
func (in *SchedulePriority) DeepCopy() *SchedulePriority {
if in == nil {
return nil
}
out := new(SchedulePriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SpreadConstraint) DeepCopyInto(out *SpreadConstraint) {
*out = *in
@ -915,6 +1142,43 @@ func (in *SpreadConstraint) DeepCopy() *SpreadConstraint {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatePreservation) DeepCopyInto(out *StatePreservation) {
*out = *in
// StatePreservationRule is a flat value type (see its DeepCopyInto below),
// so a bulk copy into a fresh slice is a deep copy; nil stays nil.
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]StatePreservationRule, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatePreservation.
// Returns nil when the receiver is nil.
func (in *StatePreservation) DeepCopy() *StatePreservation {
if in == nil {
return nil
}
out := new(StatePreservation)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// StatePreservationRule contains no reference types, so shallow assignment suffices.
func (in *StatePreservationRule) DeepCopyInto(out *StatePreservationRule) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatePreservationRule.
// Returns nil when the receiver is nil.
func (in *StatePreservationRule) DeepCopy() *StatePreservationRule {
if in == nil {
return nil
}
out := new(StatePreservationRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StaticClusterAssignment) DeepCopyInto(out *StaticClusterAssignment) {
*out = *in
@ -954,3 +1218,83 @@ func (in *StaticClusterWeight) DeepCopy() *StaticClusterWeight {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuspendClusters) DeepCopyInto(out *SuspendClusters) {
*out = *in
// Strings are immutable, so bulk-copying into a fresh slice is a deep copy;
// a nil slice stays nil.
if in.ClusterNames != nil {
in, out := &in.ClusterNames, &out.ClusterNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuspendClusters.
// Returns nil when the receiver is nil.
func (in *SuspendClusters) DeepCopy() *SuspendClusters {
if in == nil {
return nil
}
out := new(SuspendClusters)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Suspension) DeepCopyInto(out *Suspension) {
*out = *in
// Pointer fields get fresh allocations so the copy owns its own memory;
// nil pointers stay nil.
if in.Dispatching != nil {
in, out := &in.Dispatching, &out.Dispatching
*out = new(bool)
**out = **in
}
if in.DispatchingOnClusters != nil {
in, out := &in.DispatchingOnClusters, &out.DispatchingOnClusters
*out = new(SuspendClusters)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Suspension.
// Returns nil when the receiver is nil.
func (in *Suspension) DeepCopy() *Suspension {
if in == nil {
return nil
}
out := new(Suspension)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Taint contains no reference types, so shallow assignment is sufficient.
func (in *Taint) DeepCopyInto(out *Taint) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
// Returns nil when the receiver is nil.
func (in *Taint) DeepCopy() *Taint {
if in == nil {
return nil
}
out := new(Taint)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *YAMLPatchOperation) DeepCopyInto(out *YAMLPatchOperation) {
*out = *in
// Value is copied recursively; its concrete type is declared elsewhere in
// the package and presumably holds reference data — hence no plain assignment.
in.Value.DeepCopyInto(&out.Value)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YAMLPatchOperation.
// Returns nil when the receiver is nil.
func (in *YAMLPatchOperation) DeepCopy() *YAMLPatchOperation {
if in == nil {
return nil
}
out := new(YAMLPatchOperation)
in.DeepCopyInto(out)
return out
}

View File

@ -1,11 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -27,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)
@ -46,6 +65,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ClusterOverridePolicyList{},
&ClusterPropagationPolicy{},
&ClusterPropagationPolicyList{},
&ClusterTaintPolicy{},
&ClusterTaintPolicyList{},
&FederatedResourceQuota{},
&FederatedResourceQuotaList{},
&OverridePolicy{},

21
remedy/v1alpha1/doc.go Normal file
View File

@ -0,0 +1,21 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true
// +groupName=remedy.karmada.io
package v1alpha1

View File

@ -0,0 +1,123 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// The markers below drive code generation: client generation for a
// cluster-scoped (non-namespaced) resource, deepcopy with runtime.Object
// support, and the CRD resource path/categories for kubebuilder.
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=remedies,scope="Cluster",categories={karmada-io}

// Remedy represents the cluster-level management strategies based on cluster conditions.
type Remedy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

// Spec represents the desired behavior of Remedy.
// +required
Spec RemedySpec `json:"spec"`
}
// RemedySpec represents the desired behavior of Remedy.
type RemedySpec struct {
// ClusterAffinity specifies the clusters that Remedy needs to pay attention to.
// For clusters that meet the DecisionConditions, Actions will be performed.
// If empty, all clusters will be selected.
// +optional
ClusterAffinity *ClusterAffinity `json:"clusterAffinity,omitempty"`

// DecisionMatches indicates the decision matches of triggering the remedy
// system to perform the actions. As long as any one DecisionMatch matches,
// the Actions will be performed.
// If empty, the Actions will be performed immediately.
// +optional
DecisionMatches []DecisionMatch `json:"decisionMatches,omitempty"`

// Actions specifies the actions that remedy system needs to perform.
// If empty, no action will be performed.
// +optional
Actions []RemedyAction `json:"actions,omitempty"`
}
// DecisionMatch represents the decision match detail of activating the remedy system.
type DecisionMatch struct {
// ClusterConditionMatch describes the cluster condition requirement.
// +optional
ClusterConditionMatch *ClusterConditionRequirement `json:"clusterConditionMatch,omitempty"`
}

// ClusterConditionRequirement describes the Cluster condition requirement details.
type ClusterConditionRequirement struct {
// ConditionType specifies the ClusterStatus condition type.
// +required
ConditionType ConditionType `json:"conditionType"`

// Operator represents a conditionType's relationship to a conditionStatus.
// Valid operators are Equal, NotEqual.
//
// +kubebuilder:validation:Enum=Equal;NotEqual
// +required
Operator ClusterConditionOperator `json:"operator"`

// ConditionStatus specifies the ClusterStatus condition status.
// +required
ConditionStatus string `json:"conditionStatus"`
}
// ConditionType represents the detection ClusterStatus condition type.
type ConditionType string

const (
// ServiceDomainNameResolutionReady expresses the detection of the domain name resolution
// function of Service in the Kubernetes cluster.
ServiceDomainNameResolutionReady ConditionType = "ServiceDomainNameResolutionReady"
)

// ClusterConditionOperator is the set of operators that can be used in the cluster condition requirement.
type ClusterConditionOperator string

const (
// ClusterConditionEqual means equal match.
ClusterConditionEqual ClusterConditionOperator = "Equal"
// ClusterConditionNotEqual means not equal match.
ClusterConditionNotEqual ClusterConditionOperator = "NotEqual"
)

// ClusterAffinity represents the filter to select clusters.
type ClusterAffinity struct {
// ClusterNames is the list of clusters to be selected.
// If empty, all clusters will be selected (see RemedySpec.ClusterAffinity).
// +optional
ClusterNames []string `json:"clusterNames,omitempty"`
}

// RemedyAction represents the action type the remedy system needs to perform.
type RemedyAction string

const (
// TrafficControl indicates that the cluster requires traffic control.
TrafficControl RemedyAction = "TrafficControl"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RemedyList contains a list of Remedy.
type RemedyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
// Items is the list of Remedy objects in this list.
Items []Remedy `json:"items"`
}

View File

@ -0,0 +1,28 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// Well-known names of the Remedy resource, used when referring to the
// resource by kind, singular, or plural name across the codebase.
const (
// ResourceKindRemedy is kind name of Remedy.
ResourceKindRemedy = "Remedy"
// ResourceSingularRemedy is singular name of Remedy.
ResourceSingularRemedy = "remedy"
// ResourcePluralRemedy is plural name of Remedy.
ResourcePluralRemedy = "remedies"
// ResourceNamespaceScopedRemedy indicates if Remedy is NamespaceScoped.
// False: Remedy is cluster-scoped (matches the kubebuilder scope marker on the type).
ResourceNamespaceScopedRemedy = false
)

View File

@ -0,0 +1,177 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterAffinity) DeepCopyInto(out *ClusterAffinity) {
*out = *in
// Strings are immutable, so bulk-copying into a fresh slice is a deep copy;
// a nil slice stays nil.
if in.ClusterNames != nil {
in, out := &in.ClusterNames, &out.ClusterNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAffinity.
// Returns nil when the receiver is nil.
func (in *ClusterAffinity) DeepCopy() *ClusterAffinity {
if in == nil {
return nil
}
out := new(ClusterAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// ClusterConditionRequirement contains no reference types, so shallow assignment suffices.
func (in *ClusterConditionRequirement) DeepCopyInto(out *ClusterConditionRequirement) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConditionRequirement.
// Returns nil when the receiver is nil.
func (in *ClusterConditionRequirement) DeepCopy() *ClusterConditionRequirement {
if in == nil {
return nil
}
out := new(ClusterConditionRequirement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DecisionMatch) DeepCopyInto(out *DecisionMatch) {
*out = *in
// ClusterConditionMatch points at a flat struct, so dereferencing assignment
// into a fresh allocation is a deep copy; nil stays nil.
if in.ClusterConditionMatch != nil {
in, out := &in.ClusterConditionMatch, &out.ClusterConditionMatch
*out = new(ClusterConditionRequirement)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecisionMatch.
// Returns nil when the receiver is nil.
func (in *DecisionMatch) DeepCopy() *DecisionMatch {
if in == nil {
return nil
}
out := new(DecisionMatch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Remedy) DeepCopyInto(out *Remedy) {
*out = *in
// TypeMeta holds only plain value fields, so shallow assignment is a deep copy.
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Remedy.
// Returns nil when the receiver is nil.
func (in *Remedy) DeepCopy() *Remedy {
if in == nil {
return nil
}
out := new(Remedy)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// A nil receiver yields an untyped nil, not a typed-nil interface.
func (in *Remedy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemedyList) DeepCopyInto(out *RemedyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
// Items is cloned element-wise because each Remedy itself contains
// reference types; a nil slice stays nil in the copy.
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Remedy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemedyList.
// Returns nil when the receiver is nil.
func (in *RemedyList) DeepCopy() *RemedyList {
if in == nil {
return nil
}
out := new(RemedyList)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemedyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemedySpec) DeepCopyInto(out *RemedySpec) {
*out = *in
// Pointer field: allocate a fresh ClusterAffinity and copy into it; nil stays nil.
if in.ClusterAffinity != nil {
in, out := &in.ClusterAffinity, &out.ClusterAffinity
*out = new(ClusterAffinity)
(*in).DeepCopyInto(*out)
}
// DecisionMatch carries a pointer field, so each element is copied recursively.
if in.DecisionMatches != nil {
in, out := &in.DecisionMatches, &out.DecisionMatches
*out = make([]DecisionMatch, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
// RemedyAction is a string type, so a bulk copy is a deep copy.
if in.Actions != nil {
in, out := &in.Actions, &out.Actions
*out = make([]RemedyAction, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemedySpec.
// Returns nil when the receiver is nil.
func (in *RemedySpec) DeepCopy() *RemedySpec {
if in == nil {
return nil
}
out := new(RemedySpec)
in.DeepCopyInto(out)
return out
}

View File

@ -0,0 +1,70 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
const GroupName = "remedy.karmada.io"

// GroupVersion specifies the group and the version used to register the objects.
var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"}

// SchemeGroupVersion is the group version used to register these objects.
// Deprecated: use GroupVersion instead.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder collects functions that add types to a scheme; its zero
// value is usable as-is.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// AddToScheme applies all stored registration functions to a scheme.
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
// Install registers this API group's types into the given scheme.
Install = localSchemeBuilder.AddToScheme
)

func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes)
}

// addKnownTypes adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Remedy{},
&RemedyList{},
)
// AddToGroupVersion allows the serialization of client types like ListOptions.
v1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package search is the internal version of the API.
// +k8s:deepcopy-gen=package
// +groupName=search.karmada.io

View File

@ -1,7 +1,24 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/karmada-io/api/search"
@ -11,6 +28,6 @@ import (
// Install registers the API group and adds types to a scheme.
func Install(scheme *runtime.Scheme) {
utilruntime.Must(search.AddToScheme(scheme))
utilruntime.Must(searchv1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(searchv1alpha1.SchemeGroupVersion))
utilruntime.Must(searchv1alpha1.Install(scheme))
utilruntime.Must(scheme.SetVersionPriority(schema.GroupVersion{Group: searchv1alpha1.GroupVersion.Group, Version: searchv1alpha1.GroupVersion.Version}))
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package search
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
@ -29,14 +45,4 @@ func init() {
// we need to add the options to empty v1
// TODO fix the server code to avoid this
metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
// TODO: keep the generic API server from wanting this
unversioned := schema.GroupVersion{Group: "", Version: "v1"}
Scheme.AddUnversionedTypes(unversioned,
&metav1.Status{},
&metav1.APIVersions{},
&metav1.APIGroupList{},
&metav1.APIGroup{},
&metav1.APIResourceList{},
)
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package search
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true

View File

@ -1,3 +1,19 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -103,7 +119,7 @@ type ResourceRegistryStatus struct {
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceRegistryList if a collection of ResourceRegistry.
// ResourceRegistryList is a collection of ResourceRegistry.
type ResourceRegistryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1

View File

@ -1,11 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -27,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package search

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -9,7 +25,7 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=rb,categories={karmada-io}
// +kubebuilder:resource:path=resourcebindings,scope=Namespaced,shortName=rb,categories={karmada-io}
// ResourceBinding represents a binding of a kubernetes resource with a propagation policy.
type ResourceBinding struct {
@ -119,7 +135,7 @@ type ResourceBindingList struct {
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:scope="Cluster",shortName=crb,categories={karmada-io}
// +kubebuilder:resource:path=clusterresourcebindings,scope="Cluster",shortName=crb,categories={karmada-io}
// +kubebuilder:subresource:status
// ClusterResourceBinding represents a binding of a kubernetes resource with a ClusterPropagationPolicy.
@ -135,7 +151,6 @@ type ClusterResourceBinding struct {
Status ResourceBindingStatus `json:"status,omitempty"`
}
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterResourceBindingList contains a list of ClusterResourceBinding.

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 is the v1alpha1 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true

View File

@ -1,18 +0,0 @@
package v1alpha1
const (
// ResourceBindingNamespaceLabel is added to objects to specify associated ResourceBinding's namespace.
ResourceBindingNamespaceLabel = "resourcebinding.karmada.io/namespace"
// ResourceBindingNameLabel is added to objects to specify associated ResourceBinding's name.
ResourceBindingNameLabel = "resourcebinding.karmada.io/name"
// ClusterResourceBindingLabel is added to objects to specify associated ClusterResourceBinding.
ClusterResourceBindingLabel = "clusterresourcebinding.karmada.io/name"
// WorkNamespaceLabel is added to objects to specify associated Work's namespace.
WorkNamespaceLabel = "work.karmada.io/namespace"
// WorkNameLabel is added to objects to specify associated Work's name.
WorkNameLabel = "work.karmada.io/name"
)

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
@ -19,9 +35,10 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:categories={karmada-io},shortName=wk
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="Applied",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:resource:path=works,scope=Namespaced,shortName=wk,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.workload.manifests[*].kind`,name="WORKLOAD-KIND",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="APPLIED",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// Work defines a list of resources to be deployed on the member cluster.
type Work struct {
@ -40,6 +57,20 @@ type Work struct {
// WorkSpec defines the desired state of a Work: the workload manifests to
// deliver to a member cluster, plus knobs controlling dispatching and what
// happens to the delivered resources when the Work is deleted.
type WorkSpec struct {
	// Workload represents the manifest workload to be deployed on managed cluster.
	Workload WorkloadTemplate `json:"workload,omitempty"`

	// SuspendDispatching controls whether dispatching should
	// be suspended; nil means dispatching is not suspended.
	// Note: true means stop propagating to the corresponding member cluster, and
	// does not prevent status collection.
	// +optional
	SuspendDispatching *bool `json:"suspendDispatching,omitempty"`

	// PreserveResourcesOnDeletion controls whether resources should be preserved on the
	// member cluster when the Work object is deleted.
	// If set to true, resources will be preserved on the member cluster.
	// Default is false, which means resources will be deleted along with the Work object.
	// +optional
	PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
}
// WorkloadTemplate represents the manifest workload to be deployed on managed cluster.
@ -129,6 +160,8 @@ const (
// WorkDegraded represents that the current state of Work does not match
// the desired state for a certain period.
WorkDegraded string = "Degraded"
// WorkDispatching represents the dispatching or suspension status of the Work resource.
WorkDispatching string = "Dispatching"
)
// ResourceHealth represents that the health status of the reference resource.

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@ -365,6 +381,16 @@ func (in *WorkList) DeepCopyObject() runtime.Object {
func (in *WorkSpec) DeepCopyInto(out *WorkSpec) {
*out = *in
in.Workload.DeepCopyInto(&out.Workload)
if in.SuspendDispatching != nil {
in, out := &in.SuspendDispatching, &out.SuspendDispatching
*out = new(bool)
**out = **in
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
return
}

View File

@ -1,11 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -27,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
@ -32,11 +48,11 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=rb,categories={karmada-io}
// +kubebuilder:resource:path=resourcebindings,scope=Namespaced,shortName=rb,categories={karmada-io}
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="Scheduled",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FullyApplied",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="SCHEDULED",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FULLYAPPLIED",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ResourceBinding represents a binding of a kubernetes resource with a propagation policy.
type ResourceBinding struct {
@ -107,6 +123,46 @@ type ResourceBindingSpec struct {
// It inherits directly from the associated PropagationPolicy(or ClusterPropagationPolicy).
// +optional
Failover *policyv1alpha1.FailoverBehavior `json:"failover,omitempty"`
// ConflictResolution declares how potential conflict should be handled when
// a resource that is being propagated already exists in the target cluster.
//
// It defaults to "Abort" which means stop propagating to avoid unexpected
// overwrites. The "Overwrite" might be useful when migrating legacy cluster
// resources to Karmada, in which case the conflict is predictable and Karmada
// can be instructed to take over the resource by overwriting.
//
// +kubebuilder:default="Abort"
// +kubebuilder:validation:Enum=Abort;Overwrite
// +optional
ConflictResolution policyv1alpha1.ConflictResolution `json:"conflictResolution,omitempty"`
// RescheduleTriggeredAt is a timestamp representing when the referenced resource is triggered rescheduling.
// When this field is updated, it means a rescheduling is manually triggered by user, and the expected behavior
// of this action is to do a complete recalculation without referring to last scheduling results.
// It works with the status.lastScheduledTime field, and only when this timestamp is later than timestamp in
// status.lastScheduledTime will the rescheduling actually execute, otherwise, ignored.
//
// It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC.
// +optional
RescheduleTriggeredAt *metav1.Time `json:"rescheduleTriggeredAt,omitempty"`
// Suspension declares the policy for suspending different aspects of propagation.
// nil means no suspension. No default values.
// +optional
Suspension *Suspension `json:"suspension,omitempty"`
// PreserveResourcesOnDeletion controls whether resources should be preserved on the
// member clusters when the binding object is deleted.
// If set to true, resources will be preserved on the member clusters.
// Default is false, which means resources will be deleted along with the binding object.
// This setting applies to all Work objects created under this binding object.
// +optional
PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
// SchedulePriority represents the scheduling priority assigned to workloads.
// +optional
SchedulePriority *SchedulePriority `json:"schedulePriority,omitempty"`
}
// ObjectReference contains enough information to locate the referenced object inside current cluster.
@ -146,6 +202,14 @@ type ReplicaRequirements struct {
// ResourceRequest represents the resources required by each replica.
// +optional
ResourceRequest corev1.ResourceList `json:"resourceRequest,omitempty"`
// Namespace represents the namespace of the resources.
// +optional
Namespace string `json:"namespace,omitempty"`
// PriorityClassName represents the priorityClassName of the resources.
// +optional
PriorityClassName string `json:"priorityClassName,omitempty"`
}
// NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica.
@ -180,6 +244,13 @@ type GracefulEvictionTask struct {
// +required
FromCluster string `json:"fromCluster"`
// PurgeMode represents how to deal with the legacy applications on the
// cluster from which the application is migrated.
// Valid options are "Immediately", "Graciously" and "Never".
// +kubebuilder:validation:Enum=Immediately;Graciously;Never
// +optional
PurgeMode policyv1alpha1.PurgeMode `json:"purgeMode,omitempty"`
// Replicas indicates the number of replicas should be evicted.
// Should be ignored for resource type that doesn't have replica.
// +optional
@ -220,6 +291,11 @@ type GracefulEvictionTask struct {
// +optional
SuppressDeletion *bool `json:"suppressDeletion,omitempty"`
// PreservedLabelState represents the application state information collected from the original cluster,
// and it will be injected into the new cluster in form of application labels.
// +optional
PreservedLabelState map[string]string `json:"preservedLabelState,omitempty"`
// CreationTimestamp is a timestamp representing the server time when this object was
// created.
// Clients should not set this value to avoid the time inconsistency issue.
@ -227,7 +303,10 @@ type GracefulEvictionTask struct {
//
// Populated by the system. Read-only.
// +optional
CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty"`
CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"`
// ClustersBeforeFailover records the clusters where the application was running before failover.
ClustersBeforeFailover []string `json:"clustersBeforeFailover,omitempty"`
}
// BindingSnapshot is a snapshot of a ResourceBinding or ClusterResourceBinding.
@ -247,6 +326,31 @@ type BindingSnapshot struct {
Clusters []TargetCluster `json:"clusters,omitempty"`
}
// Suspension defines the policy for suspending dispatching and scheduling.
type Suspension struct {
	// Embedded policy-level suspension knobs — presumably the dispatching
	// half of this type's contract; see policyv1alpha1.Suspension for details.
	policyv1alpha1.Suspension `json:",inline"`

	// Scheduling controls whether scheduling should be suspended; the scheduler will pause scheduling and not
	// process resource binding when the value is true, and resume scheduling when it's false or nil.
	// This is designed for third-party systems to temporarily pause the scheduling of applications, enabling
	// them to manage resource allocation, prioritize critical workloads, etc.
	// It is expected that third-party systems use an admission webhook to suspend scheduling at the time of
	// ResourceBinding creation. Once a ResourceBinding has been scheduled, it cannot be paused afterward, as it may
	// lead to ineffective suspension.
	// +optional
	Scheduling *bool `json:"scheduling,omitempty"`
}
// SchedulePriority represents the scheduling priority assigned to workloads.
// It is read back via ResourceBindingSpec.SchedulePriorityValue.
type SchedulePriority struct {
	// Priority specifies the scheduling priority for the binding.
	// Higher values indicate a higher priority.
	// If not explicitly set, the default value is 0.
	// +kubebuilder:default=0
	// +optional
	Priority int32 `json:"priority,omitempty"`
}
// ResourceBindingStatus represents the overall status of the strategy as well as the referenced resources.
type ResourceBindingStatus struct {
// SchedulerObservedGeneration is the generation(.metadata.generation) observed by the scheduler.
@ -260,6 +364,11 @@ type ResourceBindingStatus struct {
// +optional
SchedulerObservedAffinityName string `json:"schedulerObservingAffinityName,omitempty"`
// LastScheduledTime representing the latest timestamp when scheduler successfully finished a scheduling.
// It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC.
// +optional
LastScheduledTime *metav1.Time `json:"lastScheduledTime,omitempty"`
// Conditions contain the different condition statuses.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
@ -307,6 +416,24 @@ const (
FullyApplied string = "FullyApplied"
)
// These are reasons for a binding's transition to a Scheduled condition.
const (
// BindingReasonSuccess reason in Scheduled condition means that binding has been scheduled successfully.
BindingReasonSuccess = "Success"
// BindingReasonSchedulerError reason in Scheduled condition means that some internal error happens
// during scheduling, for example due to api-server connection error.
BindingReasonSchedulerError = "SchedulerError"
// BindingReasonNoClusterFit reason in Scheduled condition means that scheduling has finished
// due to no fit cluster.
BindingReasonNoClusterFit = "NoClusterFit"
// BindingReasonUnschedulable reason in Scheduled condition means that the scheduler can't schedule
// the binding right now, for example due to insufficient resources in the clusters.
BindingReasonUnschedulable = "Unschedulable"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceBindingList contains a list of ResourceBinding.
@ -336,12 +463,12 @@ const (
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:scope="Cluster",shortName=crb,categories={karmada-io}
// +kubebuilder:resource:path=clusterresourcebindings,scope="Cluster",shortName=crb,categories={karmada-io}
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="Scheduled",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FullyApplied",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="SCHEDULED",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FULLYAPPLIED",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ClusterResourceBinding represents a binding of a kubernetes resource with a ClusterPropagationPolicy.
type ClusterResourceBinding struct {
@ -356,7 +483,6 @@ type ClusterResourceBinding struct {
Status ResourceBindingStatus `json:"status,omitempty"`
}
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterResourceBindingList contains a list of ClusterResourceBinding.

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import "sigs.k8s.io/controller-runtime/pkg/conversion"

View File

@ -1,12 +1,33 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import policyv1alpha1 "github.com/karmada-io/api/policy/v1alpha1"
// TaskOptions represents options for GracefulEvictionTasks.
type TaskOptions struct {
producer string
reason string
message string
gracePeriodSeconds *int32
suppressDeletion *bool
purgeMode policyv1alpha1.PurgeMode
producer string
reason string
message string
gracePeriodSeconds *int32
suppressDeletion *bool
preservedLabelState map[string]string
clustersBeforeFailover []string
}
// Option configures a TaskOptions
@ -22,6 +43,13 @@ func NewTaskOptions(opts ...Option) *TaskOptions {
return &options
}
// WithPurgeMode sets the purgeMode for TaskOptions.
func WithPurgeMode(mode policyv1alpha1.PurgeMode) Option {
	return func(opts *TaskOptions) {
		opts.purgeMode = mode
	}
}
// WithProducer sets the producer for TaskOptions
func WithProducer(producer string) Option {
return func(o *TaskOptions) {
@ -57,6 +85,20 @@ func WithSuppressDeletion(suppressDeletion *bool) Option {
}
}
// WithPreservedLabelState sets the preservedLabelState for TaskOptions.
func WithPreservedLabelState(labelState map[string]string) Option {
	return func(opts *TaskOptions) {
		opts.preservedLabelState = labelState
	}
}
// WithClustersBeforeFailover sets the clustersBeforeFailover for TaskOptions.
func WithClustersBeforeFailover(clusters []string) Option {
	return func(opts *TaskOptions) {
		opts.clustersBeforeFailover = clusters
	}
}
// TargetContains checks if specific cluster present on the target list.
func (s *ResourceBindingSpec) TargetContains(name string) bool {
for i := range s.Clusters {
@ -137,15 +179,37 @@ func (s *ResourceBindingSpec) GracefulEvictCluster(name string, options *TaskOpt
// build eviction task
evictingCluster := evictCluster.DeepCopy()
evictionTask := GracefulEvictionTask{
FromCluster: evictingCluster.Name,
Reason: options.reason,
Message: options.message,
Producer: options.producer,
GracePeriodSeconds: options.gracePeriodSeconds,
SuppressDeletion: options.suppressDeletion,
FromCluster: evictingCluster.Name,
PurgeMode: options.purgeMode,
Reason: options.reason,
Message: options.message,
Producer: options.producer,
GracePeriodSeconds: options.gracePeriodSeconds,
SuppressDeletion: options.suppressDeletion,
PreservedLabelState: options.preservedLabelState,
ClustersBeforeFailover: options.clustersBeforeFailover,
}
if evictingCluster.Replicas > 0 {
evictionTask.Replicas = &evictingCluster.Replicas
}
s.GracefulEvictionTasks = append(s.GracefulEvictionTasks, evictionTask)
}
// SchedulingSuspended tells if the scheduling of ResourceBinding or
// ClusterResourceBinding is suspended.
// It tolerates a nil receiver and a nil Suspension/Scheduling field, all of
// which mean "not suspended".
func (s *ResourceBindingSpec) SchedulingSuspended() bool {
	if s == nil {
		return false
	}
	susp := s.Suspension
	if susp == nil || susp.Scheduling == nil {
		return false
	}
	return *susp.Scheduling
}
// SchedulePriorityValue returns the scheduling priority declared
// by '.spec.SchedulePriority.Priority'.
// It returns 0 when the priority is not explicitly set.
func (s *ResourceBindingSpec) SchedulePriorityValue() int32 {
	// Guard the nil receiver as well, for consistency with the sibling
	// SchedulingSuspended, which is safe to call on a nil spec.
	if s == nil || s.SchedulePriority == nil {
		return 0
	}
	return s.SchedulePriority.Priority
}

View File

@ -1,355 +0,0 @@
package v1alpha2
import (
"reflect"
"testing"
"k8s.io/utils/pointer"
)
// TestResourceBindingSpec_TargetContains verifies that TargetContains reports
// whether a given cluster name appears in the spec's scheduled target list.
func TestResourceBindingSpec_TargetContains(t *testing.T) {
	cases := []struct {
		name    string
		spec    ResourceBindingSpec
		cluster string
		want    bool
	}{
		{
			name:    "cluster present in target",
			spec:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
			cluster: "m1",
			want:    true,
		},
		{
			name:    "cluster not present in target",
			spec:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
			cluster: "m3",
			want:    false,
		},
		{
			name:    "cluster is empty",
			spec:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
			cluster: "",
			want:    false,
		},
		{
			name:    "target list is empty",
			spec:    ResourceBindingSpec{Clusters: []TargetCluster{}},
			cluster: "m1",
			want:    false,
		},
	}
	for i := range cases {
		c := cases[i]
		t.Run(c.name, func(t *testing.T) {
			if got := c.spec.TargetContains(c.cluster); got != c.want {
				t.Fatalf("expect: %v, but got: %v", c.want, got)
			}
		})
	}
}
// TestResourceBindingSpec_AssignedReplicasForCluster verifies that
// AssignedReplicasForCluster returns the replica count assigned to a target
// cluster, and 0 when the cluster is absent from the target list.
func TestResourceBindingSpec_AssignedReplicasForCluster(t *testing.T) {
	cases := []struct {
		name    string
		spec    ResourceBindingSpec
		cluster string
		want    int32
	}{
		{
			name:    "returns valid replicas in case cluster present",
			spec:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}}},
			cluster: "m1",
			want:    1,
		},
		{
			name:    "returns 0 in case cluster not present",
			spec:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}}},
			cluster: "non-exist",
			want:    0,
		},
	}
	for i := range cases {
		c := cases[i]
		t.Run(c.name, func(t *testing.T) {
			if got := c.spec.AssignedReplicasForCluster(c.cluster); got != c.want {
				t.Fatalf("expect: %d, but got: %d", c.want, got)
			}
		})
	}
}
// TestResourceBindingSpec_RemoveCluster verifies that RemoveCluster drops the
// named cluster from the target list and leaves the list unchanged when the
// name is absent or the list is empty.
func TestResourceBindingSpec_RemoveCluster(t *testing.T) {
	cases := []struct {
		name    string
		input   ResourceBindingSpec
		cluster string
		want    ResourceBindingSpec
	}{
		{
			name:    "cluster not exist should do nothing",
			input:   ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
			cluster: "no-exist",
			want:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
		},
		{
			name:    "remove cluster from head",
			input:   ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
			cluster: "m1",
			want:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m2"}, {Name: "m3"}}},
		},
		{
			name:    "remove cluster from middle",
			input:   ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
			cluster: "m2",
			want:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m3"}}},
		},
		{
			name:    "remove cluster from tail",
			input:   ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
			cluster: "m3",
			want:    ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
		},
		{
			name:    "remove cluster from empty list",
			input:   ResourceBindingSpec{Clusters: []TargetCluster{}},
			cluster: "na",
			want:    ResourceBindingSpec{Clusters: []TargetCluster{}},
		},
	}
	for i := range cases {
		c := cases[i]
		t.Run(c.name, func(t *testing.T) {
			c.input.RemoveCluster(c.cluster)
			if !reflect.DeepEqual(c.input.Clusters, c.want.Clusters) {
				t.Fatalf("expect: %v, but got: %v", c.want.Clusters, c.input.Clusters)
			}
		})
	}
}
// TestResourceBindingSpec_GracefulEvictCluster verifies that GracefulEvictCluster
// removes the target cluster from the scheduling result and appends a matching
// GracefulEvictionTask, covering removal from head/middle/tail, appending to a
// non-empty task list, the no-op cases, and duplicate-task suppression.
func TestResourceBindingSpec_GracefulEvictCluster(t *testing.T) {
	tests := []struct {
		Name       string
		InputSpec  ResourceBindingSpec
		EvictEvent GracefulEvictionTask
		ExpectSpec ResourceBindingSpec
	}{
		{
			// Evicting an unknown cluster must leave the cluster list
			// unchanged and must not create any eviction task.
			Name: "cluster not exist should do nothing",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}},
			},
			EvictEvent: GracefulEvictionTask{FromCluster: "non-exist"},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}},
			},
		},
		{
			// The expected task carries Replicas(1), i.e. the eviction task
			// snapshots the evicted cluster's assigned replicas.
			Name: "evict cluster from head",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m1",
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m1",
						Replicas:    pointer.Int32(1),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
		{
			Name: "remove cluster from middle",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m2",
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m3", Replicas: 3}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m2",
						Replicas:    pointer.Int32(2),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
		{
			Name: "remove cluster from tail",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m3",
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m3",
						Replicas:    pointer.Int32(3),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
		{
			// A pre-existing task for another cluster must be preserved and
			// the new task appended after it.
			Name: "eviction task should be appended to non-empty tasks",
			InputSpec: ResourceBindingSpec{
				Clusters:              []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
				GracefulEvictionTasks: []GracefulEvictionTask{{FromCluster: "original-cluster"}},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m3",
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "original-cluster",
					},
					{
						FromCluster: "m3",
						Replicas:    pointer.Int32(3),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
		{
			// EvictEvent is the zero value here (FromCluster ""), so nothing
			// matches and the empty cluster list stays empty.
			Name:       "remove cluster from empty list",
			InputSpec:  ResourceBindingSpec{Clusters: []TargetCluster{}},
			ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{}},
		},
		{
			// A task for "m1" already exists; re-evicting "m1" must still
			// remove it from Clusters but keep the ORIGINAL task (message
			// "graceful eviction v1"), not append a second one.
			Name: "same eviction task should not be appended multiple times",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m1",
						Replicas:    pointer.Int32(1),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction v1",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m1",
				Replicas:    pointer.Int32(1),
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction v2",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m2", Replicas: 2}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m1",
						Replicas:    pointer.Int32(1),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction v1",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
	}
	for _, test := range tests {
		// Copy the loop variable so the subtest closure owns its own data.
		tc := test
		t.Run(tc.Name, func(t *testing.T) {
			// Only Producer/Reason/Message are taken from EvictEvent; the
			// Replicas in the expected task must come from the cluster entry.
			tc.InputSpec.GracefulEvictCluster(tc.EvictEvent.FromCluster, NewTaskOptions(WithProducer(tc.EvictEvent.Producer), WithReason(tc.EvictEvent.Reason), WithMessage(tc.EvictEvent.Message)))
			if !reflect.DeepEqual(tc.InputSpec.Clusters, tc.ExpectSpec.Clusters) {
				t.Fatalf("expect clusters: %v, but got: %v", tc.ExpectSpec.Clusters, tc.InputSpec.Clusters)
			}
			if !reflect.DeepEqual(tc.InputSpec.GracefulEvictionTasks, tc.ExpectSpec.GracefulEvictionTasks) {
				t.Fatalf("expect tasks: %v, but got: %v", tc.ExpectSpec.GracefulEvictionTasks, tc.InputSpec.GracefulEvictionTasks)
			}
		})
	}
}
// TestResourceBindingSpec_ClusterInGracefulEvictionTasks verifies that a
// cluster listed in the graceful eviction queue is reported as under
// eviction, while an unlisted cluster is not.
func TestResourceBindingSpec_ClusterInGracefulEvictionTasks(t *testing.T) {
	evictionQueue := []GracefulEvictionTask{
		{
			FromCluster: "member1",
			Producer:    EvictionProducerTaintManager,
			Reason:      EvictionReasonTaintUntolerated,
		},
		{
			FromCluster: "member2",
			Producer:    EvictionProducerTaintManager,
			Reason:      EvictionReasonTaintUntolerated,
		},
	}
	cases := []struct {
		name    string
		spec    ResourceBindingSpec
		cluster string
		want    bool
	}{
		{
			name:    "targetCluster is in the process of eviction",
			spec:    ResourceBindingSpec{GracefulEvictionTasks: evictionQueue},
			cluster: "member1",
			want:    true,
		},
		{
			name:    "targetCluster is not in the process of eviction",
			spec:    ResourceBindingSpec{GracefulEvictionTasks: evictionQueue},
			cluster: "member3",
			want:    false,
		},
	}
	for i := range cases {
		// Copy the case out of the slice so the subtest closure owns its data.
		c := cases[i]
		t.Run(c.name, func(t *testing.T) {
			if got := c.spec.ClusterInGracefulEvictionTasks(c.cluster); got != c.want {
				t.Errorf("expected: %v, but got: %v", c.want, got)
			}
		})
	}
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha2 is the v1alpha2 version of the API.
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true

View File

@ -1,15 +1,44 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
const (
// ResourceBindingReferenceKey is the key of ResourceBinding object.
// It is usually a unique hash value of ResourceBinding object's namespace and name, intended to be added to the Work object.
// It will be used to retrieve all Works objects that derived from a specific ResourceBinding object.
ResourceBindingReferenceKey = "resourcebinding.karmada.io/key"
// ResourceBindingPermanentIDLabel is the identifier of a ResourceBinding object.
// Karmada generates a unique identifier, such as metadata.UUID, for each ResourceBinding object.
// This identifier will be used as a label selector to locate corresponding resources, such as Work.
// The reason for generating a new unique identifier instead of simply using metadata.UUID is because:
// In backup scenarios, when applying the backup resource manifest in a new cluster, the UUID may change.
ResourceBindingPermanentIDLabel = "resourcebinding.karmada.io/permanent-id"
// ClusterResourceBindingReferenceKey is the key of ClusterResourceBinding object.
// It is usually a unique hash value of ClusterResourceBinding object's namespace and name, intended to be added to the Work object.
// It will be used to retrieve all Works objects that derived by a specific ClusterResourceBinding object.
ClusterResourceBindingReferenceKey = "clusterresourcebinding.karmada.io/key"
// ClusterResourceBindingPermanentIDLabel is the identifier of a ClusterResourceBinding object.
// Karmada generates a unique identifier, such as metadata.UUID, for each ClusterResourceBinding object.
// This identifier will be used as a label selector to locate corresponding resources, such as Work.
// The reason for generating a new unique identifier instead of simply using metadata.UUID is because:
// In backup scenarios, when applying the backup resource manifest in a new cluster, the UUID may change.
ClusterResourceBindingPermanentIDLabel = "clusterresourcebinding.karmada.io/permanent-id"
// WorkPermanentIDLabel is the ID of Work object.
WorkPermanentIDLabel = "work.karmada.io/permanent-id"
// WorkNamespaceAnnotation is added to objects to specify associated Work's namespace.
WorkNamespaceAnnotation = "work.karmada.io/namespace"
// WorkNameAnnotation is added to objects to specify associated Work's name.
WorkNameAnnotation = "work.karmada.io/name"
// ResourceBindingNamespaceAnnotationKey is added to object to describe the associated ResourceBinding's namespace.
// It is added to:
@ -29,11 +58,19 @@ const (
// - Manifest in Work object: describes the name of ClusterResourceBinding which the manifest derived from.
ClusterResourceBindingAnnotationKey = "clusterresourcebinding.karmada.io/name"
// WorkNamespaceLabel is added to objects to specify associated Work's namespace.
WorkNamespaceLabel = "work.karmada.io/namespace"
// BindingManagedByLabel is added to ResourceBinding to represent what kind of resource manages this Binding.
BindingManagedByLabel = "binding.karmada.io/managed-by"
// WorkNameLabel is added to objects to specify associated Work's name.
WorkNameLabel = "work.karmada.io/name"
// ResourceTemplateGenerationAnnotationKey records the generation of resource template in Karmada APIServer,
// It will be injected into the resource when propagating to member clusters, to denote the specific version of
// the resource template from which the resource is derived. It might be helpful in the following cases:
// 1. Facilitating observation from member clusters to ascertain if the most recent resource template has been
// completely synced.
// 2. The annotation will be synced back to Karmada during the process of syncing resource status,
// by leveraging this annotation, Karmada can infer if the most recent resource template has been completely
// synced on member clusters, then generates accurate observed generation(like Deployment's .status.observedGeneration)
// which might be required by the release system.
ResourceTemplateGenerationAnnotationKey = "resourcetemplate.karmada.io/generation"
)
// Define resource conflict resolution
@ -43,11 +80,15 @@ const (
// The valid value is:
// - overwrite: always overwrite the resource if already exist. The resource will be overwritten with the
// configuration from control plane.
// - abort: do not resolve the conflict and stop propagating to avoid unexpected overwrites (default value)
// Note: Propagation of the resource template without this annotation will fail in case of already exists.
ResourceConflictResolutionAnnotation = "work.karmada.io/conflict-resolution"
// ResourceConflictResolutionOverwrite is the value of ResourceConflictResolutionAnnotation, indicating the overwrite strategy.
// ResourceConflictResolutionOverwrite is a value of ResourceConflictResolutionAnnotation, indicating the overwrite strategy.
ResourceConflictResolutionOverwrite = "overwrite"
// ResourceConflictResolutionAbort is a value of ResourceConflictResolutionAnnotation, indicating stop propagating.
ResourceConflictResolutionAbort = "abort"
)
// Define annotations that are added to the resource template.
@ -75,6 +116,14 @@ const (
// E.g. "resourcetemplate.karmada.io/managed-annotations: bar,foo".
// Note: the keys will be sorted in alphabetical order.
ManagedAnnotation = "resourcetemplate.karmada.io/managed-annotations"
// DeletionProtectionLabelKey If a user assigns the DeletionProtectionLabelKey label to a specific resource,
// and the value of this label is DeletionProtectionAlways, then deletion requests
// for this resource will be denied.
// In the current design, only the Value set to 'Always' will be protected,
// Additional options will be added here in the future.
DeletionProtectionLabelKey = "resourcetemplate.karmada.io/deletion-protected"
DeletionProtectionAlways = "Always"
)
// Define eviction reasons.

View File

@ -1,6 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha2
@ -133,7 +149,22 @@ func (in *GracefulEvictionTask) DeepCopyInto(out *GracefulEvictionTask) {
*out = new(bool)
**out = **in
}
in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
if in.PreservedLabelState != nil {
in, out := &in.PreservedLabelState, &out.PreservedLabelState
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.CreationTimestamp != nil {
in, out := &in.CreationTimestamp, &out.CreationTimestamp
*out = (*in).DeepCopy()
}
if in.ClustersBeforeFailover != nil {
in, out := &in.ClustersBeforeFailover, &out.ClustersBeforeFailover
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -325,6 +356,25 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) {
*out = new(v1alpha1.FailoverBehavior)
(*in).DeepCopyInto(*out)
}
if in.RescheduleTriggeredAt != nil {
in, out := &in.RescheduleTriggeredAt, &out.RescheduleTriggeredAt
*out = (*in).DeepCopy()
}
if in.Suspension != nil {
in, out := &in.Suspension, &out.Suspension
*out = new(Suspension)
(*in).DeepCopyInto(*out)
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
if in.SchedulePriority != nil {
in, out := &in.SchedulePriority, &out.SchedulePriority
*out = new(SchedulePriority)
**out = **in
}
return
}
@ -341,6 +391,10 @@ func (in *ResourceBindingSpec) DeepCopy() *ResourceBindingSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceBindingStatus) DeepCopyInto(out *ResourceBindingStatus) {
*out = *in
if in.LastScheduledTime != nil {
in, out := &in.LastScheduledTime, &out.LastScheduledTime
*out = (*in).DeepCopy()
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
@ -368,6 +422,44 @@ func (in *ResourceBindingStatus) DeepCopy() *ResourceBindingStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SchedulePriority) DeepCopyInto(out *SchedulePriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePriority.
func (in *SchedulePriority) DeepCopy() *SchedulePriority {
if in == nil {
return nil
}
out := new(SchedulePriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Suspension) DeepCopyInto(out *Suspension) {
*out = *in
in.Suspension.DeepCopyInto(&out.Suspension)
if in.Scheduling != nil {
in, out := &in.Scheduling, &out.Scheduling
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Suspension.
func (in *Suspension) DeepCopy() *Suspension {
if in == nil {
return nil
}
out := new(Suspension)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetCluster) DeepCopyInto(out *TargetCluster) {
*out = *in
@ -397,6 +489,18 @@ func (in *TaskOptions) DeepCopyInto(out *TaskOptions) {
*out = new(bool)
**out = **in
}
if in.preservedLabelState != nil {
in, out := &in.preservedLabelState, &out.preservedLabelState
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.clustersBeforeFailover != nil {
in, out := &in.clustersBeforeFailover, &out.clustersBeforeFailover
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@ -1,11 +1,30 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by register-gen. DO NOT EDIT.
package v1alpha2
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -27,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)