mirror of https://github.com/openkruise/kruise.git
fix: replace deadcode with unused and address existing issues (#1175)
* fix: replace deadcode with unused and address existing issues
* chore: update manifests
* fix: remove unused functions to make linter happy
* fix: make generate
* fix: run make generate manifests using Go 1.18

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>
This commit is contained in:
parent 83029a47df
commit 1ee3516903
@ -72,7 +72,6 @@ linters:
disable-all: true
enable:
# TODO Enforce the below linters later
- deadcode
- gofmt
- govet
- goimports
@ -80,6 +79,7 @@ linters:
- misspell
- vet
- unconvert
- unused
issues:
exclude:
# staticcheck
@ -6,7 +6,7 @@ Kruise uses semantic versioning so that their users can reliably upgrade accordi

## Release Cycle

Kruise is using a release schedule of 2 to 3 months and uses [GitHub milestones](https://github.com/openkruise/kruise/milestones) to provide an indiciation of when the next release is planned.
Kruise is using a release schedule of 2 to 3 months and uses [GitHub milestones](https://github.com/openkruise/kruise/milestones) to provide an indication of when the next release is planned.
It can, however, happen that releases get delayed which will be reflected on the milestone.

## Release Process
@ -33,5 +33,5 @@ Next to that, you can join the conversation for every new release on GitHub Disc
- [User Documentation](https://openkruise.io/docs/)
- Community
- [GitHub Discussions](https://github.com/openkruise/kruise/discussions/new)
- [Channel #openkruise in Kubernete Slack](https://kubernetes.slack.com/channels/openkruise) ([registration](http://slack.k8s.io/))
- [Channel #openkruise in Kubernetes Slack](https://kubernetes.slack.com/channels/openkruise) ([registration](http://slack.k8s.io/))
- DingTalk:Search GroupID 23330762 (Chinese)
@ -40,7 +40,7 @@ type UpdatePriorityStrategy struct {
WeightPriority []UpdatePriorityWeightTerm `json:"weightPriority,omitempty"`
}

// UpdatePriorityOrder defines order priority.
// UpdatePriorityOrderTerm defines order priority.
type UpdatePriorityOrderTerm struct {
// Calculate priority by value of this key.
// Values of this key, will be sorted by GetInt(val). GetInt method will find the last int in value,
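The ordered-key rule above sorts label values by the last integer they contain, so that a value like `batch-10` outranks `batch-2` numerically instead of lexically. A minimal standalone sketch of that idea (the helper name and regular expression are illustrative, not Kruise's actual GetInt implementation):

```go
package main

import (
	"fmt"
	"regexp"
	"sort"
	"strconv"
)

// lastIntRe captures the last run of digits in a string.
var lastIntRe = regexp.MustCompile(`(\d+)\D*$`)

// lastInt returns the last integer found in s, or 0 if there is none.
// Illustrative only; the real GetInt helper may differ in detail.
func lastInt(s string) int {
	m := lastIntRe.FindStringSubmatch(s)
	if m == nil {
		return 0
	}
	n, _ := strconv.Atoi(m[1])
	return n
}

func main() {
	values := []string{"batch-10", "batch-2", "batch-1"}
	// Higher priority value first, as an update-priority ordering would do.
	sort.Slice(values, func(i, j int) bool { return lastInt(values[i]) > lastInt(values[j]) })
	fmt.Println(values) // [batch-10 batch-2 batch-1]
}
```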
@ -309,7 +309,7 @@ spec:
in labels will be sorted by the value of key2 and put behind
those pods have key1.'
items:
description: UpdatePriorityOrder defines order priority.
description: UpdatePriorityOrderTerm defines order priority.
properties:
orderedKey:
description: Calculate priority by value of this key.
@ -238,7 +238,8 @@ spec:
by the value of key2 and put behind those pods have
key1.'
items:
description: UpdatePriorityOrder defines order priority.
description: UpdatePriorityOrderTerm defines order
priority.
properties:
orderedKey:
description: Calculate priority by value of
@ -779,7 +780,8 @@ spec:
by the value of key2 and put behind those pods have
key1.'
items:
description: UpdatePriorityOrder defines order priority.
description: UpdatePriorityOrderTerm defines order
priority.
properties:
orderedKey:
description: Calculate priority by value of
@ -436,8 +436,8 @@ spec:
the value of key2 and put behind those
pods have key1.'
items:
description: UpdatePriorityOrder defines
order priority.
description: UpdatePriorityOrderTerm
defines order priority.
properties:
orderedKey:
description: Calculate priority
@ -816,8 +816,8 @@ spec:
key2 in labels will be sorted by the value of
key2 and put behind those pods have key1.'
items:
description: UpdatePriorityOrder defines order
priority.
description: UpdatePriorityOrderTerm defines
order priority.
properties:
orderedKey:
description: Calculate priority by value
main.go
@ -25,7 +25,6 @@ import (
"time"
_ "time/tzdata" // for AdvancedCronJob Time Zone support

"github.com/openkruise/kruise/pkg/util/controllerfinder"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -38,18 +37,18 @@ import (
"k8s.io/kubernetes/pkg/capabilities"
ctrl "sigs.k8s.io/controller-runtime"

extclient "github.com/openkruise/kruise/pkg/client"
"github.com/openkruise/kruise/pkg/features"
utilclient "github.com/openkruise/kruise/pkg/util/client"
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
"github.com/openkruise/kruise/pkg/util/fieldindex"
"github.com/openkruise/kruise/pkg/webhook"

appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
policyv1alpha1 "github.com/openkruise/kruise/apis/policy/v1alpha1"
extclient "github.com/openkruise/kruise/pkg/client"
"github.com/openkruise/kruise/pkg/controller"
"github.com/openkruise/kruise/pkg/features"
utilclient "github.com/openkruise/kruise/pkg/util/client"
"github.com/openkruise/kruise/pkg/util/controllerfinder"
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
"github.com/openkruise/kruise/pkg/util/fieldindex"
_ "github.com/openkruise/kruise/pkg/util/metrics/leadership"
"github.com/openkruise/kruise/pkg/webhook"
// +kubebuilder:scaffold:imports
)
@ -30,25 +30,25 @@ import (
)

const (
// record which hot upgrade container is working currently
// SidecarSetWorkingHotUpgradeContainer records which hot upgrade container is working currently
SidecarSetWorkingHotUpgradeContainer = "kruise.io/sidecarset-working-hotupgrade-container"

// hotUpgrade container name suffix
hotUpgradeNameSuffix1 = "-1"
hotUpgradeNameSuffix2 = "-2"

// sidecar container version in container env(SIDECARSET_VERSION)
// SidecarSetVersionEnvKey is sidecar container version in container env(SIDECARSET_VERSION)
SidecarSetVersionEnvKey = "SIDECARSET_VERSION"
// container version env in the other sidecar container of the same hotupgrade sidecar(SIDECARSET_VERSION_ALT)
// SidecarSetVersionAltEnvKey is container version env in the other sidecar container of the same hotupgrade sidecar(SIDECARSET_VERSION_ALT)
SidecarSetVersionAltEnvKey = "SIDECARSET_VERSION_ALT"
)

// return format: mesh-1, mesh-2
// GetHotUpgradeContainerName returns format: mesh-1, mesh-2
func GetHotUpgradeContainerName(name string) (string, string) {
return name + hotUpgradeNameSuffix1, name + hotUpgradeNameSuffix2
}

// only used in hot upgrade container
// GetPodSidecarSetVersionAnnotation is only used in hot upgrade container
// cName format: mesh-1, mesh-2
func GetPodSidecarSetVersionAnnotation(cName string) string {
return fmt.Sprintf("version.sidecarset.kruise.io/%s", cName)
@ -58,12 +58,12 @@ func GetPodSidecarSetVersionAltAnnotation(cName string) string {
return fmt.Sprintf("versionalt.sidecarset.kruise.io/%s", cName)
}

// whether sidecar container update strategy is HotUpdate
// IsHotUpgradeContainer indicates whether sidecar container update strategy is HotUpdate
func IsHotUpgradeContainer(sidecarContainer *appsv1alpha1.SidecarContainer) bool {
return sidecarContainer.UpgradeStrategy.UpgradeType == appsv1alpha1.SidecarContainerHotUpgrade
}

// which hot upgrade sidecar container is working now
// GetPodHotUpgradeInfoInAnnotations checks which hot upgrade sidecar container is working now
// format: sidecarset.spec.container[x].name -> pod.spec.container[x].name
// for example: mesh -> mesh-1, envoy -> envoy-2
func GetPodHotUpgradeInfoInAnnotations(pod *corev1.Pod) map[string]string {
@ -84,7 +84,7 @@ func GetPodHotUpgradeInfoInAnnotations(pod *corev1.Pod) map[string]string {
// GetPodHotUpgradeContainers return two hot upgrade sidecar containers
// workContainer: currently working sidecar container, record in pod annotations[kruise.io/sidecarset-working-hotupgrade-container]
// otherContainer:
// 1. empty container
//  1. empty container
// 2. when in hot upgrading process, the older sidecar container
func GetPodHotUpgradeContainers(sidecarName string, pod *corev1.Pod) (workContainer, otherContainer string) {
hotUpgradeWorkContainer := GetPodHotUpgradeInfoInAnnotations(pod)
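Taken together, the helpers in this hunk describe a naming and annotation scheme: a hot-upgrade sidecar named `mesh` becomes two pod containers `mesh-1`/`mesh-2`, and pod annotations record which of the two is currently serving plus a per-container version. A small sketch of how those pieces fit (the helpers are re-stated locally, and the JSON encoding of the working-container map is an assumption for illustration, not confirmed by this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified restatement of the constants and helpers shown above.
const workingHotUpgradeAnno = "kruise.io/sidecarset-working-hotupgrade-container"

func hotUpgradeContainerNames(name string) (string, string) {
	return name + "-1", name + "-2"
}

func versionAnnotation(cName string) string {
	return fmt.Sprintf("version.sidecarset.kruise.io/%s", cName)
}

func main() {
	// A hot-upgrade sidecar "mesh" is materialized as two pod containers.
	c1, c2 := hotUpgradeContainerNames("mesh")
	fmt.Println(c1, c2) // mesh-1 mesh-2

	// The working container per sidecar is recorded in one pod annotation
	// (assumed JSON-encoded here), alongside per-container version annotations.
	working := map[string]string{"mesh": c1}
	raw, _ := json.Marshal(working)
	annotations := map[string]string{
		workingHotUpgradeAnno: string(raw),
		versionAnnotation(c1): "2", // current version of mesh-1
		versionAnnotation(c2): "1", // older version still held by mesh-2
	}
	fmt.Println(annotations)
}
```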
@ -464,19 +464,6 @@ func (dsc *ReconcileDaemonSet) syncDaemonSet(request reconcile.Request) error {
return dsc.updateDaemonSetStatus(ds, nodeList, hash, true)
}

func (dsc *ReconcileDaemonSet) getDaemonSetsForPod(pod *corev1.Pod) []*appsv1alpha1.DaemonSet {
sets, err := dsc.GetPodDaemonSets(pod)
if err != nil {
return nil
}
if len(sets) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
utilruntime.HandleError(fmt.Errorf("user error! more than one daemon is selecting pods with labels: %+v", pod.Labels))
}
return sets
}

// Predicates checks if a DaemonSet's pod can run on a node.
func Predicates(pod *corev1.Pod, node *corev1.Node, taints []corev1.Taint) (fitsNodeName, fitsNodeAffinity, fitsTaints bool) {
fitsNodeName = len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == node.Name
@ -36,20 +36,13 @@ import (
"github.com/openkruise/kruise/pkg/util/inplaceupdate"
)

func (r *ReconcileDaemonSet) createImagePullJobsForInPlaceUpdate(ds *appsv1alpha1.DaemonSet, oldRevisions []*apps.ControllerRevision, updateRevision *apps.ControllerRevision) error {
func (dsc *ReconcileDaemonSet) createImagePullJobsForInPlaceUpdate(ds *appsv1alpha1.DaemonSet, oldRevisions []*apps.ControllerRevision, updateRevision *apps.ControllerRevision) error {
if _, ok := updateRevision.Labels[appsv1alpha1.ImagePreDownloadCreatedKey]; ok {
return nil
} else if _, ok := updateRevision.Labels[appsv1alpha1.ImagePreDownloadIgnoredKey]; ok {
return nil
}

//// ignore if replicas <= minimumReplicasToPreDownloadImage
//if *ds.Spec.Replicas <= minimumReplicasToPreDownloadImage {
// klog.V(4).Infof("CloneSet %s/%s skipped to create ImagePullJob for replicas %d <= %d",
// ds.Namespace, ds.Name, *ds.Spec.Replicas, minimumReplicasToPreDownloadImage)
// return r.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
//}

// ignore if all Pods update in one batch
var partition, maxUnavailable int
var dsPodsNumber = int(ds.Status.DesiredNumberScheduled)
@ -61,7 +54,7 @@ func (r *ReconcileDaemonSet) createImagePullJobsForInPlaceUpdate(ds *appsv1alpha
if partition == 0 && maxUnavailable >= dsPodsNumber {
klog.V(4).Infof("DaemonSet %s/%s skipped to create ImagePullJob for all Pods update in one batch, replicas=%d, partition=%d, maxUnavailable=%d",
ds.Namespace, ds.Name, dsPodsNumber, partition, maxUnavailable)
return r.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
return dsc.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
}

// start to create jobs
@ -92,25 +85,25 @@ func (r *ReconcileDaemonSet) createImagePullJobsForInPlaceUpdate(ds *appsv1alpha
for name, image := range containerImages {
// job name is revision name + container name, it can not be more than 255 characters
jobName := fmt.Sprintf("%s-%s", updateRevision.Name, name)
err := imagejobutilfunc.CreateJobForWorkload(r.Client, ds, controllerKind, jobName, image, labelMap, *selector, pullSecrets)
err := imagejobutilfunc.CreateJobForWorkload(dsc.Client, ds, controllerKind, jobName, image, labelMap, *selector, pullSecrets)
if err != nil {
if !errors.IsAlreadyExists(err) {
klog.Errorf("DaemonSet %s/%s failed to create ImagePullJob %s: %v", ds.Namespace, ds.Name, jobName, err)
r.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedCreateImagePullJob", "failed to create ImagePullJob %s: %v", jobName, err)
dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedCreateImagePullJob", "failed to create ImagePullJob %s: %v", jobName, err)
}
continue
}
klog.V(3).Infof("DaemonSet %s/%s created ImagePullJob %s for image: %s", ds.Namespace, ds.Name, jobName, image)
r.eventRecorder.Eventf(ds, v1.EventTypeNormal, "CreatedImagePullJob", "created ImagePullJob %s for image: %s", jobName, image)
dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "CreatedImagePullJob", "created ImagePullJob %s for image: %s", jobName, image)
}

return r.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadCreatedKey, "true")
return dsc.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadCreatedKey, "true")
}

func (r *ReconcileDaemonSet) patchControllerRevisionLabels(revision *apps.ControllerRevision, key, value string) error {
func (dsc *ReconcileDaemonSet) patchControllerRevisionLabels(revision *apps.ControllerRevision, key, value string) error {
oldRevision := revision.ResourceVersion
body := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, key, value)
if err := r.Patch(context.TODO(), revision, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
if err := dsc.Patch(context.TODO(), revision, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
return err
}
if oldRevision != revision.ResourceVersion {
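The skip condition shown in this hunk only depends on the resolved partition, maxUnavailable, and the number of scheduled daemon pods. A standalone sketch of that decision using plain integers (the real controller resolves these values from the DaemonSet's rolling-update settings):

```go
package main

import "fmt"

// shouldSkipPreDownload mirrors the check above: when partition is 0 and
// maxUnavailable covers every scheduled pod, all pods update in one batch,
// so pre-pulling the new image would not speed anything up.
func shouldSkipPreDownload(partition, maxUnavailable, dsPodsNumber int) bool {
	return partition == 0 && maxUnavailable >= dsPodsNumber
}

func main() {
	fmt.Println(shouldSkipPreDownload(0, 10, 10)) // true: one batch, skip creating ImagePullJobs
	fmt.Println(shouldSkipPreDownload(5, 2, 10))  // false: rolling in batches, pre-download helps
}
```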
@ -92,10 +92,10 @@ func nodeInSameCondition(old []corev1.NodeCondition, cur []corev1.NodeCondition)

// nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
// summary. Returned booleans are:
// * shouldRun:
// - shouldRun:
// Returns true when a daemonset should run on the node if a daemonset pod is not already
// running on that node.
// * shouldContinueRunning:
// - shouldContinueRunning:
// Returns true when a daemonset should continue running on a node if a daemonset pod is already
// running on that node.
func nodeShouldRunDaemonPod(node *corev1.Node, ds *appsv1alpha1.DaemonSet) (bool, bool) {
@ -129,8 +129,8 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}

//In workload scaling scenario, there is a risk of interception by the pub webhook against the scaled pod.
//The solution for this scenario: the pub controller listens to workload replicas changes and adjusts UnavailableAllowed in time.
// In workload scaling scenario, there is a risk of interception by the pub webhook against the scaled pod.
// The solution for this scenario: the pub controller listens to workload replicas changes and adjusts UnavailableAllowed in time.
// Example for:
// 1. cloneSet.replicas = 100, pub.MaxUnavailable = 10%, then UnavailableAllowed=10.
// 2. at this time the cloneSet.replicas is scaled down to 50, the pub controller listens to the replicas change, triggering reconcile will adjust UnavailableAllowed to 55.
@ -152,7 +152,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}

//kruise AdvancedStatefulSet
// kruise AdvancedStatefulSet
if err = c.Watch(&source.Kind{Type: &kruiseappsv1beta1.StatefulSet{}}, &SetEnqueueRequestForPUB{mgr}, predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
old := e.ObjectOld.(*kruiseappsv1beta1.StatefulSet)
@ -169,7 +169,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}

//CloneSet
// CloneSet
if err = c.Watch(&source.Kind{Type: &kruiseappsv1alpha1.CloneSet{}}, &SetEnqueueRequestForPUB{mgr}, predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
old := e.ObjectOld.(*kruiseappsv1alpha1.CloneSet)
@ -186,7 +186,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}

//StatefulSet
// StatefulSet
if err = c.Watch(&source.Kind{Type: &apps.StatefulSet{}}, &SetEnqueueRequestForPUB{mgr}, predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
old := e.ObjectOld.(*apps.StatefulSet)
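The first bullet of the comment above is plain percentage math: a MaxUnavailable of 10% against 100 replicas allows 10 unavailable pods, and a reconcile after scaling recomputes the budget against the new replica count. A small sketch of how such a value is typically resolved with the standard apimachinery intstr helper (the second bullet's final UnavailableAllowed figure also depends on the controller's own bookkeeping, which is not shown here):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("10%")

	// 100 replicas with MaxUnavailable=10% -> 10 pods may be unavailable.
	allowed, err := intstr.GetScaledValueFromIntOrPercent(&maxUnavailable, 100, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(allowed) // 10

	// After the workload is scaled down to 50 replicas, a reconcile recomputes the value.
	allowed, _ = intstr.GetScaledValueFromIntOrPercent(&maxUnavailable, 50, true)
	fmt.Println(allowed) // 5
}
```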
@ -398,40 +398,6 @@ func countAvailablePods(pods []*corev1.Pod, disruptedPods, unavailablePods map[s
return
}

// This function returns pods using the PodUnavailableBudget object.
func (r *ReconcilePodUnavailableBudget) getPodsForPub(pub *policyv1alpha1.PodUnavailableBudget) ([]*corev1.Pod, error) {
// if targetReference isn't nil, priority to take effect
var listOptions *client.ListOptions
if pub.Spec.TargetReference != nil {
ref := pub.Spec.TargetReference
matchedPods, _, err := r.controllerFinder.GetPodsForRef(ref.APIVersion, ref.Kind, pub.Namespace, ref.Name, true)
return matchedPods, err
} else if pub.Spec.Selector == nil {
r.recorder.Eventf(pub, corev1.EventTypeWarning, "NoSelector", "Selector cannot be empty")
return nil, nil
}
// get pods for selector
labelSelector, err := util.ValidatedLabelSelectorAsSelector(pub.Spec.Selector)
if err != nil {
r.recorder.Eventf(pub, corev1.EventTypeWarning, "Selector", fmt.Sprintf("Label selector failed: %s", err.Error()))
return nil, nil
}
listOptions = &client.ListOptions{Namespace: pub.Namespace, LabelSelector: labelSelector}
podList := &corev1.PodList{}
if err := r.List(context.TODO(), podList, listOptions); err != nil {
return nil, err
}

matchedPods := make([]*corev1.Pod, 0, len(podList.Items))
for i := range podList.Items {
pod := &podList.Items[i]
if kubecontroller.IsPodActive(pod) {
matchedPods = append(matchedPods, pod)
}
}
return matchedPods, nil
}

func (r *ReconcilePodUnavailableBudget) getDesiredAvailableForPub(pub *policyv1alpha1.PodUnavailableBudget, expectedCount int32) (desiredAvailable int32, err error) {
if pub.Spec.MaxUnavailable != nil {
var maxUnavailable int
@ -459,61 +425,6 @@ func (r *ReconcilePodUnavailableBudget) getDesiredAvailableForPub(pub *policyv1a
return
}

func (r *ReconcilePodUnavailableBudget) getExpectedScale(pub *policyv1alpha1.PodUnavailableBudget, pods []*corev1.Pod) (int32, error) {
// if spec.targetRef!=nil, expectedCount=targetRef.spec.replicas
if pub.Spec.TargetReference != nil {
ref := controllerfinder.ControllerReference{
APIVersion: pub.Spec.TargetReference.APIVersion,
Kind: pub.Spec.TargetReference.Kind,
Name: pub.Spec.TargetReference.Name,
}
for _, finder := range r.controllerFinder.Finders() {
scaleNSelector, err := finder(ref, pub.Namespace)
if err != nil {
klog.Errorf("podUnavailableBudget(%s/%s) handle TargetReference failed: %s", pub.Namespace, pub.Name, err.Error())
return 0, err
}
if scaleNSelector != nil && scaleNSelector.Metadata.DeletionTimestamp.IsZero() {
return scaleNSelector.Scale, nil
}
}

// if target reference workload not found, or reference selector is nil
return 0, nil
}

// 1. Find the controller for each pod. If any pod has 0 controllers,
// that's an error. With ControllerRef, a pod can only have 1 controller.
// A mapping from controllers to their scale.
controllerScale := map[types.UID]int32{}
for _, pod := range pods {
ref := metav1.GetControllerOf(pod)
if ref == nil {
continue
}
// If we already know the scale of the controller there is no need to do anything.
if _, found := controllerScale[ref.UID]; found {
continue
}
// Check all the supported controllers to find the desired scale.
workload, err := r.controllerFinder.GetScaleAndSelectorForRef(ref.APIVersion, ref.Kind, pod.Namespace, ref.Name, ref.UID)
if err != nil && !errors.IsNotFound(err) {
return 0, err
}
if workload != nil && workload.Metadata.DeletionTimestamp.IsZero() {
controllerScale[workload.UID] = workload.Scale
}
}

// 2. Add up all the controllers.
var expectedCount int32
for _, count := range controllerScale {
expectedCount += count
}

return expectedCount, nil
}

func (r *ReconcilePodUnavailableBudget) buildDisruptedAndUnavailablePods(pods []*corev1.Pod, pub *policyv1alpha1.PodUnavailableBudget, currentTime time.Time) (
// disruptedPods, unavailablePods, recheckTime
map[string]metav1.Time, map[string]metav1.Time, *time.Time) {
@ -533,7 +444,7 @@ func (r *ReconcilePodUnavailableBudget) buildDisruptedAndUnavailablePods(pods []
continue
}

//handle disruption pods which will be eviction or deletion
// handle disruption pods which will be eviction or deletion
disruptionTime, found := disruptedPods[pod.Name]
if found {
expectedDeletion := disruptionTime.Time.Add(DeletionTimeout)
@ -81,7 +81,7 @@ func setupController(client clientset.Interface, kruiseClient kruiseclientset.In
cache.WaitForCacheSync(
stop,
kruiseInformerFactory.Apps().V1beta1().StatefulSets().Informer().HasSynced,
//informerFactory.Apps().V1().StatefulSets().Informer().HasSynced,
// informerFactory.Apps().V1().StatefulSets().Informer().HasSynced,
informerFactory.Core().V1().Pods().Informer().HasSynced,
informerFactory.Apps().V1().ControllerRevisions().Informer().HasSynced,
)
@ -671,7 +671,7 @@ func TestStatefulSetControl_getSetRevisions(t *testing.T) {
cache.WaitForCacheSync(
stop,
kruiseInformerFactory.Apps().V1beta1().StatefulSets().Informer().HasSynced,
//informerFactory.Apps().V1().StatefulSets().Informer().HasSynced,
// informerFactory.Apps().V1().StatefulSets().Informer().HasSynced,
informerFactory.Core().V1().Pods().Informer().HasSynced,
informerFactory.Apps().V1().ControllerRevisions().Informer().HasSynced,
)
@ -1658,7 +1658,7 @@ func TestUpdateStatefulSetWithMinReadySeconds(t *testing.T) {
validate func(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error
}
const setSize = 5
//originalImage := newStatefulSet(1).Spec.Template.Spec.Containers[0].Image
// originalImage := newStatefulSet(1).Spec.Template.Spec.Containers[0].Image
newImage := "foo"

readyPods := func(partition, pauseSecond int) func(om *fakeObjectManager, set *appsv1beta1.StatefulSet,
@ -2955,39 +2955,6 @@ func (om *fakeObjectManager) setPodReady(set *appsv1beta1.StatefulSet, ordinal i
return om.podsLister.Pods(set.Namespace).List(selector)
}

func (om *fakeObjectManager) setPodAvailable(set *appsv1beta1.StatefulSet, ordinal int, lastTransitionTime time.Time) ([]*v1.Pod, error) {
selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
if err != nil {
return nil, err
}
pods, err := om.podsLister.Pods(set.Namespace).List(selector)
if err != nil {
return nil, err
}
if 0 > ordinal || ordinal >= len(pods) {
return nil, fmt.Errorf("ordinal %d out of range [0,%d)", ordinal, len(pods))
}
sort.Sort(ascendingOrdinal(pods))
pod := pods[ordinal].DeepCopy()
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: metav1.Time{Time: lastTransitionTime}}
_, existingCondition := podutil.GetPodCondition(&pod.Status, condition.Type)
if existingCondition != nil {
existingCondition.Status = v1.ConditionTrue
existingCondition.LastTransitionTime = metav1.Time{Time: lastTransitionTime}
} else {
existingCondition = &v1.PodCondition{
Type: v1.PodReady,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: lastTransitionTime},
}
pod.Status.Conditions = append(pod.Status.Conditions, *existingCondition)
}
podutil.UpdatePodCondition(&pod.Status, &condition)
fakeResourceVersion(pod)
om.podsIndexer.Update(pod)
return om.podsLister.Pods(set.Namespace).List(selector)
}

func (om *fakeObjectManager) addTerminatingPod(set *appsv1beta1.StatefulSet, ordinal int) ([]*v1.Pod, error) {
pod := newStatefulSetPod(set, ordinal)
pod.SetUID(types.UID(pod.Name + "-uid")) // To match fakeObjectManager.CreatePod
@ -305,7 +305,8 @@ func (ssc *ReconcileStatefulSet) adoptOrphanRevisions(set *appsv1beta1.StatefulS
// It also reconciles ControllerRef by adopting/orphaning.
//
// NOTE: Returned Pods are pointers to objects from the cache.
// If you need to modify one, you need to copy it first.
//
// If you need to modify one, you need to copy it first.
func (ssc *ReconcileStatefulSet) getPodsForStatefulSet(set *appsv1beta1.StatefulSet, selector labels.Selector) ([]*v1.Pod, error) {
// List all pods to include the pods that don't match the selector anymore but
// has a ControllerRef pointing to this StatefulSet.
@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@ -22,7 +22,6 @@ import (
"testing"
"time"

wsutil "github.com/openkruise/kruise/pkg/util/workloadspread"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@ -32,6 +31,7 @@ import (

appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
"github.com/openkruise/kruise/pkg/util/controllerfinder"
wsutil "github.com/openkruise/kruise/pkg/util/workloadspread"
)

func newScheduledFailedPod() *corev1.Pod {
@ -58,20 +58,20 @@ func (r *ReconcileWorkloadSpread) updateDeletionCost(ws *appsv1alpha1.WorkloadSp

// syncSubsetPodDeletionCost calculates the deletion-cost for the Pods belong to subset and update deletion-cost annotation.
// We have two conditions for subset's Pod deletion-cost
// 1. the number of active Pods in this subset <= maxReplicas or maxReplicas = nil, deletion-cost = 100 * (subsets.length - subsetIndex).
// subset-a subset-b subset-c
// maxReplicas 10 10 nil
// pods number 10 10 10
// deletion-cost 300 200 100
// We delete Pods from back subset to front subset. The deletion order is: c -> b -> a.
// 2. the number of active Pods in this subset > maxReplicas
// two class:
// (a) the extra Pods more than maxReplicas: deletion-cost = -100 * (subsetIndex + 1) [Priority Deletion],
// (b) deletion-cost = 100 * (subsets.length - subsetIndex) [Reserve].
// subset-a subset-b subset-c
// maxReplicas 10 10 nil
// pods number 20 20 20
// deletion-cost (300,-100) (200,-200) 100
// 1. the number of active Pods in this subset <= maxReplicas or maxReplicas = nil, deletion-cost = 100 * (subsets.length - subsetIndex).
// name subset-a subset-b subset-c
// maxReplicas 10 10 nil
// pods number 10 10 10
// deletion-cost 300 200 100
// We delete Pods from back subset to front subset. The deletion order is: c -> b -> a.
// 2. the number of active Pods in this subset > maxReplicas
// two class:
// (a) the extra Pods more than maxReplicas: deletion-cost = -100 * (subsetIndex + 1) [Priority Deletion],
// (b) deletion-cost = 100 * (subsets.length - subsetIndex) [Reserve].
// name subset-a subset-b subset-c
// maxReplicas 10 10 nil
// pods number 20 20 20
// deletion-cost (300,-100) (200,-200) 100
func (r *ReconcileWorkloadSpread) syncSubsetPodDeletionCost(
ws *appsv1alpha1.WorkloadSpread,
subset *appsv1alpha1.WorkloadSpreadSubset,
@ -134,12 +134,7 @@ func (r *ReconcileWorkloadSpread) syncSubsetPodDeletionCost(
if err != nil {
return err
}
err = r.updateDeletionCostForSubsetPods(ws, subset, negativePods, strconv.Itoa(wsutil.PodDeletionCostNegative*(subsetIndex+1)))
if err != nil {
return err
}

return nil
return r.updateDeletionCostForSubsetPods(ws, subset, negativePods, strconv.Itoa(wsutil.PodDeletionCostNegative*(subsetIndex+1)))
}

func (r *ReconcileWorkloadSpread) updateDeletionCostForSubsetPods(ws *appsv1alpha1.WorkloadSpread,
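The deletion-cost rules in the comment above reduce to simple arithmetic per subset: pods within maxReplicas keep a positive "reserve" cost, and extra pods get a negative cost so they are deleted first. A standalone sketch reproducing the second table (names and the negative-value convention for "nil" maxReplicas are illustrative; the real controller works on Pod objects and annotations):

```go
package main

import "fmt"

// deletionCosts computes the two costs for one subset: reserve for pods within
// maxReplicas, priorityDelete for the extra pods beyond it. A maxReplicas < 0
// stands in for "nil" (no limit), in which case nothing is marked for deletion.
func deletionCosts(subsetIndex, subsetCount, activePods, maxReplicas int) (reserve, priorityDelete, extra int) {
	reserve = 100 * (subsetCount - subsetIndex)
	if maxReplicas < 0 || activePods <= maxReplicas {
		return reserve, 0, 0
	}
	priorityDelete = -100 * (subsetIndex + 1)
	extra = activePods - maxReplicas
	return reserve, priorityDelete, extra
}

func main() {
	// Mirrors the second table: subset-a and subset-b have 20 pods but maxReplicas=10,
	// subset-c has no limit; there are 3 subsets in total.
	subsets := []struct {
		name                string
		active, maxReplicas int
	}{{"subset-a", 20, 10}, {"subset-b", 20, 10}, {"subset-c", 20, -1}}

	for i, s := range subsets {
		reserve, negative, extra := deletionCosts(i, len(subsets), s.active, s.maxReplicas)
		fmt.Println(s.name, reserve, negative, extra)
	}
	// subset-a 300 -100 10
	// subset-b 200 -200 10
	// subset-c 100 0 0
}
```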
@ -53,20 +53,20 @@ func convertToRegistryAuths(pullSecrets []v1.Secret, repo string) (infos []daemo
return infos, nil
}

//// Auths struct contains an embedded RegistriesStruct of name auths
//type Auths struct {
// // Auths struct contains an embedded RegistriesStruct of name auths
// type Auths struct {
// Registries RegistriesStruct `json:"auths"`
//}
// }
//
//// RegistriesStruct is a map of registries
//type RegistriesStruct map[string]struct {
// // RegistriesStruct is a map of registries
// type RegistriesStruct map[string]struct {
// Username string `json:"username"`
// Password string `json:"password"`
// Email string `json:"email"`
// Auth string `json:"auth"`
//}
// }
//
//func convertToRegistryAuthInfo(secret v1.Secret, registry string) (*daemonutil.AuthInfo, error) {
// func convertToRegistryAuthInfo(secret v1.Secret, registry string) (*daemonutil.AuthInfo, error) {
// auths := Auths{}
// if secret.Type == v1.SecretTypeOpaque {
// return &daemonutil.AuthInfo{
@ -97,9 +97,9 @@ func convertToRegistryAuths(pullSecrets []v1.Secret, repo string) (infos []daemo
// }, nil
// }
// return nil, fmt.Errorf("imagePullSecret %s/%s contains neither .dockercfg nor .dockerconfigjson", secret.Namespace, secret.Name)
//}
// }

//func containsImage(c []ImageInfo, name string, tag string) bool {
// func containsImage(c []ImageInfo, name string, tag string) bool {
// for _, info := range c {
// for _, repoTag := range info.RepoTags {
// imageRepo, imageTag := daemonutil.ParseRepositoryTag(repoTag)
@ -109,11 +109,11 @@ func convertToRegistryAuths(pullSecrets []v1.Secret, repo string) (infos []daemo
// }
// }
// return false
//}
// }

type layerProgress struct {
*dockermessage.JSONProgress
Status string `json:"status,omitempty"` //Extracting,Pull complete,Pulling fs layer,Verifying Checksum,Downloading
Status string `json:"status,omitempty"` // Extracting,Pull complete,Pulling fs layer,Verifying Checksum,Downloading
}

type pullingProgress struct {
@ -201,11 +201,13 @@ func (r *imagePullStatusReader) mainloop() {
klog.V(5).Info("runtime read eof")
r.seedPullStatus(ImagePullStatus{Process: 100, Finish: true})
return
} else if err != nil {
}
if err != nil {
klog.V(5).Infof("runtime read err %v", err)
r.seedPullStatus(ImagePullStatus{Err: err, Finish: true})
return
} else if jm.Error != nil {
}
if jm.Error != nil {
klog.V(5).Infof("runtime read err %v", jm.Error)
r.seedPullStatus(ImagePullStatus{Err: fmt.Errorf("get error in pull response: %+v", jm.Error), Finish: true})
return
@ -238,8 +240,9 @@ func (c ImageInfo) ContainsImage(name string, tag string) bool {

// parseRepositoryTag gets a repos name and returns the right reposName + tag|digest
// The tag can be confusing because of a port in a repository name.
// Ex: localhost.localdomain:5000/samalba/hipache:latest
// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
//
// Ex: localhost.localdomain:5000/samalba/hipache:latest
// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
func parseRepositoryTag(repos string) (string, string) {
n := strings.Index(repos, "@")
if n >= 0 {
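The parseRepositoryTag comment above distinguishes a trailing `:tag` from the `:port` of a registry host and from an `@sha256:` digest. A small self-contained sketch of that parsing logic (re-implemented here for illustration; the real helper in the diff may handle more edge cases):

```go
package main

import (
	"fmt"
	"strings"
)

// splitRepositoryTag returns (repository, tag-or-digest). An "@" always marks a
// digest; otherwise a ":" only counts as a tag separator when it appears after
// the last "/", so a registry port such as ":5000" stays inside the repository.
func splitRepositoryTag(repos string) (string, string) {
	if n := strings.Index(repos, "@"); n >= 0 {
		return repos[:n], repos[n+1:]
	}
	n := strings.LastIndex(repos, ":")
	if n < 0 || strings.Contains(repos[n+1:], "/") {
		return repos, ""
	}
	return repos[:n], repos[n+1:]
}

func main() {
	fmt.Println(splitRepositoryTag("localhost.localdomain:5000/samalba/hipache:latest"))
	// localhost.localdomain:5000/samalba/hipache latest
	fmt.Println(splitRepositoryTag("localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb"))
	// localhost:5000/foo/bar sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
}
```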
@ -20,7 +20,6 @@ import (
"context"
"fmt"
"math/rand"
"reflect"
"time"

appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
@ -125,8 +124,3 @@ func (su *statusUpdater) updateStatus(nodeImage *appsv1alpha1.NodeImage, newStat
su.previousTimestamp = time.Now()
return false, err
}

func (su *statusUpdater) statusChanged(newStatus *appsv1alpha1.NodeImageStatus) bool {
// Can not use imagePullNode.Status to compare because of time accuracy
return !reflect.DeepEqual(su.previousStatus, newStatus)
}
@ -44,8 +44,9 @@ func ParseRegistry(imageName string) string {

// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest
// The tag can be confusing because of a port in a repository name.
// Ex: localhost.localdomain:5000/samalba/hipache:latest
// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
//
// Ex: localhost.localdomain:5000/samalba/hipache:latest
// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
func ParseRepositoryTag(repos string) (string, string) {
n := strings.Index(repos, "@")
if n >= 0 {
@ -27,14 +27,15 @@ import (
"k8s.io/kubernetes/pkg/util/slice"
)

// whether selector overlaps, the criteria:
// IsSelectorOverlapping indicates whether selector overlaps, the criteria:
// if exist one same key has different value and not overlap, then it is judged non-overlap, for examples:
// * a=b and a=c
// * a in [b,c] and a not in [b,c...]
// * a not in [b] and a not exist
// * a=b,c=d,e=f and a=x,c=d,e=f
// - a=b and a=c
// - a in [b,c] and a not in [b,c...]
// - a not in [b] and a not exist
// - a=b,c=d,e=f and a=x,c=d,e=f
//
// then others is overlap:
// * a=b and c=d
// - a=b and c=d
func IsSelectorOverlapping(selector1, selector2 *metav1.LabelSelector) bool {
return !(isDisjoint(selector1, selector2) || isDisjoint(selector2, selector1))
}
@ -181,24 +182,24 @@ func newRequirement(key string, op selection.Operator, vals []string) (*labels.R
return sel, nil
}

// whether selectors overlap (indicates that selector1, selector2 have same key, and there is an certain intersection)
// IsSelectorLooseOverlap indicates whether selectors overlap (indicates that selector1, selector2 have same key, and there is an certain intersection)
// 1. when selector1、selector2 don't have same key, it is considered non-overlap, e.g. selector1(a=b) and selector2(c=d)
// 2. when selector1、selector2 have same key, and matchLabels & matchExps are intersection, it is considered overlap.
// For examples:
// a In [b,c] And a Exist
// a In [b,...] [c,...] [Include any b,c,...]
// a NotIn [a,...] [b,....] [c,....] [All other cases are allowed except for the inclusion of both b,c...] [b,c,e]
// a Exist And a Exist
// a In [x,y,Any,...]
// a NotIn [a,b,Any...]
// a NotIn [b,c] And a Exist
// a NotExist
// a NotIn [a,b,Any...]
// a In [a,b] [a,c] [e,f] [Any,...] other than [b],[c],[b,c]
// a NotExist And a NotExist
// a NotIn [Any,...]
// When selector1 and selector2 contain the same key, except for the above case, they are considered non-overlap
//
// a In [b,c] And a Exist
// a In [b,...] [c,...] [Include any b,c,...]
// a NotIn [a,...] [b,....] [c,....] [All other cases are allowed except for the inclusion of both b,c...] [b,c,e]
// a Exist And a Exist
// a In [x,y,Any,...]
// a NotIn [a,b,Any...]
// a NotIn [b,c] And a Exist
// a NotExist
// a NotIn [a,b,Any...]
// a In [a,b] [a,c] [e,f] [Any,...] other than [b],[c],[b,c]
// a NotExist And a NotExist
// a NotIn [Any,...]
// When selector1 and selector2 contain the same key, except for the above case, they are considered non-overlap
func IsSelectorLooseOverlap(selector1, selector2 *metav1.LabelSelector) bool {
matchExp1 := convertSelectorToMatchExpressions(selector1)
matchExp2 := convertSelectorToMatchExpressions(selector2)
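The overlap criterion quoted above says two selectors can only be disjoint when they pin the same key to incompatible values. A deliberately narrow sketch of that rule restricted to matchLabels (the real functions in this file also reason about matchExpressions operators such as In/NotIn/Exists):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// matchLabelsDisjoint reports whether two selectors pin the same key to
// different values, which is the equality-only case of the criterion above.
func matchLabelsDisjoint(s1, s2 *metav1.LabelSelector) bool {
	for k, v1 := range s1.MatchLabels {
		if v2, ok := s2.MatchLabels[k]; ok && v1 != v2 {
			return true
		}
	}
	return false
}

func main() {
	ab := &metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}
	ac := &metav1.LabelSelector{MatchLabels: map[string]string{"a": "c"}}
	cd := &metav1.LabelSelector{MatchLabels: map[string]string{"c": "d"}}

	fmt.Println(!matchLabelsDisjoint(ab, ac)) // false: a=b and a=c cannot overlap
	fmt.Println(!matchLabelsDisjoint(ab, cd)) // true: a=b and c=d are treated as overlapping
}
```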
@ -121,13 +121,14 @@ func ParseImage(image string) (repo, tag, digest string, err error) {
return
}

//whether image is digest format,
//for example: docker.io/busybox@sha256:a9286defaba7b3a519d585ba0e37d0b2cbee74ebfe590960b0b1d6a5e97d1e1d
// IsImageDigest indicates whether image is digest format,
// for example: docker.io/busybox@sha256:a9286defaba7b3a519d585ba0e37d0b2cbee74ebfe590960b0b1d6a5e97d1e1d
func IsImageDigest(image string) bool {
	_, _, digest, _ := ParseImage(image)
	return digest != ""
}

// IsContainerImageEqual indicates whether container images are equal
// 1. image1, image2 are digest image, compare repo+digest
// 2. image1, image2 are normal image, compare repo+tag
// 3. image1, image2 are digest+normal image, don't support compare it, return false
@ -180,7 +181,7 @@ func CalculatePartitionReplicas(partition *intstrutil.IntOrString, replicasPoint
return pValue, nil
}

// check APIVersion, Kind, Name
// IsReferenceEqual checks APIVersion, Kind, Name
func IsReferenceEqual(ref1, ref2 appsv1alpha1.TargetReference) bool {
gv1, err := schema.ParseGroupVersion(ref1.APIVersion)
if err != nil {
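IsReferenceEqual parses the APIVersion with ParseGroupVersion before comparing, which suggests the comparison works on the API group rather than the literal version string. The diff only shows the signature and the parse call, so treating the comparison as group-plus-Kind-plus-Name is an assumption of this sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// referenceEqual sketches one plausible reading of the check above: two target
// references match when their API group, Kind, and Name agree. Ignoring the
// version segment is an assumption, not confirmed by the diff.
func referenceEqual(apiVersion1, kind1, name1, apiVersion2, kind2, name2 string) bool {
	gv1, err1 := schema.ParseGroupVersion(apiVersion1)
	gv2, err2 := schema.ParseGroupVersion(apiVersion2)
	if err1 != nil || err2 != nil {
		return false
	}
	return gv1.Group == gv2.Group && kind1 == kind2 && name1 == name2
}

func main() {
	// Same group (apps.kruise.io), different versions.
	fmt.Println(referenceEqual("apps.kruise.io/v1alpha1", "CloneSet", "sample",
		"apps.kruise.io/v1beta1", "CloneSet", "sample")) // true under this reading
	fmt.Println(referenceEqual("apps/v1", "Deployment", "sample",
		"apps.kruise.io/v1alpha1", "CloneSet", "sample")) // false
}
```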
@ -53,20 +53,10 @@ var (

// AdvancedCronJobCreateUpdateHandler handles AdvancedCronJob
type AdvancedCronJobCreateUpdateHandler struct {
//Client client.Client

// Decoder decodes objects
Decoder *admission.Decoder
}

func (h *AdvancedCronJobCreateUpdateHandler) validatingAdvancedCronJobFn(ctx context.Context, obj *appsv1alpha1.AdvancedCronJob) (bool, string, error) {
allErrs := h.validateAdvancedCronJob(obj)
if len(allErrs) != 0 {
return false, "", allErrs.ToAggregate()
}
return true, "allowed to be admitted", nil
}

func (h *AdvancedCronJobCreateUpdateHandler) validateAdvancedCronJob(obj *appsv1alpha1.AdvancedCronJob) field.ErrorList {
allErrs := genericvalidation.ValidateObjectMeta(&obj.ObjectMeta, true, validateAdvancedCronJobName, field.NewPath("metadata"))
allErrs = append(allErrs, validateAdvancedCronJobSpec(&obj.Spec, field.NewPath("spec"))...)
@ -233,14 +223,6 @@ func (h *AdvancedCronJobCreateUpdateHandler) Handle(ctx context.Context, req adm
return admission.ValidationResponse(true, "")
}

//var _ inject.Client = &AdvancedCronJobCreateUpdateHandler{}
//
//// InjectClient injects the client into the AdvancedCronJobCreateUpdateHandler
//func (h *AdvancedCronJobCreateUpdateHandler) InjectClient(c client.Client) error {
// h.Client = c
// return nil
//}

var _ admission.DecoderInjector = &AdvancedCronJobCreateUpdateHandler{}

// InjectDecoder injects the decoder into the AdvancedCronJobCreateUpdateHandler
@ -20,7 +20,6 @@ package atomic
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
@ -42,13 +41,13 @@ const (
//
// Note:
//
// 1. Writer reserves the set of pathnames starting with `..`.
// 2. Writer offers no concurrency guarantees and must be synchronized
// by the caller.
//  1. Writer reserves the set of pathnames starting with `..`.
//  2. Writer offers no concurrency guarantees and must be synchronized
//     by the caller.
//
// The visible files in this volume are symlinks to files in the writer's data
// directory. Actual files are stored in a hidden timestamped directory which
// is symlinked to by the data directory. The timestamped directory and
// is symlinked to the data directory. The timestamped directory and
// data directory symlink are created in the writer's target dir. This scheme
// allows the files to be atomically updated by changing the target of the
// data directory symlink.
@ -86,33 +85,37 @@ const (
//
// The Write algorithm is:
//
// 1. The payload is validated; if the payload is invalid, the function returns
// 2. The current timestamped directory is detected by reading the data directory
// symlink
// 3. The old version of the volume is walked to determine whether any
// portion of the payload was deleted and is still present on disk.
// 4. The data in the current timestamped directory is compared to the projected
// data to determine if an update is required.
// 5. A new timestamped dir is created
// 6. The payload is written to the new timestamped directory
// 7. Symlinks and directory for new user-visible files are created (if needed).
//  1. The payload is validated; if the payload is invalid, the function returns
//  2. The current timestamped directory is detected by reading the data directory
//     symlink
//
// For example, consider the files:
// <target-dir>/podName
// <target-dir>/user/labels
// <target-dir>/k8s/annotations
//  3. The old version of the volume is walked to determine whether any
//     portion of the payload was deleted and is still present on disk.
//
// The user visible files are symbolic links into the internal data directory:
// <target-dir>/podName -> ..data/podName
// <target-dir>/usr -> ..data/usr
// <target-dir>/k8s -> ..data/k8s
//  4. The data in the current timestamped directory is compared to the projected
//     data to determine if an update is required.
//  5. A new timestamped dir is created
//
//  6. The payload is written to the new timestamped directory
//  7. Symlinks and directory for new user-visible files are created (if needed).
//
//     For example, consider the files:
//     <target-dir>/podName
//     <target-dir>/user/labels
//     <target-dir>/k8s/annotations
//
//     The user visible files are symbolic links into the internal data directory:
//     <target-dir>/podName -> ..data/podName
//     <target-dir>/usr -> ..data/usr
//     <target-dir>/k8s -> ..data/k8s
//
// The data directory itself is a link to a timestamped directory with
// the real data:
// <target-dir>/..data -> ..2016_02_01_15_04_05.12345678/
// 8. A symlink to the new timestamped directory ..data_tmp is created that will
// become the new data directory
// 9. The new data directory symlink is renamed to the data directory; rename is atomic
//
//     The data directory itself is a link to a timestamped directory with
//     the real data:
//     <target-dir>/..data -> ..2016_02_01_15_04_05.12345678/
//  8. A symlink to the new timestamped directory ..data_tmp is created that will
//     become the new data directory
//  9. The new data directory symlink is renamed to the data directory; rename is atomic
// 10. Old paths are removed from the user-visible portion of the target directory
// 11. The previous timestamped directory is removed, if it exists
func (w *Writer) Write(payload map[string]FileProjection) error {
@ -297,7 +300,7 @@ func shouldWriteFile(path string, content []byte) (bool, error) {
return true, nil
}

contentOnFs, err := ioutil.ReadFile(path)
contentOnFs, err := os.ReadFile(path)
if err != nil {
return false, err
}
@ -349,7 +352,7 @@ func (w *Writer) pathsToRemove(payload map[string]FileProjection, oldTsDir strin

// newTimestampDir creates a new timestamp directory
func (w *Writer) newTimestampDir() (string, error) {
tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
tsDir, err := os.MkdirTemp(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
if err != nil {
klog.Error(err, "unable to create new temp directory")
return "", err
@ -382,12 +385,12 @@ func (w *Writer) writePayloadToDir(payload map[string]FileProjection, dir string
return err
}

err = ioutil.WriteFile(fullPath, content, mode)
err = os.WriteFile(fullPath, content, mode)
if err != nil {
klog.Error(err, "unable to write file", "file", fullPath, "mode", mode)
return err
}
// Chmod is needed because ioutil.WriteFile() ends up calling
// Chmod is needed because os.WriteFile() ends up calling
// open(2) to create the file, so the final mode used is "mode &
// ~umask". But we want to make sure the specified mode is used
// in the file no matter what the umask is.
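The numbered algorithm above boils down to writing a fresh timestamped directory and then atomically retargeting a `..data` symlink at it. A compressed sketch of that swap (paths and error handling simplified; the real Writer also validates payloads, diffs existing content, and maintains the user-visible links):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// atomicPublish writes payload into a new timestamped dir and atomically points
// targetDir/..data at it by renaming a temporary symlink over the old one.
func atomicPublish(targetDir string, payload map[string][]byte) error {
	tsDir, err := os.MkdirTemp(targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
	if err != nil {
		return err
	}
	for name, content := range payload {
		if err := os.WriteFile(filepath.Join(tsDir, name), content, 0644); err != nil {
			return err
		}
	}
	tmpLink := filepath.Join(targetDir, "..data_tmp")
	if err := os.Symlink(filepath.Base(tsDir), tmpLink); err != nil {
		return err
	}
	// rename(2) is atomic: readers see either the old or the new data directory.
	return os.Rename(tmpLink, filepath.Join(targetDir, "..data"))
}

func main() {
	dir, _ := os.MkdirTemp("", "atomic-demo")
	defer os.RemoveAll(dir)
	if err := atomicPublish(dir, map[string][]byte{"podName": []byte("demo")}); err != nil {
		panic(err)
	}
	data, _ := os.ReadFile(filepath.Join(dir, "..data", "podName"))
	fmt.Println(string(data)) // demo
}
```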
@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@ -542,7 +542,7 @@ func TestValidateWorkloadSpreadCreate(t *testing.T) {
errorSuffix: "spec.subsets[0].maxReplicas",
},

//{
// {
// name: "one subset",
// getWorkloadSpread: func() *appsv1alpha1.WorkloadSpread {
// workloadSpread := workloadSpreadDemo.DeepCopy()
@ -555,7 +555,7 @@ func TestValidateWorkloadSpreadCreate(t *testing.T) {
// return workloadSpread
// },
// errorSuffix: "spec.subsets",
//},
// },
{
name: "subset[0]'name is empty",
getWorkloadSpread: func() *appsv1alpha1.WorkloadSpread {
@ -583,7 +583,7 @@ func TestValidateWorkloadSpreadCreate(t *testing.T) {
},
errorSuffix: "spec.subsets[1].name",
},
//{
// {
// name: "subset[0]'s requiredNodeSelectorTerm, preferredNodeSelectorTerms and tolerations are all empty",
// getWorkloadSpread: func() *appsv1alpha1.WorkloadSpread {
// workloadSpread := workloadSpreadDemo.DeepCopy()
@ -593,7 +593,7 @@ func TestValidateWorkloadSpreadCreate(t *testing.T) {
// return workloadSpread
// },
// errorSuffix: "spec.subsets[0].requiredNodeSelectorTerm",
//},
// },
{
name: "requiredNodeSelectorTerm are not valid",
getWorkloadSpread: func() *appsv1alpha1.WorkloadSpread {