trim off native deployment controller codes
Signed-off-by: mingzhou.swx <mingzhou.swx@alibaba-inc.com>
parent b0c7b3b92a
commit d834a41077
go.mod
@@ -66,7 +66,6 @@ require (
	golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 // indirect
	golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
	golang.org/x/text v0.3.6 // indirect
	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/protobuf v1.26.0 // indirect
@@ -28,7 +28,6 @@ import (
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
@@ -177,18 +176,7 @@ func (r *ReconcileDeployment) Reconcile(_ context.Context, request reconcile.Req
	return ctrl.Result{}, err
}

type controllerFactory struct {
	client           clientset.Interface
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	// dLister can list/get deployments from the shared informer's store
	dLister appslisters.DeploymentLister
	// rsLister can list/get replica sets from the shared informer's store
	rsLister appslisters.ReplicaSetLister
	// podLister can list/get pods from the shared informer's store
	podLister corelisters.PodLister
}
type controllerFactory DeploymentController

// NewController create a new DeploymentController
// TODO: create new controller only when deployment is under our control
@@ -26,12 +26,10 @@ import (
	"reflect"
	"time"

	"github.com/openkruise/rollouts/pkg/controller/deployment/util"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	appslisters "k8s.io/client-go/listers/apps/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
@@ -68,75 +66,6 @@ type DeploymentController struct {
	podLister corelisters.PodLister
}

// getDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet.
func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet) []*apps.Deployment {
	deployments, err := util.GetDeploymentsForReplicaSet(dc.dLister, rs)
	if err != nil || len(deployments) == 0 {
		return nil
	}
	// Because all ReplicaSet's belonging to a deployment should have a unique label key,
	// there should never be more than one deployment returned by the above method.
	// If that happens we should probably dynamically repair the situation by ultimately
	// trying to clean up one of the controllers, for now we just return the older one
	if len(deployments) > 1 {
		// ControllerRef will ensure we don't do anything crazy, but more than one
		// item in this list nevertheless constitutes user error.
		klog.V(4).InfoS("user error! more than one deployment is selecting replica set",
			"replicaSet", klog.KObj(rs), "labels", rs.Labels, "deployment", klog.KObj(deployments[0]))
	}
	return deployments
}

// getDeploymentForPod returns the deployment managing the given Pod.
func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deployment {
	// Find the owning replica set
	var rs *apps.ReplicaSet
	var err error
	controllerRef := metav1.GetControllerOf(pod)
	if controllerRef == nil {
		// No controller owns this Pod.
		return nil
	}
	if controllerRef.Kind != apps.SchemeGroupVersion.WithKind("ReplicaSet").Kind {
		// Not a pod owned by a replica set.
		return nil
	}
	rs, err = dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
	if err != nil || rs.UID != controllerRef.UID {
		klog.V(4).InfoS("Cannot get replicaset for pod", "ownerReference", controllerRef.Name, "pod", klog.KObj(pod), "err", err)
		return nil
	}

	// Now find the Deployment that owns that ReplicaSet.
	controllerRef = metav1.GetControllerOf(rs)
	if controllerRef == nil {
		return nil
	}
	return dc.resolveControllerRef(rs.Namespace, controllerRef)
}

// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.Deployment {
	// We can't look up by UID, so look up by Name and then verify UID.
	// Don't even try to look up by Name if it's the wrong Kind.
	if controllerRef.Kind != controllerKind.Kind {
		return nil
	}
	d, err := dc.dLister.Deployments(namespace).Get(controllerRef.Name)
	if err != nil {
		return nil
	}
	if d.UID != controllerRef.UID {
		// The controller we found with this Name is not the same one that the
		// ControllerRef points to.
		return nil
	}
	return d
}

// getReplicaSetsForDeployment uses ControllerRefManager to reconcile
// ControllerRef by adopting and orphaning.
// It returns the list of ReplicaSets that this Deployment should manage.
@@ -150,42 +79,6 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(ctx context.Context,
	return dc.rsLister.ReplicaSets(d.Namespace).List(deploymentSelector)
}

// getPodMapForDeployment returns the Pods managed by a Deployment.
//
// It returns a map from ReplicaSet UID to a list of Pods controlled by that RS,
// according to the Pod's ControllerRef.
// NOTE: The pod pointers returned by this method point the pod objects in the cache and thus
// shouldn't be modified in any way.
func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID][]*v1.Pod, error) {
	// Get all Pods that potentially belong to this Deployment.
	selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
	if err != nil {
		return nil, err
	}
	pods, err := dc.podLister.Pods(d.Namespace).List(selector)
	if err != nil {
		return nil, err
	}
	// Group Pods by their controller (if it's in rsList).
	podMap := make(map[types.UID][]*v1.Pod, len(rsList))
	for _, rs := range rsList {
		podMap[rs.UID] = []*v1.Pod{}
	}
	for _, pod := range pods {
		// Do not ignore inactive Pods because Recreate Deployments need to verify that no
		// Pods from older versions are running before spinning up new Pods.
		controllerRef := metav1.GetControllerOf(pod)
		if controllerRef == nil {
			continue
		}
		// Only append if we care about this UID.
		if _, ok := podMap[controllerRef.UID]; ok {
			podMap[controllerRef.UID] = append(podMap[controllerRef.UID], pod)
		}
	}
	return podMap, nil
}

// syncDeployment will sync the deployment with the given key.
// This function is not meant to be invoked concurrently with the same key.
func (dc *DeploymentController) syncDeployment(ctx context.Context, key string) error {
@@ -230,15 +123,6 @@ func (dc *DeploymentController) syncDeployment(ctx context.Context, key string)
	if err != nil {
		return err
	}
	// List all Pods owned by this Deployment, grouped by their ReplicaSet.
	// Current uses of the podMap are:
	//
	// * check if a Pod is labeled correctly with the pod-template-hash label.
	// * check that no old Pods are running in the middle of Recreate Deployments.
	podMap, err := dc.getPodMapForDeployment(d, rsList)
	if err != nil {
		return err
	}

	if d.DeletionTimestamp != nil {
		return dc.syncStatusOnly(ctx, d, rsList)
@@ -255,26 +139,14 @@ func (dc *DeploymentController) syncDeployment(ctx context.Context, key string)
		return dc.sync(ctx, d, rsList)
	}

	// rollback is not re-entrant in case the underlying replica sets are updated with a new
	// revision so we should ensure that we won't proceed to update replica sets until we
	// make sure that the deployment has cleaned up its rollback spec in subsequent enqueues.
	if getRollbackTo(d) != nil {
		return dc.rollback(ctx, d, rsList)
	}

	scalingEvent, err := dc.isScalingEvent(ctx, d, rsList)
	if err != nil {
		return err
	}

	if scalingEvent {
		return dc.sync(ctx, d, rsList)
	}

	switch d.Spec.Strategy.Type {
	case apps.RecreateDeploymentStrategyType:
		return dc.rolloutRecreate(ctx, d, rsList, podMap)
	case apps.RollingUpdateDeploymentStrategyType:
		return dc.rolloutRolling(ctx, d, rsList)
	}
	return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
	return dc.rolloutRolling(ctx, d, rsList)
}
@@ -1,132 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package deployment

import (
	"context"

	"github.com/openkruise/rollouts/pkg/controller/deployment/util"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// rolloutRecreate implements the logic for recreating a replica set.
func (dc *DeploymentController) rolloutRecreate(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID][]*v1.Pod) error {
	// Don't create a new RS if not already existed, so that we avoid scaling up before scaling down.
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, false)
	if err != nil {
		return err
	}
	allRSs := append(oldRSs, newRS)
	activeOldRSs := util.FilterActiveReplicaSets(oldRSs)

	// scale down old replica sets.
	scaledDown, err := dc.scaleDownOldReplicaSetsForRecreate(ctx, activeOldRSs, d)
	if err != nil {
		return err
	}
	if scaledDown {
		// Update DeploymentStatus.
		return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
	}

	// Do not process a deployment when it has old pods running.
	if oldPodsRunning(newRS, oldRSs, podMap) {
		return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
	}

	// If we need to create a new RS, create it now.
	if newRS == nil {
		newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, true)
		if err != nil {
			return err
		}
		allRSs = append(oldRSs, newRS)
	}

	// scale up new replica set.
	if _, err := dc.scaleUpNewReplicaSetForRecreate(ctx, newRS, d); err != nil {
		return err
	}

	if util.DeploymentComplete(d, &d.Status) {
		if err := dc.cleanupDeployment(ctx, oldRSs, d); err != nil {
			return err
		}
	}

	// Sync deployment status.
	return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
}

// scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate".
func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(ctx context.Context, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
	scaled := false
	for i := range oldRSs {
		rs := oldRSs[i]
		// Scaling not required.
		if *(rs.Spec.Replicas) == 0 {
			continue
		}
		scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(ctx, rs, 0, deployment)
		if err != nil {
			return false, err
		}
		if scaledRS {
			oldRSs[i] = updatedRS
			scaled = true
		}
	}
	return scaled, nil
}

// oldPodsRunning returns whether there are old pods running or any of the old ReplicaSets thinks that it runs pods.
func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap map[types.UID][]*v1.Pod) bool {
	if oldPods := util.GetActualReplicaCountForReplicaSets(oldRSs); oldPods > 0 {
		return true
	}
	for rsUID, podList := range podMap {
		// If the pods belong to the new ReplicaSet, ignore.
		if newRS != nil && newRS.UID == rsUID {
			continue
		}
		for _, pod := range podList {
			switch pod.Status.Phase {
			case v1.PodFailed, v1.PodSucceeded:
				// Don't count pods in terminal state.
				continue
			case v1.PodUnknown:
				// v1.PodUnknown is a deprecated status.
				// This logic is kept for backward compatibility.
				// This used to happen in situation like when the node is temporarily disconnected from the cluster.
				// If we can't be sure that the pod is not running, we have to count it.
				return true
			default:
				// Pod is not in terminal phase.
				return true
			}
		}
	}
	return false
}

// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate".
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(ctx context.Context, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
	scaled, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, newRS, *(deployment.Spec.Replicas), deployment)
	return scaled, err
}
@@ -1,149 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package deployment

import (
	"context"
	"fmt"
	"strconv"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"

	deploymentutil "github.com/openkruise/rollouts/pkg/controller/deployment/util"
)

// rollback the deployment to the specified revision. In any case cleanup the rollback spec.
func (dc *DeploymentController) rollback(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet) error {
	newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, true)
	if err != nil {
		return err
	}

	allRSs := append(allOldRSs, newRS)
	rollbackTo := getRollbackTo(d)
	// If rollback revision is 0, rollback to the last revision
	if rollbackTo.Revision == 0 {
		if rollbackTo.Revision = deploymentutil.LastRevision(allRSs); rollbackTo.Revision == 0 {
			// If we still can't find the last revision, gives up rollback
			dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.")
			// Gives up rollback
			return dc.updateDeploymentAndClearRollbackTo(ctx, d)
		}
	}
	for _, rs := range allRSs {
		v, err := deploymentutil.Revision(rs)
		if err != nil {
			klog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
			continue
		}
		if v == rollbackTo.Revision {
			klog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
			// rollback by copying podTemplate.Spec from the replica set
			// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
			// no-op if the spec matches current deployment's podTemplate.Spec
			performedRollback, err := dc.rollbackToTemplate(ctx, d, rs)
			if performedRollback && err == nil {
				dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, rollbackTo.Revision))
			}
			return err
		}
	}
	dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find the revision to rollback to.")
	// Gives up rollback
	return dc.updateDeploymentAndClearRollbackTo(ctx, d)
}

// rollbackToTemplate compares the templates of the provided deployment and replica set and
// updates the deployment with the replica set template in case they are different. It also
// cleans up the rollback spec so subsequent requeues of the deployment won't end up in here.
func (dc *DeploymentController) rollbackToTemplate(ctx context.Context, d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
	performedRollback := false
	if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
		klog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
		deploymentutil.SetFromReplicaSetTemplate(d, rs.Spec.Template)
		// set RS (the old RS we'll rolling back to) annotations back to the deployment;
		// otherwise, the deployment's current annotations (should be the same as current new RS) will be copied to the RS after the rollback.
		//
		// For example,
		// A Deployment has old RS1 with annotation {change-cause:create}, and new RS2 {change-cause:edit}.
		// Note that both annotations are copied from Deployment, and the Deployment should be annotated {change-cause:edit} as well.
		// Now, rollback Deployment to RS1, we should update Deployment's pod-template and also copy annotation from RS1.
		// Deployment is now annotated {change-cause:create}, and we have new RS1 {change-cause:create}, old RS2 {change-cause:edit}.
		//
		// If we don't copy the annotations back from RS to deployment on rollback, the Deployment will stay as {change-cause:edit},
		// and new RS1 becomes {change-cause:edit} (copied from deployment after rollback), old RS2 {change-cause:edit}, which is not correct.
		deploymentutil.SetDeploymentAnnotationsTo(d, rs)
		performedRollback = true
	} else {
		klog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
		eventMsg := fmt.Sprintf("The rollback revision contains the same template as current deployment %q", d.Name)
		dc.emitRollbackWarningEvent(d, deploymentutil.RollbackTemplateUnchanged, eventMsg)
	}

	return performedRollback, dc.updateDeploymentAndClearRollbackTo(ctx, d)
}

func (dc *DeploymentController) emitRollbackWarningEvent(d *apps.Deployment, reason, message string) {
	dc.eventRecorder.Eventf(d, v1.EventTypeWarning, reason, message)
}

func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, message string) {
	dc.eventRecorder.Eventf(d, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
}

// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and update the input deployment
// It is assumed that the caller will have updated the deployment template appropriately (in case
// we want to rollback).
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(ctx context.Context, d *apps.Deployment) error {
	klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
	setRollbackTo(d, nil)
	_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(ctx, d, metav1.UpdateOptions{})
	return err
}

// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped.
func getRollbackTo(d *apps.Deployment) *extensions.RollbackConfig {
	// Extract the annotation used for round-tripping the deprecated RollbackTo field.
	revision := d.Annotations[apps.DeprecatedRollbackTo]
	if revision == "" {
		return nil
	}
	revision64, err := strconv.ParseInt(revision, 10, 64)
	if err != nil {
		// If it's invalid, ignore it.
		return nil
	}
	return &extensions.RollbackConfig{
		Revision: revision64,
	}
}

// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped.
func setRollbackTo(d *apps.Deployment, rollbackTo *extensions.RollbackConfig) {
	if rollbackTo == nil {
		delete(d.Annotations, apps.DeprecatedRollbackTo)
		return
	}
	if d.Annotations == nil {
		d.Annotations = make(map[string]string)
	}
	d.Annotations[apps.DeprecatedRollbackTo] = strconv.FormatInt(rollbackTo.Revision, 10)
}
@@ -58,13 +58,6 @@ func (dc *DeploymentController) sync(ctx context.Context, d *apps.Deployment, rs
		return err
	}

	// Clean up the deployment when it's paused and no rollback is in flight.
	if d.Spec.Paused && getRollbackTo(d) == nil {
		if err := dc.cleanupDeployment(ctx, oldRSs, d); err != nil {
			return err
		}
	}

	allRSs := append(oldRSs, newRS)
	return dc.syncDeploymentStatus(ctx, allRSs, newRS, d)
}
@@ -17,7 +17,6 @@ limitations under the License.
package util

import (
	"context"
	"fmt"
	"math"
	"sort"
@@ -30,17 +29,11 @@ import (
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	"k8s.io/klog/v2"
	"k8s.io/utils/integer"

	labelsutil "github.com/openkruise/rollouts/pkg/util/labels"
)

const (
@@ -57,13 +50,6 @@ const (
	// proportions in case the deployment has surge replicas.
	MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"

	// RollbackRevisionNotFound is not found rollback event reason
	RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
	// RollbackTemplateUnchanged is the template unchanged rollback event reason
	RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
	// RollbackDone is the done rollback event reason
	RollbackDone = "DeploymentRollback"

	// Reasons for deployment conditions
	//
	// Progressing:
@@ -197,23 +183,6 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
	return max
}

// LastRevision finds the second max revision number in all replica sets (the last revision)
func LastRevision(allRSs []*apps.ReplicaSet) int64 {
	max, secMax := int64(0), int64(0)
	for _, rs := range allRSs {
		if v, err := Revision(rs); err != nil {
			// Skip the replica sets when it failed to parse their revision information
			klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
		} else if v >= max {
			secMax = max
			max = v
		} else if v > secMax {
			secMax = v
		}
	}
	return secMax
}

// Revision returns the revision number of the input object.
func Revision(obj runtime.Object) (int64, error) {
	acc, err := meta.Accessor(obj)
@@ -329,28 +298,6 @@ func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps
	return rsAnnotationsChanged
}

// SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations.
// This action should be done if and only if the deployment is rolling back to this rs.
// Note that apply and revision annotations are not changed.
func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) {
	deployment.Annotations = getSkippedAnnotations(deployment.Annotations)
	for k, v := range rollbackToRS.Annotations {
		if !skipCopyAnnotation(k) {
			deployment.Annotations[k] = v
		}
	}
}

func getSkippedAnnotations(annotations map[string]string) map[string]string {
	skippedAnnotations := make(map[string]string)
	for k, v := range annotations {
		if skipCopyAnnotation(k) {
			skippedAnnotations[k] = v
		}
	}
	return skippedAnnotations
}

// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active
// replica set. If there are more active replica sets, then we should proportionally scale them.
func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet {
@@ -510,21 +457,6 @@ func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 {
	return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas)
}

// RsListFromClient returns an rsListFunc that wraps the given client.
func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
	return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
		rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options)
		if err != nil {
			return nil, err
		}
		var ret []*apps.ReplicaSet
		for i := range rsList.Items {
			ret = append(ret, &rsList.Items[i])
		}
		return ret, err
	}
}

// TODO: switch RsListFunc and podListFunc to full namespacers

// RsListFunc returns the ReplicaSet from the ReplicaSet namespace and the List metav1.ListOptions.
@@ -643,16 +575,6 @@ func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet)
	return requiredRSs, allRSs
}

// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment {
	deployment.Spec.Template.ObjectMeta = template.ObjectMeta
	deployment.Spec.Template.Spec = template.Spec
	deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
		deployment.Spec.Template.ObjectMeta.Labels,
		apps.DefaultDeploymentUniqueLabelKey)
	return deployment
}

// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
	totalReplicas := int32(0)
@@ -825,19 +747,6 @@ func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
		rs.Status.AvailableReplicas == *(deployment.Spec.Replicas)
}

// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
// Returns error if polling timesout.
func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
	// TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface.
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		deployment, err := getDeploymentFunc()
		if err != nil {
			return false, err
		}
		return deployment.Status.ObservedGeneration >= desiredGeneration, nil
	})
}

// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
@@ -882,42 +791,6 @@ func HasRevisionHistoryLimit(d *apps.Deployment) bool {
	return d.Spec.RevisionHistoryLimit != nil && *d.Spec.RevisionHistoryLimit != math.MaxInt32
}

// GetDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef
// will actually manage it.
// Returns an error only if no matching Deployments are found.
func GetDeploymentsForReplicaSet(deploymentLister appslisters.DeploymentLister, rs *apps.ReplicaSet) ([]*apps.Deployment, error) {
	if len(rs.Labels) == 0 {
		return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name)
	}

	// TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label
	dList, err := deploymentLister.Deployments(rs.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var deployments []*apps.Deployment
	for _, d := range dList {
		selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
		if err != nil {
			// This object has an invalid selector, it does not match the replicaset
			continue
		}
		// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) {
			continue
		}
		deployments = append(deployments, d)
	}

	if len(deployments) == 0 {
		return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels)
	}

	return deployments, nil
}

// ReplicaSetsByRevision sorts a list of ReplicaSet by revision, using their creation timestamp or name as a tie breaker.
// By using the creation timestamp, this sorts from old to new replica sets.
type ReplicaSetsByRevision []*apps.ReplicaSet