Support StatefulSetAutoDeletePVC feature (#882)

* Add StatefulSetAutoDeletePVC feature gate

Signed-off-by: veophi <vec.g.sun@gmail.com>

* statefulset PersistentVolumeClaimDeletePolicy api change

Signed-off-by: veophi <vec.g.sun@gmail.com>

* controller change for statefulset auto-delete (implementation)

Signed-off-by: veophi <vec.g.sun@gmail.com>

* controller change for statefulset auto-delete (tests)

Signed-off-by: veophi <vec.g.sun@gmail.com>

* fix pollution of the shared informer cache in the statefulset controller

Signed-off-by: veophi <vec.g.sun@gmail.com>

Co-authored-by: Matthew Cary <mattcary@google.com>
Wei-Xiang Sun 2022-03-22 12:13:51 +08:00 committed by GitHub
parent 234403c97d
commit ce28404a5e
27 changed files with 2770 additions and 684 deletions
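For orientation (not part of the diff): after this change, a user opts in per Advanced StatefulSet through the new spec field. Below is a minimal Go sketch using only the v1beta1 types introduced in this commit; it assumes the alpha StatefulSetAutoDeletePVC feature gate is enabled on kruise-manager.

package main

import (
	"fmt"

	appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
)

func main() {
	// Delete the PVCs created from volumeClaimTemplates when the StatefulSet
	// is deleted, but keep them around on scale-down.
	set := &appsv1beta1.StatefulSet{}
	set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
	}
	fmt.Println(set.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) // "Delete"
}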

View File

@ -18,6 +18,8 @@ package defaults
import (
"github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/features"
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/util/intstr"
v1 "k8s.io/kubernetes/pkg/apis/core/v1"
@ -56,6 +58,18 @@ func SetDefaultsStatefulSet(obj *v1beta1.StatefulSet, injectTemplateDefaults boo
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
if obj.Spec.PersistentVolumeClaimRetentionPolicy == nil {
obj.Spec.PersistentVolumeClaimRetentionPolicy = &v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted = v1beta1.RetainPersistentVolumeClaimRetentionPolicyType
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled = v1beta1.RetainPersistentVolumeClaimRetentionPolicyType
}
}
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = utilpointer.Int32Ptr(1)
}
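A short sketch (not part of the diff) of the intended effect of the defaulting above. The helper name exampleRetentionPolicyDefaulting is hypothetical; it assumes it lives alongside this defaults package and that the StatefulSetAutoDeletePVC gate is enabled — with the gate disabled the policy is left nil.

func exampleRetentionPolicyDefaulting() {
	obj := &v1beta1.StatefulSet{} // PersistentVolumeClaimRetentionPolicy is nil
	SetDefaultsStatefulSet(obj, false)
	// With the gate enabled, both fields are defaulted to Retain:
	//   obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted == v1beta1.RetainPersistentVolumeClaimRetentionPolicyType
	//   obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled  == v1beta1.RetainPersistentVolumeClaimRetentionPolicyType
}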

View File

@ -109,6 +109,40 @@ const (
InPlaceOnlyPodUpdateStrategyType PodUpdateStrategyType = "InPlaceOnly"
)
// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine
// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is
// deleted or scaled down.
type PersistentVolumeClaimRetentionPolicyType string
const (
// RetainPersistentVolumeClaimRetentionPolicyType is the default
// PersistentVolumeClaimRetentionPolicy and specifies that
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will not be deleted.
RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain"
// DeletePersistentVolumeClaimRetentionPolicyType specifies that
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will be deleted in the scenario specified in
// StatefulSetPersistentVolumeClaimPolicy.
DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete"
)
// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
// created from the StatefulSet VolumeClaimTemplates.
type StatefulSetPersistentVolumeClaimRetentionPolicy struct {
// WhenDeleted specifies what happens to PVCs created from StatefulSet
// VolumeClaimTemplates when the StatefulSet is deleted. The default policy
// of `Retain` causes PVCs to not be affected by StatefulSet deletion. The
// `Delete` policy causes those PVCs to be deleted.
WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
// WhenScaled specifies what happens to PVCs created from StatefulSet
// VolumeClaimTemplates when the StatefulSet is scaled down. The default
// policy of `Retain` causes PVCs to not be affected by a scaledown. The
// `Delete` policy causes the associated PVCs for any excess pods above
// the replica count to be deleted.
WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
}
// StatefulSetSpec defines the desired state of StatefulSet
type StatefulSetSpec struct {
// replicas is the desired number of replicas of the given Template.
@ -188,6 +222,12 @@ type StatefulSetSpec struct {
// scaleStrategy indicates the StatefulSetScaleStrategy that will be
// employed to scale Pods in the StatefulSet.
ScaleStrategy *StatefulSetScaleStrategy `json:"scaleStrategy,omitempty"`
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
// the StatefulSet VolumeClaimTemplates. This requires the
// StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"`
}
// StatefulSetScaleStrategy defines strategies for pods scale.
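For reference (not part of the diff), the four WhenDeleted/WhenScaled combinations and their intended effect, restated from the field comments above. The variable names are illustrative only; the sketch assumes the kruise v1beta1 package is imported.

// Retain/Retain is the default and matches the behaviour before this feature existed.
var (
	keepEverything = v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: v1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  v1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
	} // PVCs are never removed by the controller (default).
	deleteOnSetDeletion = v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: v1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  v1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
	} // PVCs are deleted when the StatefulSet is deleted.
	deleteOnScaleDown = v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: v1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  v1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
	} // PVCs of excess pods are deleted on scale-down only.
	deleteAlways = v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: v1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  v1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
	} // Both of the above.
)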

View File

@ -129,6 +129,21 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy {
if in == nil {
return nil
}
out := new(StatefulSetPersistentVolumeClaimRetentionPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetScaleStrategy) DeepCopyInto(out *StatefulSetScaleStrategy) {
*out = *in
@ -191,6 +206,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
*out = new(StatefulSetScaleStrategy)
(*in).DeepCopyInto(*out)
}
if in.PersistentVolumeClaimRetentionPolicy != nil {
in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy
*out = new(StatefulSetPersistentVolumeClaimRetentionPolicy)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.

View File

@ -528,6 +528,27 @@ spec:
type: object
type: object
type: object
persistentVolumeClaimRetentionPolicy:
description: PersistentVolumeClaimRetentionPolicy describes the policy
used for PVCs created from the StatefulSet VolumeClaimTemplates.
This requires the StatefulSetAutoDeletePVC feature gate to be enabled,
which is alpha.
properties:
whenDeleted:
description: WhenDeleted specifies what happens to PVCs created
from StatefulSet VolumeClaimTemplates when the StatefulSet is
deleted. The default policy of `Retain` causes PVCs to not be
affected by StatefulSet deletion. The `Delete` policy causes
those PVCs to be deleted.
type: string
whenScaled:
description: WhenScaled specifies what happens to PVCs created
from StatefulSet VolumeClaimTemplates when the StatefulSet is
scaled down. The default policy of `Retain` causes PVCs to not
be affected by a scaledown. The `Delete` policy causes the associated
PVCs for any excess pods above the replica count to be deleted.
type: string
type: object
podManagementPolicy:
description: podManagementPolicy controls how pods are created during
initial scale up, when replacing pods on nodes, or when scaling

View File

@ -160,6 +160,30 @@ spec:
type: object
type: object
type: object
persistentVolumeClaimRetentionPolicy:
description: PersistentVolumeClaimRetentionPolicy describes
the policy used for PVCs created from the StatefulSet
VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC
feature gate to be enabled, which is alpha.
properties:
whenDeleted:
description: WhenDeleted specifies what happens to
PVCs created from StatefulSet VolumeClaimTemplates
when the StatefulSet is deleted. The default policy
of `Retain` causes PVCs to not be affected by StatefulSet
deletion. The `Delete` policy causes those PVCs
to be deleted.
type: string
whenScaled:
description: WhenScaled specifies what happens to
PVCs created from StatefulSet VolumeClaimTemplates
when the StatefulSet is scaled down. The default
policy of `Retain` causes PVCs to not be affected
by a scaledown. The `Delete` policy causes the associated
PVCs for any excess pods above the replica count
to be deleted.
type: string
type: object
podManagementPolicy:
description: podManagementPolicy controls how pods are
created during initial scale up, when replacing pods

2
go.sum
View File

@ -938,7 +938,6 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
@ -1022,7 +1021,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=

View File

@ -22,8 +22,6 @@ import (
"fmt"
"strings"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -33,64 +31,114 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
"github.com/openkruise/kruise/pkg/features"
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
)
// StatefulPodControlInterface defines the interface that StatefulSetController uses to create, update, and delete Pods,
// and to update the Status of a StatefulSet. It follows the design paradigms used for PodControl, but its
// implementation provides for PVC creation, ordered Pod creation, ordered Pod termination, and Pod identity enforcement.
// Like controller.PodControlInterface, it is implemented as an interface to provide for testing fakes.
type StatefulPodControlInterface interface {
// CreateStatefulPod create a Pod in a StatefulSet. Any PVCs necessary for the Pod are created prior to creating
// the Pod. If the returned error is nil the Pod and its PVCs have been created.
CreateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error
// UpdateStatefulPod Updates a Pod in a StatefulSet. If the Pod already has the correct identity and stable
// storage this method is a no-op. If the Pod must be mutated to conform to the Set, it is mutated and updated.
// pod is an in-out parameter, and any updates made to the pod are reflected as mutations to this parameter. If
// the create is successful, the returned error is nil.
UpdateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error
// DeleteStatefulPod deletes a Pod in a StatefulSet. The pods PVCs are not deleted. If the delete is successful,
// the returned error is nil.
DeleteStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error
// StatefulPodControlObjectManager abstracts the manipulation of Pods and PVCs. The real controller implements this
// with a clientset for writes and listers for reads; for tests we provide stubs.
type StatefulPodControlObjectManager interface {
CreatePod(pod *v1.Pod) error
GetPod(namespace, podName string) (*v1.Pod, error)
UpdatePod(pod *v1.Pod) error
DeletePod(pod *v1.Pod) error
CreateClaim(claim *v1.PersistentVolumeClaim) error
GetClaim(namespace, claimName string) (*v1.PersistentVolumeClaim, error)
UpdateClaim(claim *v1.PersistentVolumeClaim) error
}
// NewRealStatefulPodControl returns a new realStatefulPodControl
func NewRealStatefulPodControl(
client clientset.Interface,
setLister kruiseappslisters.StatefulSetLister,
podLister corelisters.PodLister,
pvcLister corelisters.PersistentVolumeClaimLister,
recorder record.EventRecorder,
) StatefulPodControlInterface {
return &realStatefulPodControl{client, setLister, podLister, pvcLister, recorder}
}
// realStatefulPodControl implements StatefulPodControlInterface using a clientset.Interface to communicate with the
// API server. The struct is package private as the internal details are irrelevant to importing packages.
type realStatefulPodControl struct {
client clientset.Interface
setLister kruiseappslisters.StatefulSetLister
podLister corelisters.PodLister
pvcLister corelisters.PersistentVolumeClaimLister
// StatefulPodControl implements the logic that StatefulSetController uses to create, update, and delete Pods and their PVCs.
// Manipulation of objects is provided through objectMgr, which allows the k8s API to be mocked out for testing.
type StatefulPodControl struct {
objectMgr StatefulPodControlObjectManager
recorder record.EventRecorder
}
func (spc *realStatefulPodControl) CreateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
// NewStatefulPodControl constructs a StatefulPodControl using a realStatefulPodControlObjectManager with the given
// clientset, listers and EventRecorder.
func NewStatefulPodControl(
client clientset.Interface,
setLister kruiseappslisters.StatefulSetLister,
podLister corelisters.PodLister,
claimLister corelisters.PersistentVolumeClaimLister,
recorder record.EventRecorder,
) *StatefulPodControl {
return &StatefulPodControl{&realStatefulPodControlObjectManager{client, setLister, podLister, claimLister}, recorder}
}
// NewStatefulPodControlFromManager creates a StatefulPodControl using the given StatefulPodControlObjectManager and recorder.
func NewStatefulPodControlFromManager(om StatefulPodControlObjectManager, recorder record.EventRecorder) *StatefulPodControl {
return &StatefulPodControl{om, recorder}
}
// realStatefulPodControlObjectManager uses a clientset.Interface and listers.
type realStatefulPodControlObjectManager struct {
client clientset.Interface
setLister kruiseappslisters.StatefulSetLister
podLister corelisters.PodLister
claimLister corelisters.PersistentVolumeClaimLister
}
func (om *realStatefulPodControlObjectManager) CreatePod(pod *v1.Pod) error {
_, err := om.client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
return err
}
func (om *realStatefulPodControlObjectManager) GetPod(namespace, podName string) (*v1.Pod, error) {
return om.podLister.Pods(namespace).Get(podName)
}
func (om *realStatefulPodControlObjectManager) UpdatePod(pod *v1.Pod) error {
_, err := om.client.CoreV1().Pods(pod.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
return err
}
func (om *realStatefulPodControlObjectManager) DeletePod(pod *v1.Pod) error {
return om.client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
}
func (om *realStatefulPodControlObjectManager) CreateClaim(claim *v1.PersistentVolumeClaim) error {
_, err := om.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{})
return err
}
func (om *realStatefulPodControlObjectManager) GetClaim(namespace, claimName string) (*v1.PersistentVolumeClaim, error) {
return om.claimLister.PersistentVolumeClaims(namespace).Get(claimName)
}
func (om *realStatefulPodControlObjectManager) UpdateClaim(claim *v1.PersistentVolumeClaim) error {
_, err := om.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{})
return err
}
func (spc *StatefulPodControl) CreateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
// Create the Pod's PVCs prior to creating the Pod
if err := spc.createPersistentVolumeClaims(set, pod); err != nil {
spc.recordPodEvent("create", set, pod, err)
return err
}
// If we created the PVCs attempt to create the Pod
_, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
err := spc.objectMgr.CreatePod(pod)
// sink already exists errors
if apierrors.IsAlreadyExists(err) {
return err
}
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
// Set PVC policy as much as is possible at this point.
if err := spc.UpdatePodClaimForRetentionPolicy(set, pod); err != nil {
spc.recordPodEvent("update", set, pod, err)
return err
}
}
spc.recordPodEvent("create", set, pod, err)
return err
}
func (spc *realStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
func (spc *StatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
attemptedUpdate := false
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
// assume the Pod is consistent
@ -110,6 +158,21 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSe
return err
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
// if the Pod's PVCs are not consistent with the StatefulSet's PVC deletion policy, update the PVC
// and dirty the pod.
if match, err := spc.ClaimsMatchRetentionPolicy(set, pod); err != nil {
spc.recordPodEvent("update", set, pod, err)
return err
} else if !match {
if err := spc.UpdatePodClaimForRetentionPolicy(set, pod); err != nil {
spc.recordPodEvent("update", set, pod, err)
return err
}
consistent = false
}
}
// if the Pod is not dirty, do nothing
if consistent {
return nil
@ -117,16 +180,17 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSe
attemptedUpdate = true
// commit the update, retrying on conflicts
_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
updateErr := spc.objectMgr.UpdatePod(pod)
if updateErr == nil {
return nil
}
if updated, err := spc.podLister.Pods(set.Namespace).Get(pod.Name); err == nil {
if updated, err := spc.objectMgr.GetPod(set.Namespace, pod.Name); err == nil {
// make a copy so we don't mutate the shared cache
pod = updated.DeepCopy()
} else {
utilruntime.HandleError(fmt.Errorf("error getting updated Pod %s/%s from lister: %v", set.Namespace, pod.Name, err))
utilruntime.HandleError(fmt.Errorf("error getting updated Pod %s/%s: %w", set.Namespace, pod.Name, err))
}
return updateErr
@ -137,15 +201,93 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *appsv1beta1.StatefulSe
return err
}
func (spc *realStatefulPodControl) DeleteStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
err := spc.client.CoreV1().Pods(set.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
func (spc *StatefulPodControl) DeleteStatefulPod(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
err := spc.objectMgr.DeletePod(pod)
spc.recordPodEvent("delete", set, pod, err)
return err
}
// ClaimsMatchRetentionPolicy returns false if the PVCs for pod are not consistent with set's PVC deletion policy.
// An error is returned if something is not consistent. This is expected if the pod is being otherwise updated,
// but a problem otherwise (see usage of this method in UpdateStatefulPod).
func (spc *StatefulPodControl) ClaimsMatchRetentionPolicy(set *appsv1beta1.StatefulSet, pod *v1.Pod) (bool, error) {
ordinal := getOrdinal(pod)
templates := set.Spec.VolumeClaimTemplates
for i := range templates {
claimName := getPersistentVolumeClaimName(set, &templates[i], ordinal)
claim, err := spc.objectMgr.GetClaim(set.Namespace, claimName)
switch {
case apierrors.IsNotFound(err):
klog.V(4).Infof("Expected claim %s missing, continuing to pick up in next iteration", claimName)
case err != nil:
return false, fmt.Errorf("Could not retrieve claim %s for %s when checking PVC deletion policy", claimName, pod.Name)
default:
if !claimOwnerMatchesSetAndPod(claim, set, pod) {
return false, nil
}
}
}
return true, nil
}
// UpdatePodClaimForRetentionPolicy updates the PVCs used by pod to match the PVC deletion policy of set.
func (spc *StatefulPodControl) UpdatePodClaimForRetentionPolicy(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
ordinal := getOrdinal(pod)
templates := set.Spec.VolumeClaimTemplates
for i := range templates {
claimName := getPersistentVolumeClaimName(set, &templates[i], ordinal)
claim, err := spc.objectMgr.GetClaim(set.Namespace, claimName)
switch {
case apierrors.IsNotFound(err):
klog.V(4).Infof("Expected claim %s missing, continuing to pick up in next iteration.")
case err != nil:
return fmt.Errorf("Could not retrieve claim %s not found for %s when checking PVC deletion policy: %w", claimName, pod.Name, err)
default:
if !claimOwnerMatchesSetAndPod(claim, set, pod) {
claimClone := claim.DeepCopy()
needsUpdate := updateClaimOwnerRefForSetAndPod(claimClone, set, pod)
if needsUpdate {
err := spc.objectMgr.UpdateClaim(claimClone)
if err != nil {
return fmt.Errorf("Could not update claim %s for delete policy ownerRefs: %w", claimName, err)
}
}
}
}
}
return nil
}
// PodClaimIsStale returns true for a stale PVC that should block pod creation. If the scaling
// policy is deletion, and a PVC has an ownerRef that does not match the pod, the PVC is stale. This
// includes the case where the pod has no UID yet, i.e. it has not been created.
func (spc *StatefulPodControl) PodClaimIsStale(set *appsv1beta1.StatefulSet, pod *v1.Pod) (bool, error) {
policy := getPersistentVolumeClaimRetentionPolicy(set)
if policy.WhenScaled == appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType {
// PVCs are meant to be reused and so can't be stale.
return false, nil
}
for _, claim := range getPersistentVolumeClaims(set, pod) {
pvc, err := spc.objectMgr.GetClaim(claim.Namespace, claim.Name)
switch {
case apierrors.IsNotFound(err):
// If the claim doesn't exist yet, it can't be stale.
continue
case err != nil:
return false, err
case err == nil:
// A claim is stale if it doesn't match the pod's UID, including if the pod has no UID.
if hasStaleOwnerRef(pvc, pod) {
return true, nil
}
}
}
return false, nil
}
// recordPodEvent records an event for verb applied to a Pod in a StatefulSet. If err is nil the generated event will
// have a reason of v1.EventTypeNormal. If err is not nil the generated event will have a reason of v1.EventTypeWarning.
func (spc *realStatefulPodControl) recordPodEvent(verb string, set *appsv1beta1.StatefulSet, pod *v1.Pod, err error) {
func (spc *StatefulPodControl) recordPodEvent(verb string, set *appsv1beta1.StatefulSet, pod *v1.Pod, err error) {
if err == nil {
reason := fmt.Sprintf("Successful%s", strings.Title(verb))
message := fmt.Sprintf("%s Pod %s in StatefulSet %s successful",
@ -162,7 +304,7 @@ func (spc *realStatefulPodControl) recordPodEvent(verb string, set *appsv1beta1.
// recordClaimEvent records an event for verb applied to the PersistentVolumeClaim of a Pod in a StatefulSet. If err is
// nil the generated event will have a reason of v1.EventTypeNormal. If err is not nil the generated event will have a
// reason of v1.EventTypeWarning.
func (spc *realStatefulPodControl) recordClaimEvent(verb string, set *appsv1beta1.StatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, err error) {
func (spc *StatefulPodControl) recordClaimEvent(verb string, set *appsv1beta1.StatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, err error) {
if err == nil {
reason := fmt.Sprintf("Successful%s", strings.Title(verb))
message := fmt.Sprintf("%s Claim %s Pod %s in StatefulSet %s success",
@ -180,13 +322,13 @@ func (spc *realStatefulPodControl) recordClaimEvent(verb string, set *appsv1beta
// set. If all of the claims for Pod are successfully created, the returned error is nil. If creation fails, this method
// may be called again until no error is returned, indicating the PersistentVolumeClaims for pod are consistent with
// set's Spec.
func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
func (spc *StatefulPodControl) createPersistentVolumeClaims(set *appsv1beta1.StatefulSet, pod *v1.Pod) error {
var errs []error
for _, claim := range getPersistentVolumeClaims(set, pod) {
pvc, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
pvc, err := spc.objectMgr.GetClaim(claim.Namespace, claim.Name)
switch {
case apierrors.IsNotFound(err):
_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim, metav1.CreateOptions{})
err := spc.objectMgr.CreateClaim(&claim)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
}
@ -200,8 +342,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *appsv1beta1
errs = append(errs, fmt.Errorf("pvc %s is to be deleted", claim.Name))
}
// TODO: Check resource requirements and accessmodes, update if necessary
// Don't forget to deep copy the PVC if you need to update it
}
return errorutils.NewAggregate(errs)
}
var _ StatefulPodControlInterface = &realStatefulPodControl{}
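Because StatefulPodControl now goes through the StatefulPodControlObjectManager interface above (and NewStatefulPodControlFromManager accepts any implementation), tests can substitute a stub instead of a fake clientset. The following map-backed stub is a sketch, not code from this commit; it assumes the usual core/v1 (v1) and apimachinery errors (apierrors) imports.

// fakeObjectManager is an illustrative StatefulPodControlObjectManager backed by
// in-memory maps; it is a sketch for tests, not code from this commit.
type fakeObjectManager struct {
	pods   map[string]*v1.Pod
	claims map[string]*v1.PersistentVolumeClaim
}

func objKey(namespace, name string) string { return namespace + "/" + name }

func (f *fakeObjectManager) CreatePod(pod *v1.Pod) error {
	f.pods[objKey(pod.Namespace, pod.Name)] = pod.DeepCopy()
	return nil
}

func (f *fakeObjectManager) GetPod(namespace, podName string) (*v1.Pod, error) {
	if p, ok := f.pods[objKey(namespace, podName)]; ok {
		return p.DeepCopy(), nil
	}
	return nil, apierrors.NewNotFound(v1.Resource("pods"), podName)
}

func (f *fakeObjectManager) UpdatePod(pod *v1.Pod) error { return f.CreatePod(pod) }

func (f *fakeObjectManager) DeletePod(pod *v1.Pod) error {
	delete(f.pods, objKey(pod.Namespace, pod.Name))
	return nil
}

func (f *fakeObjectManager) CreateClaim(claim *v1.PersistentVolumeClaim) error {
	f.claims[objKey(claim.Namespace, claim.Name)] = claim.DeepCopy()
	return nil
}

func (f *fakeObjectManager) GetClaim(namespace, claimName string) (*v1.PersistentVolumeClaim, error) {
	if c, ok := f.claims[objKey(namespace, claimName)]; ok {
		return c.DeepCopy(), nil
	}
	return nil, apierrors.NewNotFound(v1.Resource("persistentvolumeclaims"), claimName)
}

func (f *fakeObjectManager) UpdateClaim(claim *v1.PersistentVolumeClaim) error {
	return f.CreateClaim(claim)
}

Such a stub would then be wired in with NewStatefulPodControlFromManager(&fakeObjectManager{pods: map[string]*v1.Pod{}, claims: map[string]*v1.PersistentVolumeClaim{}}, recorder).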

View File

@ -18,15 +18,19 @@ limitations under the License.
package statefulset
import (
"context"
"errors"
"fmt"
"strings"
"testing"
"time"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
corelisters "k8s.io/client-go/listers/core/v1"
core "k8s.io/client-go/testing"
@ -34,6 +38,10 @@ import (
"k8s.io/client-go/tools/record"
_ "k8s.io/kubernetes/pkg/apis/apps/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/features"
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
)
func TestStatefulPodControlCreatesPods(t *testing.T) {
@ -41,14 +49,15 @@ func TestStatefulPodControlCreatesPods(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer)
control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, recorder)
fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), action.GetResource().Resource)
})
fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
create := action.(core.CreateAction)
claimIndexer.Add(create.GetObject())
return true, create.GetObject(), nil
})
fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
@ -81,7 +90,7 @@ func TestStatefulPodControlCreatePodExists(t *testing.T) {
pvcIndexer.Add(&pvc)
}
pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
create := action.(core.CreateAction)
return true, create.GetObject(), nil
@ -108,7 +117,7 @@ func TestStatefulPodControlCreatePodPvcCreateFailure(t *testing.T) {
fakeClient := &fake.Clientset{}
pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
@ -130,7 +139,7 @@ func TestStatefulPodControlCreatePodPvcCreateFailure(t *testing.T) {
}
}
func TestStatefulPodControlCreatePodPvcDeleting(t *testing.T) {
func TestStatefulPodControlCreatePodPVCDeleting(t *testing.T) {
recorder := record.NewFakeRecorder(10)
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
@ -144,7 +153,7 @@ func TestStatefulPodControlCreatePodPvcDeleting(t *testing.T) {
pvcIndexer.Add(&pvc)
}
pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
create := action.(core.CreateAction)
return true, create.GetObject(), nil
@ -183,7 +192,7 @@ func TestStatefulPodControlCreatePodPvcGetFailure(t *testing.T) {
fakeClient := &fake.Clientset{}
pvcIndexer := &fakeIndexer{getError: errors.New("API server down")}
pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
@ -212,7 +221,7 @@ func TestStatefulPodControlCreatePodFailed(t *testing.T) {
fakeClient := &fake.Clientset{}
pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
fakeClient.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
create := action.(core.CreateAction)
return true, create.GetObject(), nil
@ -231,7 +240,6 @@ func TestStatefulPodControlCreatePodFailed(t *testing.T) {
} else if !strings.Contains(events[1], v1.EventTypeWarning) {
t.Errorf("Found unexpected non-warning event %s", events[1])
}
}
@ -240,7 +248,14 @@ func TestStatefulPodControlNoOpUpdate(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
claims := getPersistentVolumeClaims(set, pod)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
for k := range claims {
claim := claims[k]
indexer.Add(&claim)
}
claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, recorder)
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
t.Error("no-op update should not make any client invocation")
return true, nil, apierrors.NewInternalError(errors.New("If we are here we have a problem"))
@ -259,7 +274,9 @@ func TestStatefulPodControlUpdatesIdentity(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
fakeClient := fake.NewSimpleClientset(pod)
control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, recorder)
var updated *v1.Pod
fakeClient.PrependReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
@ -286,12 +303,14 @@ func TestStatefulPodControlUpdateIdentityFailure(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
gooPod := newStatefulSetPod(set, 0)
gooPod.Name = "goo-0"
indexer.Add(gooPod)
podLister := corelisters.NewPodLister(indexer)
control := NewRealStatefulPodControl(fakeClient, nil, podLister, nil, recorder)
podIndexer.Add(gooPod)
podLister := corelisters.NewPodLister(podIndexer)
claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer)
control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder)
fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
pod.Name = "goo-0"
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
@ -318,7 +337,7 @@ func TestStatefulPodControlUpdatesPodStorage(t *testing.T) {
fakeClient := &fake.Clientset{}
pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
pvcs := getPersistentVolumeClaims(set, pod)
volumes := make([]v1.Volume, 0, len(pod.Spec.Volumes))
for i := range pod.Spec.Volumes {
@ -365,7 +384,7 @@ func TestStatefulPodControlUpdatePodStorageFailure(t *testing.T) {
fakeClient := &fake.Clientset{}
pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcIndexer)
control := NewRealStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, pvcLister, recorder)
pvcs := getPersistentVolumeClaims(set, pod)
volumes := make([]v1.Volume, 0, len(pod.Spec.Volumes))
for i := range pod.Spec.Volumes {
@ -400,12 +419,19 @@ func TestStatefulPodControlUpdatePodConflictSuccess(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
podLister := corelisters.NewPodLister(podIndexer)
claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer)
gooPod := newStatefulSetPod(set, 0)
gooPod.Name = "goo-0"
indexer.Add(gooPod)
podLister := corelisters.NewPodLister(indexer)
control := NewRealStatefulPodControl(fakeClient, nil, podLister, nil, recorder)
gooPod.Labels[apps.StatefulSetPodNameLabel] = "goo-starts"
podIndexer.Add(gooPod)
claims := getPersistentVolumeClaims(set, gooPod)
for k := range claims {
claim := claims[k]
claimIndexer.Add(&claim)
}
control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder)
conflict := false
fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
@ -415,7 +441,7 @@ func TestStatefulPodControlUpdatePodConflictSuccess(t *testing.T) {
}
return true, update.GetObject(), nil
})
pod.Name = "goo-0"
pod.Labels[apps.StatefulSetPodNameLabel] = "goo-0"
if err := control.UpdateStatefulPod(set, pod); err != nil {
t.Errorf("Successful update returned an error: %s", err)
}
@ -435,7 +461,7 @@ func TestStatefulPodControlDeletesStatefulPod(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, nil, recorder)
fakeClient.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
@ -455,7 +481,7 @@ func TestStatefulPodControlDeleteFailure(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
control := NewStatefulPodControl(fakeClient, nil, nil, nil, recorder)
fakeClient.AddReactor("delete", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
@ -470,6 +496,346 @@ func TestStatefulPodControlDeleteFailure(t *testing.T) {
}
}
func TestStatefulPodControlClaimsMatchDeletionPolicy(t *testing.T) {
// The claimOwnerMatchesSetAndPod is tested exhaustively in stateful_set_utils_test; this
// test is for the wiring to the method tested there.
fakeClient := &fake.Clientset{}
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 0)
claims := getPersistentVolumeClaims(set, pod)
for k := range claims {
claim := claims[k]
indexer.Add(&claim)
}
control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, &noopRecorder{})
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
if matches, err := control.ClaimsMatchRetentionPolicy(set, pod); err != nil {
t.Errorf("Unexpected error for ClaimsMatchRetentionPolicy (retain): %v", err)
} else if !matches {
t.Error("Unexpected non-match for ClaimsMatchRetentionPolicy (retain)")
}
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
if matches, err := control.ClaimsMatchRetentionPolicy(set, pod); err != nil {
t.Errorf("Unexpected error for ClaimsMatchRetentionPolicy (set deletion): %v", err)
} else if matches {
t.Error("Unexpected match for ClaimsMatchRetentionPolicy (set deletion)")
}
}
func TestStatefulPodControlUpdatePodClaimForRetentionPolicy(t *testing.T) {
// All the update conditions are tested exhaustively in stateful_set_utils_test. This
// tests the wiring from the pod control to that method.
testFn := func(t *testing.T) {
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
set := newStatefulSet(3)
set.GetObjectMeta().SetUID("set-123")
pod := newStatefulSetPod(set, 0)
claims := getPersistentVolumeClaims(set, pod)
claimObjects := make([]runtime.Object, 0)
for k := range claims {
claim := claims[k]
indexer.Add(&claim)
claimObjects = append(claimObjects, &claim)
}
fakeClient := fake.NewSimpleClientset(claimObjects...)
control := NewStatefulPodControl(fakeClient, nil, nil, claimLister, &noopRecorder{})
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
if err := control.UpdatePodClaimForRetentionPolicy(set, pod); err != nil {
t.Errorf("Unexpected error for UpdatePodClaimForRetentionPolicy (retain): %v", err)
}
expectRef := utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC)
for k := range claims {
claim, err := fakeClient.CoreV1().PersistentVolumeClaims(claims[k].Namespace).Get(context.TODO(), claims[k].Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error getting Claim %s/%s: %v", claim.Namespace, claim.Name, err)
}
if hasOwnerRef(claim, set) != expectRef {
t.Errorf("Claim %s/%s bad set owner ref", claim.Namespace, claim.Name)
}
}
}
t.Run("StatefulSetAutoDeletePVCEnabled", func(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)()
testFn(t)
})
t.Run("StatefulSetAutoDeletePVCDisabled", func(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, false)()
testFn(t)
})
}
func TestPodClaimIsStale(t *testing.T) {
const missing = "missing"
const exists = "exists"
const stale = "stale"
const withRef = "with-ref"
testCases := []struct {
name string
claimStates []string
expected bool
skipPodUID bool
}{
{
name: "all missing",
claimStates: []string{missing, missing},
expected: false,
},
{
name: "no claims",
claimStates: []string{},
expected: false,
},
{
name: "exists",
claimStates: []string{missing, exists},
expected: false,
},
{
name: "all refs",
claimStates: []string{withRef, withRef},
expected: false,
},
{
name: "stale & exists",
claimStates: []string{stale, exists},
expected: true,
},
{
name: "stale & missing",
claimStates: []string{stale, missing},
expected: true,
},
{
name: "withRef & stale",
claimStates: []string{withRef, stale},
expected: true,
},
{
name: "withRef, no UID",
claimStates: []string{withRef},
skipPodUID: true,
expected: true,
},
}
for _, tc := range testCases {
set := appsv1beta1.StatefulSet{}
set.Name = "set"
set.Namespace = "default"
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
}
set.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}
claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
for i, claimState := range tc.claimStates {
claim := v1.PersistentVolumeClaim{}
claim.Name = fmt.Sprintf("claim-%d", i)
set.Spec.VolumeClaimTemplates = append(set.Spec.VolumeClaimTemplates, claim)
claim.Name = fmt.Sprintf("%s-set-3", claim.Name)
claim.Namespace = set.Namespace
switch claimState {
case missing:
// Do nothing, the claim shouldn't exist.
case exists:
claimIndexer.Add(&claim)
case stale:
claim.SetOwnerReferences([]metav1.OwnerReference{
{Name: "set-3", UID: types.UID("stale")},
})
claimIndexer.Add(&claim)
case withRef:
claim.SetOwnerReferences([]metav1.OwnerReference{
{Name: "set-3", UID: types.UID("123")},
})
claimIndexer.Add(&claim)
}
}
pod := v1.Pod{}
pod.Name = "set-3"
if !tc.skipPodUID {
pod.SetUID("123")
}
claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer)
control := NewStatefulPodControl(&fake.Clientset{}, nil, nil, claimLister, &noopRecorder{})
expected := tc.expected
// Note that the error isn't / can't be tested.
if stale, _ := control.PodClaimIsStale(&set, &pod); stale != expected {
t.Errorf("unexpected stale for %s", tc.name)
}
}
}
func TestStatefulPodControlRetainDeletionPolicyUpdate(t *testing.T) {
testFn := func(t *testing.T) {
recorder := record.NewFakeRecorder(10)
set := newStatefulSet(1)
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
podLister := corelisters.NewPodLister(podIndexer)
claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer)
podIndexer.Add(pod)
claims := getPersistentVolumeClaims(set, pod)
if len(claims) < 1 {
t.Errorf("Unexpected missing PVCs")
}
for k := range claims {
claim := claims[k]
setOwnerRef(&claim, set, &set.TypeMeta) // This ownerRef should be removed in the update.
claimIndexer.Add(&claim)
}
control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder)
if err := control.UpdateStatefulPod(set, pod); err != nil {
t.Errorf("Successful update returned an error: %s", err)
}
for k := range claims {
claim := claims[k]
if hasOwnerRef(&claim, set) {
t.Errorf("ownerRef not removed: %s/%s", claim.Namespace, claim.Name)
}
}
events := collectEvents(recorder.Events)
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
if eventCount := len(events); eventCount != 1 {
t.Errorf("delete failed: got %d events, but want 1", eventCount)
}
} else {
if len(events) != 0 {
t.Errorf("delete failed: expected no events, but got %v", events)
}
}
}
t.Run("StatefulSetAutoDeletePVCEnabled", func(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)()
testFn(t)
})
t.Run("StatefulSetAutoDeletePVCDisabled", func(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, false)()
testFn(t)
})
}
func TestStatefulPodControlRetentionPolicyUpdate(t *testing.T) {
// Only applicable when the feature gate is on; the off case is tested in TestStatefulPodControlRetainRetentionPolicyUpdate.
defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)()
recorder := record.NewFakeRecorder(10)
set := newStatefulSet(1)
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
podIndexer.Add(pod)
claims := getPersistentVolumeClaims(set, pod)
if len(claims) != 1 {
t.Errorf("Unexpected or missing PVCs")
}
var claim v1.PersistentVolumeClaim
for k := range claims {
claim = claims[k]
claimIndexer.Add(&claim)
}
fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
claimIndexer.Update(update.GetObject())
return true, update.GetObject(), nil
})
podLister := corelisters.NewPodLister(podIndexer)
claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer)
control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder)
if err := control.UpdateStatefulPod(set, pod); err != nil {
t.Errorf("Successful update returned an error: %s", err)
}
updatedClaim, err := claimLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
if err != nil {
t.Errorf("Error retrieving claim %s/%s: %v", claim.Namespace, claim.Name, err)
}
if !hasOwnerRef(updatedClaim, set) {
t.Errorf("ownerRef not added: %s/%s", claim.Namespace, claim.Name)
}
events := collectEvents(recorder.Events)
if eventCount := len(events); eventCount != 1 {
t.Errorf("update failed: got %d events, but want 1", eventCount)
}
}
func TestStatefulPodControlRetentionPolicyUpdateMissingClaims(t *testing.T) {
// Only applicable when the feature gate is on.
defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)()
recorder := record.NewFakeRecorder(10)
set := newStatefulSet(1)
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
pod := newStatefulSetPod(set, 0)
fakeClient := &fake.Clientset{}
podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
podLister := corelisters.NewPodLister(podIndexer)
claimIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
claimLister := corelisters.NewPersistentVolumeClaimLister(claimIndexer)
podIndexer.Add(pod)
fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
claimIndexer.Update(update.GetObject())
return true, update.GetObject(), nil
})
control := NewStatefulPodControl(fakeClient, nil, podLister, claimLister, recorder)
if err := control.UpdateStatefulPod(set, pod); err != nil {
t.Error("Unexpected error on pod update when PVCs are missing")
}
claims := getPersistentVolumeClaims(set, pod)
if len(claims) != 1 {
t.Errorf("Unexpected or missing PVCs")
}
var claim v1.PersistentVolumeClaim
for k := range claims {
claim = claims[k]
claimIndexer.Add(&claim)
}
if err := control.UpdateStatefulPod(set, pod); err != nil {
t.Errorf("Expected update to succeed, saw error %v", err)
}
updatedClaim, err := claimLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
if err != nil {
t.Errorf("Error retrieving claim %s/%s: %v", claim.Namespace, claim.Name, err)
}
if !hasOwnerRef(updatedClaim, set) {
t.Errorf("ownerRef not added: %s/%s", claim.Namespace, claim.Name)
}
events := collectEvents(recorder.Events)
if eventCount := len(events); eventCount != 1 {
t.Errorf("update failed: got %d events, but want 2", eventCount)
}
if !strings.Contains(events[0], "SuccessfulUpdate") {
t.Errorf("expected first event to be a successful update: %s", events[1])
}
}
func collectEvents(source <-chan string) []string {
done := false
events := make([]string, 0)

View File

@ -26,6 +26,7 @@ import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
@ -36,14 +37,16 @@ import (
appspub "github.com/openkruise/kruise/apis/apps/pub"
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/features"
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
imagejobutilfunc "github.com/openkruise/kruise/pkg/util/imagejob/utilfunction"
"github.com/openkruise/kruise/pkg/util/inplaceupdate"
"github.com/openkruise/kruise/pkg/util/lifecycle"
)
// ControlInterface implements the control logic for updating StatefulSets and their children Pods. It is implemented
// StatefulSetControlInterface implements the control logic for updating StatefulSets and their children Pods. It is implemented
// as an interface to allow for extensions that provide different semantics. Currently, there is only one implementation.
type ControlInterface interface {
type StatefulSetControlInterface interface {
// UpdateStatefulSet implements the control logic for Pod creation, update, and deletion, and
// persistent volume creation, update, and deletion.
// If an implementation returns a non-nil error, the invocation will be retried using a rate-limited strategy.
@ -64,12 +67,12 @@ type ControlInterface interface {
// to update the status of StatefulSets. You should use an instance returned from NewRealStatefulPodControl() for any
// scenario other than testing.
func NewDefaultStatefulSetControl(
podControl StatefulPodControlInterface,
podControl *StatefulPodControl,
inplaceControl inplaceupdate.Interface,
lifecycleControl lifecycle.Interface,
statusUpdater StatusUpdaterInterface,
controllerHistory history.Interface,
recorder record.EventRecorder) ControlInterface {
recorder record.EventRecorder) StatefulSetControlInterface {
return &defaultStatefulSetControl{
podControl,
statusUpdater,
@ -81,10 +84,10 @@ func NewDefaultStatefulSetControl(
}
// defaultStatefulSetControl implements ControlInterface
var _ ControlInterface = &defaultStatefulSetControl{}
var _ StatefulSetControlInterface = &defaultStatefulSetControl{}
type defaultStatefulSetControl struct {
podControl StatefulPodControlInterface
podControl *StatefulPodControl
statusUpdater StatusUpdaterInterface
controllerHistory history.Interface
recorder record.EventRecorder
@ -99,7 +102,6 @@ type defaultStatefulSetControl struct {
// in no particular order. Clients using the burst strategy should be careful to ensure they
// understand the consistency implications of having unpredictable numbers of pods available.
func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *appsv1beta1.StatefulSet, pods []*v1.Pod) error {
// list all revisions and sort them
revisions, err := ssc.ListRevisions(set)
if err != nil {
@ -107,10 +109,22 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *appsv1beta1.Statefu
}
history.SortControllerRevisions(revisions)
currentRevision, updateRevision, err := ssc.performUpdate(set, pods, revisions)
if err != nil {
return utilerrors.NewAggregate([]error{err, ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision)})
}
// maintain the set's revision history limit
return ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision)
}
func (ssc *defaultStatefulSetControl) performUpdate(
set *appsv1beta1.StatefulSet, pods []*v1.Pod, revisions []*apps.ControllerRevision) (*apps.ControllerRevision, *apps.ControllerRevision, error) {
var currentStatus *appsv1beta1.StatefulSetStatus
// get the current, and update revisions
currentRevision, updateRevision, collisionCount, err := ssc.getStatefulSetRevisions(set, revisions)
if err != nil {
return err
return currentRevision, updateRevision, err
}
// Refresh update expectations
@ -119,33 +133,31 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *appsv1beta1.Statefu
}
// perform the main update function and get the status
status, getStatusErr := ssc.updateStatefulSet(set, currentRevision, updateRevision, collisionCount, pods, revisions)
updateStatusErr := ssc.updateStatefulSetStatus(set, status)
currentStatus, getStatusErr := ssc.updateStatefulSet(set, currentRevision, updateRevision, collisionCount, pods, revisions)
updateStatusErr := ssc.updateStatefulSetStatus(set, currentStatus)
if getStatusErr != nil {
return getStatusErr
return currentRevision, updateRevision, getStatusErr
}
if updateStatusErr != nil {
return updateStatusErr
return currentRevision, updateRevision, updateStatusErr
}
klog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d available=%d current=%d updated=%d",
klog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d current=%d updated=%d",
set.Namespace,
set.Name,
status.Replicas,
status.ReadyReplicas,
status.AvailableReplicas,
status.CurrentReplicas,
status.UpdatedReplicas)
currentStatus.Replicas,
currentStatus.ReadyReplicas,
currentStatus.CurrentReplicas,
currentStatus.UpdatedReplicas)
klog.V(4).Infof("StatefulSet %s/%s revisions current=%s update=%s",
set.Namespace,
set.Name,
status.CurrentRevision,
status.UpdateRevision)
currentStatus.CurrentRevision,
currentStatus.UpdateRevision)
// maintain the set's revision history limit
return ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision)
return currentRevision, updateRevision, nil
}
func (ssc *defaultStatefulSetControl) ListRevisions(set *appsv1beta1.StatefulSet) ([]*apps.ControllerRevision, error) {
@ -500,6 +512,15 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
}
// If we find a Pod that has not been created we create the Pod
if !isCreated(replicas[i]) {
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
if isStale, err := ssc.podControl.PodClaimIsStale(set, replicas[i]); err != nil {
return &status, err
} else if isStale {
// If a pod has a stale PVC, no more work can be done this round.
return &status, err
}
}
lifecycle.SetPodLifecycle(appspub.LifecycleStateNormal)(replicas[i])
if err := ssc.podControl.CreateStatefulPod(set, replicas[i]); err != nil {
msg := fmt.Sprintf("StatefulPodControl failed to create Pod error: %s", err)
@ -514,7 +535,6 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
if getPodRevision(replicas[i]) == updateRevision.Name {
status.UpdatedReplicas++
}
// if the set does not allow bursting, return immediately
if monotonic || decreaseAndCheckMaxUnavailable(scaleMaxUnavailable) {
return &status, nil
@ -568,7 +588,16 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
}
}
// Enforce the StatefulSet invariants
if identityMatches(set, replicas[i]) && storageMatches(set, replicas[i]) {
retentionMatch := true
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
var err error
retentionMatch, err = ssc.podControl.ClaimsMatchRetentionPolicy(updateSet, replicas[i])
// An error is expected here when the pod is not yet fully updated; in that case the claims are treated as matching the policy.
if err != nil {
retentionMatch = true
}
}
if identityMatches(set, replicas[i]) && storageMatches(set, replicas[i]) && retentionMatch {
continue
}
// Make a deep copy so we don't mutate the shared cache
@ -581,6 +610,19 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
// Ensure ownerRefs are set correctly for the condemned pods.
for i := range condemned {
if matchPolicy, err := ssc.podControl.ClaimsMatchRetentionPolicy(updateSet, condemned[i]); err != nil {
return &status, err
} else if !matchPolicy {
if err := ssc.podControl.UpdatePodClaimForRetentionPolicy(updateSet, condemned[i]); err != nil {
return &status, err
}
}
}
}
// At this point, all of the current Replicas are Running and Ready, we can consider termination.
// We will wait for all predecessors to be Running and Ready prior to attempting a deletion.
// We will terminate Pods in a monotonically decreasing order over [len(pods),set.Spec.Replicas).

File diff suppressed because it is too large

View File

@ -21,12 +21,13 @@ import (
"context"
"fmt"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
clientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
appslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/util/retry"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
clientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
appslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
)
// StatusUpdaterInterface is an interface used to update the StatefulSetStatus associated with a StatefulSet.

View File

@ -21,13 +21,14 @@ import (
"errors"
"testing"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/client/clientset/versioned/fake"
kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/client/clientset/versioned/fake"
kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
)
func TestStatefulSetUpdaterUpdatesSetStatus(t *testing.T) {

View File

@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/history"
@ -124,6 +125,179 @@ func storageMatches(set *appsv1beta1.StatefulSet, pod *v1.Pod) bool {
return true
}
// getPersistentVolumeClaimRetentionPolicy returns the PVC retention policy for a StatefulSet, falling back to a Retain policy when the set's policy is nil.
func getPersistentVolumeClaimRetentionPolicy(set *appsv1beta1.StatefulSet) appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy {
policy := appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
if set.Spec.PersistentVolumeClaimRetentionPolicy != nil {
policy = *set.Spec.PersistentVolumeClaimRetentionPolicy
}
return policy
}
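A nil policy is normalized to Retain/Retain here, so callers of this helper never have to nil-check the spec field. A minimal in-package sketch (the example function name is made up and not part of this change):

func ExampleDefaultRetentionPolicy() {
	set := &appsv1beta1.StatefulSet{} // PersistentVolumeClaimRetentionPolicy left nil
	policy := getPersistentVolumeClaimRetentionPolicy(set)
	fmt.Println(policy.WhenDeleted, policy.WhenScaled)
	// Output: Retain Retain
}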
// claimOwnerMatchesSetAndPod returns false if the ownerRefs of the claim are not set consistently with the
// PVC deletion policy for the StatefulSet.
func claimOwnerMatchesSetAndPod(claim *v1.PersistentVolumeClaim, set *appsv1beta1.StatefulSet, pod *v1.Pod) bool {
policy := set.Spec.PersistentVolumeClaimRetentionPolicy
const retain = appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType
const delete = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType
switch {
default:
klog.Errorf("Unknown policy %v; treating as Retain", set.Spec.PersistentVolumeClaimRetentionPolicy)
fallthrough
case policy.WhenScaled == retain && policy.WhenDeleted == retain:
if hasOwnerRef(claim, set) ||
hasOwnerRef(claim, pod) {
return false
}
case policy.WhenScaled == retain && policy.WhenDeleted == delete:
if !hasOwnerRef(claim, set) ||
hasOwnerRef(claim, pod) {
return false
}
case policy.WhenScaled == delete && policy.WhenDeleted == retain:
if hasOwnerRef(claim, set) {
return false
}
podScaledDown := getOrdinal(pod) >= int(*set.Spec.Replicas)
if podScaledDown != hasOwnerRef(claim, pod) {
return false
}
case policy.WhenScaled == delete && policy.WhenDeleted == delete:
podScaledDown := getOrdinal(pod) >= int(*set.Spec.Replicas)
// If a pod is scaled down, there should be no set ref and a pod ref;
// if the pod is not scaled down it's the other way around.
if podScaledDown == hasOwnerRef(claim, set) {
return false
}
if podScaledDown != hasOwnerRef(claim, pod) {
return false
}
}
return true
}
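To make the cases above concrete, a short in-package sketch of how the match flips as the policy changes (all names are illustrative only, not part of the change):

func exampleClaimOwnerMatch() {
	numReplicas := int32(2)
	set := &appsv1beta1.StatefulSet{}
	set.Name = "web"
	set.Spec.Replicas = &numReplicas
	set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenScaled:  appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
		WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
	}
	pod := &v1.Pod{}
	pod.Name = "web-0"
	claim := &v1.PersistentVolumeClaim{}
	claim.Name = "data-web-0"

	// Retain/Retain: a claim without set or pod ownerRefs already matches.
	fmt.Println(claimOwnerMatchesSetAndPod(claim, set, pod)) // true

	// Retain on scale-down, Delete on set deletion: the claim now needs a set
	// ownerRef, so the same unowned claim no longer matches.
	set.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType
	fmt.Println(claimOwnerMatchesSetAndPod(claim, set, pod)) // false
}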
// updateClaimOwnerRefForSetAndPod updates the ownerRefs for the claim according to the deletion policy of
// the StatefulSet. Returns true if the claim was changed and should be updated and false otherwise.
func updateClaimOwnerRefForSetAndPod(claim *v1.PersistentVolumeClaim, set *appsv1beta1.StatefulSet, pod *v1.Pod) bool {
needsUpdate := false
// Sometimes the API version and kind are not set in {pod,set}.TypeMeta, but both are required for the ownerRef.
// This is the case both in real clusters and in the unit tests.
// TODO: find a better way to do this than hardcoding the pod version.
updateMeta := func(tm *metav1.TypeMeta, kind string) {
if tm.APIVersion == "" {
if kind == "StatefulSet" {
tm.APIVersion = "apps.kruise.io/v1beta1"
} else {
tm.APIVersion = "v1"
}
}
if tm.Kind == "" {
tm.Kind = kind
}
}
podMeta := pod.TypeMeta
updateMeta(&podMeta, "Pod")
setMeta := set.TypeMeta
updateMeta(&setMeta, "StatefulSet")
policy := set.Spec.PersistentVolumeClaimRetentionPolicy
const retain = appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType
const delete = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType
switch {
default:
klog.Errorf("Unknown policy %v, treating as Retain", set.Spec.PersistentVolumeClaimRetentionPolicy)
fallthrough
case policy.WhenScaled == retain && policy.WhenDeleted == retain:
needsUpdate = removeOwnerRef(claim, set) || needsUpdate
needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
case policy.WhenScaled == retain && policy.WhenDeleted == delete:
needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate
needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
case policy.WhenScaled == delete && policy.WhenDeleted == retain:
needsUpdate = removeOwnerRef(claim, set) || needsUpdate
podScaledDown := getOrdinal(pod) >= int(*set.Spec.Replicas)
if podScaledDown {
needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate
}
if !podScaledDown {
needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
}
case policy.WhenScaled == delete && policy.WhenDeleted == delete:
podScaledDown := getOrdinal(pod) >= int(*set.Spec.Replicas)
if podScaledDown {
needsUpdate = removeOwnerRef(claim, set) || needsUpdate
needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate
}
if !podScaledDown {
needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate
needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
}
}
return needsUpdate
}
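The two helpers above are meant to be used as a check-then-mutate pair; a hypothetical sketch of that flow follows (the real controller drives it through StatefulPodControl's ClaimsMatchRetentionPolicy and UpdatePodClaimForRetentionPolicy):

// reconcileClaimOwnerRefs is illustrative only: it mutates the in-memory claim
// when its ownerRefs drift from the set's retention policy and reports whether
// the caller should persist the change with an Update call.
func reconcileClaimOwnerRefs(claim *v1.PersistentVolumeClaim, set *appsv1beta1.StatefulSet, pod *v1.Pod) bool {
	if claimOwnerMatchesSetAndPod(claim, set, pod) {
		return false // ownerRefs already encode the policy, nothing to write
	}
	return updateClaimOwnerRefForSetAndPod(claim, set, pod)
}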
// hasOwnerRef returns true if target has an ownerRef to owner.
func hasOwnerRef(target, owner metav1.Object) bool {
ownerUID := owner.GetUID()
for _, ownerRef := range target.GetOwnerReferences() {
if ownerRef.UID == ownerUID {
return true
}
}
return false
}
// hasStaleOwnerRef returns true if target has a ref to owner that appears to be stale.
func hasStaleOwnerRef(target, owner metav1.Object) bool {
for _, ownerRef := range target.GetOwnerReferences() {
if ownerRef.Name == owner.GetName() && ownerRef.UID != owner.GetUID() {
return true
}
}
return false
}
// setOwnerRef adds owner to the ownerRefs of target, if necessary. Returns true if target needs to be
// updated and false otherwise.
func setOwnerRef(target, owner metav1.Object, ownerType *metav1.TypeMeta) bool {
if hasOwnerRef(target, owner) {
return false
}
ownerRefs := append(
target.GetOwnerReferences(),
metav1.OwnerReference{
APIVersion: ownerType.APIVersion,
Kind: ownerType.Kind,
Name: owner.GetName(),
UID: owner.GetUID(),
})
target.SetOwnerReferences(ownerRefs)
return true
}
// removeOwnerRef removes owner from the ownerRefs of target, if necessary. Returns true if target needs
// to be updated and false otherwise.
func removeOwnerRef(target, owner metav1.Object) bool {
if !hasOwnerRef(target, owner) {
return false
}
ownerUID := owner.GetUID()
oldRefs := target.GetOwnerReferences()
newRefs := make([]metav1.OwnerReference, len(oldRefs)-1)
skip := 0
for i := range oldRefs {
if oldRefs[i].UID == ownerUID {
skip = -1
} else {
newRefs[i+skip] = oldRefs[i]
}
}
target.SetOwnerReferences(newRefs)
return true
}
// getPersistentVolumeClaims gets a map of PersistentVolumeClaims to their template names, as defined in set. The
// returned PersistentVolumeClaims are each constructed with the name specific to the Pod. This name is determined
// by getPersistentVolumeClaimName.

View File

@ -21,6 +21,7 @@ import (
"fmt"
"math/rand"
"reflect"
"regexp"
"sort"
"strconv"
"testing"
@ -31,6 +32,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/history"
@ -39,6 +41,28 @@ import (
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
)
// noopRecorder is an EventRecorder that does nothing. record.FakeRecorder has a fixed
// buffer size, which causes tests to hang if that buffer is exceeded.
type noopRecorder struct{}
func (r *noopRecorder) Event(object runtime.Object, eventtype, reason, message string) {}
func (r *noopRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
}
func (r *noopRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
}
// getClaimPodName gets the name of the Pod associated with the Claim, or an empty string if the claim name does not match the set's naming pattern.
func getClaimPodName(set *appsv1beta1.StatefulSet, claim *v1.PersistentVolumeClaim) string {
podName := ""
statefulClaimRegex := regexp.MustCompile(fmt.Sprintf(".*-(%s-[0-9]+)$", set.Name))
matches := statefulClaimRegex.FindStringSubmatch(claim.Name)
if len(matches) != 2 {
return podName
}
return matches[1]
}
// overlappingStatefulSets sorts a list of StatefulSets by creation timestamp, using their names as a tie breaker.
// Generally used to tie break between StatefulSets that have overlapping selectors.
type overlappingStatefulSets []*appsv1beta1.StatefulSet
@ -70,6 +94,28 @@ func TestGetParentNameAndOrdinal(t *testing.T) {
}
}
func TestGetClaimPodName(t *testing.T) {
set := appsv1beta1.StatefulSet{}
set.Name = "my-set"
claim := v1.PersistentVolumeClaim{}
claim.Name = "volume-my-set-2"
if pod := getClaimPodName(&set, &claim); pod != "my-set-2" {
t.Errorf("Expected my-set-2 found %s", pod)
}
claim.Name = "long-volume-my-set-20"
if pod := getClaimPodName(&set, &claim); pod != "my-set-20" {
t.Errorf("Expected my-set-20 found %s", pod)
}
claim.Name = "volume-2-my-set"
if pod := getClaimPodName(&set, &claim); pod != "" {
t.Errorf("Expected empty string found %s", pod)
}
claim.Name = "volume-pod-2"
if pod := getClaimPodName(&set, &claim); pod != "" {
t.Errorf("Expected empty string found %s", pod)
}
}
func TestIsMemberOf(t *testing.T) {
set := newStatefulSet(3)
set2 := newStatefulSet(3)
@ -199,6 +245,350 @@ func TestUpdateStorage(t *testing.T) {
}
}
func TestGetPersistentVolumeClaimRetentionPolicy(t *testing.T) {
retainPolicy := appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
scaledownPolicy := appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
}
set := appsv1beta1.StatefulSet{}
set.Spec.PersistentVolumeClaimRetentionPolicy = &retainPolicy
got := getPersistentVolumeClaimRetentionPolicy(&set)
if got.WhenScaled != appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType || got.WhenDeleted != appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType {
t.Errorf("Expected retain policy")
}
set.Spec.PersistentVolumeClaimRetentionPolicy = &scaledownPolicy
got = getPersistentVolumeClaimRetentionPolicy(&set)
if got.WhenScaled != appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType || got.WhenDeleted != appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType {
t.Errorf("Expected scaledown policy")
}
}
func TestClaimOwnerMatchesSetAndPod(t *testing.T) {
testCases := []struct {
name string
scaleDownPolicy appsv1beta1.PersistentVolumeClaimRetentionPolicyType
setDeletePolicy appsv1beta1.PersistentVolumeClaimRetentionPolicyType
needsPodRef bool
needsSetRef bool
replicas int32
ordinal int
}{
{
name: "retain",
scaleDownPolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
needsPodRef: false,
needsSetRef: false,
},
{
name: "on SS delete",
scaleDownPolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
needsPodRef: false,
needsSetRef: true,
},
{
name: "on scaledown only, condemned",
scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
needsPodRef: true,
needsSetRef: false,
replicas: 2,
ordinal: 2,
},
{
name: "on scaledown only, remains",
scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
needsPodRef: false,
needsSetRef: false,
replicas: 2,
ordinal: 1,
},
{
name: "on both, condemned",
scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
needsPodRef: true,
needsSetRef: false,
replicas: 2,
ordinal: 2,
},
{
name: "on both, remains",
scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
needsPodRef: false,
needsSetRef: true,
replicas: 2,
ordinal: 1,
},
}
for _, tc := range testCases {
for _, useOtherRefs := range []bool{false, true} {
for _, setPodRef := range []bool{false, true} {
for _, setSetRef := range []bool{false, true} {
claim := v1.PersistentVolumeClaim{}
claim.Name = "target-claim"
pod := v1.Pod{}
pod.Name = fmt.Sprintf("pod-%d", tc.ordinal)
pod.GetObjectMeta().SetUID("pod-123")
set := appsv1beta1.StatefulSet{}
set.Name = "stateful-set"
set.GetObjectMeta().SetUID("ss-456")
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenScaled: tc.scaleDownPolicy,
WhenDeleted: tc.setDeletePolicy,
}
set.Spec.Replicas = &tc.replicas
if setPodRef {
setOwnerRef(&claim, &pod, &pod.TypeMeta)
}
if setSetRef {
setOwnerRef(&claim, &set, &set.TypeMeta)
}
if useOtherRefs {
randomObject1 := v1.Pod{}
randomObject1.Name = "rand1"
randomObject1.GetObjectMeta().SetUID("rand1-abc")
randomObject2 := v1.Pod{}
randomObject2.Name = "rand2"
randomObject2.GetObjectMeta().SetUID("rand2-def")
setOwnerRef(&claim, &randomObject1, &randomObject1.TypeMeta)
setOwnerRef(&claim, &randomObject2, &randomObject2.TypeMeta)
}
shouldMatch := setPodRef == tc.needsPodRef && setSetRef == tc.needsSetRef
if claimOwnerMatchesSetAndPod(&claim, &set, &pod) != shouldMatch {
t.Errorf("Bad match for %s with pod=%v,set=%v,others=%v", tc.name, setPodRef, setSetRef, useOtherRefs)
}
}
}
}
}
}
func TestUpdateClaimOwnerRefForSetAndPod(t *testing.T) {
testCases := []struct {
name string
scaleDownPolicy appsv1beta1.PersistentVolumeClaimRetentionPolicyType
setDeletePolicy appsv1beta1.PersistentVolumeClaimRetentionPolicyType
condemned bool
needsPodRef bool
needsSetRef bool
}{
{
name: "retain",
scaleDownPolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
condemned: false,
needsPodRef: false,
needsSetRef: false,
},
{
name: "delete with set",
scaleDownPolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
condemned: false,
needsPodRef: false,
needsSetRef: true,
},
{
name: "delete with scaledown, not condemned",
scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
condemned: false,
needsPodRef: false,
needsSetRef: false,
},
{
name: "delete on scaledown, condemned",
scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
condemned: true,
needsPodRef: true,
needsSetRef: false,
},
{
name: "delete on both, not condemned",
scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
condemned: false,
needsPodRef: false,
needsSetRef: true,
},
{
name: "delete on both, condemned",
scaleDownPolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
setDeletePolicy: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
condemned: true,
needsPodRef: true,
needsSetRef: false,
},
}
for _, tc := range testCases {
for _, hasPodRef := range []bool{true, false} {
for _, hasSetRef := range []bool{true, false} {
set := appsv1beta1.StatefulSet{}
set.Name = "ss"
numReplicas := int32(5)
set.Spec.Replicas = &numReplicas
set.SetUID("ss-123")
set.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenScaled: tc.scaleDownPolicy,
WhenDeleted: tc.setDeletePolicy,
}
pod := v1.Pod{}
if tc.condemned {
pod.Name = "pod-8"
} else {
pod.Name = "pod-1"
}
pod.SetUID("pod-456")
claim := v1.PersistentVolumeClaim{}
if hasPodRef {
setOwnerRef(&claim, &pod, &pod.TypeMeta)
}
if hasSetRef {
setOwnerRef(&claim, &set, &set.TypeMeta)
}
needsUpdate := hasPodRef != tc.needsPodRef || hasSetRef != tc.needsSetRef
shouldUpdate := updateClaimOwnerRefForSetAndPod(&claim, &set, &pod)
if shouldUpdate != needsUpdate {
t.Errorf("Bad update for %s hasPodRef=%v hasSetRef=%v", tc.name, hasPodRef, hasSetRef)
}
if hasOwnerRef(&claim, &pod) != tc.needsPodRef {
t.Errorf("Bad pod ref for %s hasPodRef=%v hasSetRef=%v", tc.name, hasPodRef, hasSetRef)
}
if hasOwnerRef(&claim, &set) != tc.needsSetRef {
t.Errorf("Bad set ref for %s hasPodRef=%v hasSetRef=%v", tc.name, hasPodRef, hasSetRef)
}
}
}
}
}
func TestHasOwnerRef(t *testing.T) {
target := v1.Pod{}
target.SetOwnerReferences([]metav1.OwnerReference{
{UID: "123"}, {UID: "456"}})
ownerA := v1.Pod{}
ownerA.GetObjectMeta().SetUID("123")
ownerB := v1.Pod{}
ownerB.GetObjectMeta().SetUID("789")
if !hasOwnerRef(&target, &ownerA) {
t.Error("Missing owner")
}
if hasOwnerRef(&target, &ownerB) {
t.Error("Unexpected owner")
}
}
func TestHasStaleOwnerRef(t *testing.T) {
target := v1.Pod{}
target.SetOwnerReferences([]metav1.OwnerReference{
{Name: "bob", UID: "123"}, {Name: "shirley", UID: "456"}})
ownerA := v1.Pod{}
ownerA.SetUID("123")
ownerA.Name = "bob"
ownerB := v1.Pod{}
ownerB.Name = "shirley"
ownerB.SetUID("789")
ownerC := v1.Pod{}
ownerC.Name = "yvonne"
ownerC.SetUID("345")
if hasStaleOwnerRef(&target, &ownerA) {
t.Error("ownerA should not be stale")
}
if !hasStaleOwnerRef(&target, &ownerB) {
t.Error("ownerB should be stale")
}
if hasStaleOwnerRef(&target, &ownerC) {
t.Error("ownerC should not be stale")
}
}
func TestSetOwnerRef(t *testing.T) {
target := v1.Pod{}
ownerA := v1.Pod{}
ownerA.Name = "A"
ownerA.GetObjectMeta().SetUID("ABC")
if setOwnerRef(&target, &ownerA, &ownerA.TypeMeta) != true {
t.Errorf("Unexpected lack of update")
}
ownerRefs := target.GetObjectMeta().GetOwnerReferences()
if len(ownerRefs) != 1 {
t.Errorf("Unexpected owner ref count: %d", len(ownerRefs))
}
if ownerRefs[0].UID != "ABC" {
t.Errorf("Unexpected owner UID %v", ownerRefs[0].UID)
}
if setOwnerRef(&target, &ownerA, &ownerA.TypeMeta) != false {
t.Errorf("Unexpected update")
}
if len(target.GetObjectMeta().GetOwnerReferences()) != 1 {
t.Error("Unexpected duplicate reference")
}
ownerB := v1.Pod{}
ownerB.Name = "B"
ownerB.GetObjectMeta().SetUID("BCD")
if setOwnerRef(&target, &ownerB, &ownerB.TypeMeta) != true {
t.Error("Unexpected lack of second update")
}
ownerRefs = target.GetObjectMeta().GetOwnerReferences()
if len(ownerRefs) != 2 {
t.Errorf("Unexpected owner ref count: %d", len(ownerRefs))
}
if ownerRefs[0].UID != "ABC" || ownerRefs[1].UID != "BCD" {
t.Errorf("Bad second ownerRefs: %v", ownerRefs)
}
}
func TestRemoveOwnerRef(t *testing.T) {
target := v1.Pod{}
ownerA := v1.Pod{}
ownerA.Name = "A"
ownerA.GetObjectMeta().SetUID("ABC")
if removeOwnerRef(&target, &ownerA) != false {
t.Error("Unexpected update on empty remove")
}
setOwnerRef(&target, &ownerA, &ownerA.TypeMeta)
if removeOwnerRef(&target, &ownerA) != true {
t.Error("Unexpected lack of update")
}
if len(target.GetObjectMeta().GetOwnerReferences()) != 0 {
t.Error("Unexpected owner reference remains")
}
ownerB := v1.Pod{}
ownerB.Name = "B"
ownerB.GetObjectMeta().SetUID("BCD")
setOwnerRef(&target, &ownerA, &ownerA.TypeMeta)
if removeOwnerRef(&target, &ownerB) != false {
t.Error("Unexpected update for mismatched owner")
}
if len(target.GetObjectMeta().GetOwnerReferences()) != 1 {
t.Error("Missing ref after no-op remove")
}
setOwnerRef(&target, &ownerB, &ownerB.TypeMeta)
if removeOwnerRef(&target, &ownerA) != true {
t.Error("Missing update for second remove")
}
ownerRefs := target.GetObjectMeta().GetOwnerReferences()
if len(ownerRefs) != 1 {
t.Error("Extra ref after second remove")
}
if ownerRefs[0].UID != "BCD" {
t.Error("Bad UID after second remove")
}
}
func TestIsRunningAndReady(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 1)
@ -400,7 +790,8 @@ func TestRollingUpdateApplyRevision(t *testing.T) {
func newPVC(name string) v1.PersistentVolumeClaim {
return v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
Name: name,
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
@ -465,6 +856,10 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []v1.VolumeM
VolumeClaimTemplates: claims,
ServiceName: "governingsvc",
UpdateStrategy: appsv1beta1.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
PersistentVolumeClaimRetentionPolicy: &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
},
RevisionHistoryLimit: func() *int32 {
limit := int32(2)
return &limit
@ -482,3 +877,77 @@ func newStatefulSet(replicas int) *appsv1beta1.StatefulSet {
}
return newStatefulSetWithVolumes(replicas, "foo", petMounts, podMounts)
}
//func newStatefulSetWithLabels(replicas int, name string, uid types.UID, labels map[string]string) *appsv1beta1.StatefulSet {
// // Converting all the map-only selectors to set-based selectors.
// var testMatchExpressions []metav1.LabelSelectorRequirement
// for key, value := range labels {
// sel := metav1.LabelSelectorRequirement{
// Key: key,
// Operator: metav1.LabelSelectorOpIn,
// Values: []string{value},
// }
// testMatchExpressions = append(testMatchExpressions, sel)
// }
// return &appsv1beta1.StatefulSet{
// TypeMeta: metav1.TypeMeta{
// Kind: "StatefulSet",
// APIVersion: "apps/v1",
// },
// ObjectMeta: metav1.ObjectMeta{
// Name: name,
// Namespace: v1.NamespaceDefault,
// UID: uid,
// },
// Spec: appsv1beta1.StatefulSetSpec{
// Selector: &metav1.LabelSelector{
// // Purposely leaving MatchLabels nil, so to ensure it will break if any link
// // in the chain ignores the set-based MatchExpressions.
// MatchLabels: nil,
// MatchExpressions: testMatchExpressions,
// },
// Replicas: func() *int32 { i := int32(replicas); return &i }(),
// PersistentVolumeClaimRetentionPolicy: &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
// WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
// WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
// },
// Template: v1.PodTemplateSpec{
// ObjectMeta: metav1.ObjectMeta{
// Labels: labels,
// },
// Spec: v1.PodSpec{
// Containers: []v1.Container{
// {
// Name: "nginx",
// Image: "nginx",
// VolumeMounts: []v1.VolumeMount{
// {Name: "datadir", MountPath: "/tmp/"},
// {Name: "home", MountPath: "/home"},
// },
// },
// },
// Volumes: []v1.Volume{{
// Name: "home",
// VolumeSource: v1.VolumeSource{
// HostPath: &v1.HostPathVolumeSource{
// Path: fmt.Sprintf("/tmp/%v", "home"),
// },
// }}},
// },
// },
// VolumeClaimTemplates: []v1.PersistentVolumeClaim{
// {
// ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "datadir"},
// Spec: v1.PersistentVolumeClaimSpec{
// Resources: v1.ResourceRequirements{
// Requests: v1.ResourceList{
// v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
// },
// },
// },
// },
// },
// ServiceName: "governingsvc",
// },
// }
//}

View File

@ -17,9 +17,10 @@ limitations under the License.
package statefulset
import (
v1 "k8s.io/api/core/v1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/util/updatesort"
v1 "k8s.io/api/core/v1"
)
func sortPodsToUpdate(rollingUpdateStrategy *appsv1beta1.RollingUpdateStatefulSetStrategy, updateRevision string, totalReplicas int32, replicas []*v1.Pod) []int {

View File

@ -20,11 +20,12 @@ import (
"reflect"
"testing"
appspub "github.com/openkruise/kruise/apis/apps/pub"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
appspub "github.com/openkruise/kruise/apis/apps/pub"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
)
func TestSortPodsToUpdate(t *testing.T) {

View File

@ -23,21 +23,6 @@ import (
"fmt"
"time"
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/client"
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
"github.com/openkruise/kruise/pkg/features"
"github.com/openkruise/kruise/pkg/util"
utildiscovery "github.com/openkruise/kruise/pkg/util/discovery"
"github.com/openkruise/kruise/pkg/util/expectations"
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
"github.com/openkruise/kruise/pkg/util/inplaceupdate"
"github.com/openkruise/kruise/pkg/util/lifecycle"
"github.com/openkruise/kruise/pkg/util/ratelimiter"
"github.com/openkruise/kruise/pkg/util/requeueduration"
"github.com/openkruise/kruise/pkg/util/revisionadapter"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -61,6 +46,22 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/client"
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
"github.com/openkruise/kruise/pkg/features"
"github.com/openkruise/kruise/pkg/util"
utildiscovery "github.com/openkruise/kruise/pkg/util/discovery"
"github.com/openkruise/kruise/pkg/util/expectations"
utilfeature "github.com/openkruise/kruise/pkg/util/feature"
"github.com/openkruise/kruise/pkg/util/inplaceupdate"
"github.com/openkruise/kruise/pkg/util/lifecycle"
"github.com/openkruise/kruise/pkg/util/ratelimiter"
"github.com/openkruise/kruise/pkg/util/requeueduration"
"github.com/openkruise/kruise/pkg/util/revisionadapter"
)
func init() {
@ -141,7 +142,7 @@ func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) {
return &ReconcileStatefulSet{
kruiseClient: genericClient.KruiseClient,
control: NewDefaultStatefulSetControl(
NewRealStatefulPodControl(
NewStatefulPodControl(
genericClient.KubeClient,
statefulSetLister,
podLister,
@ -167,7 +168,7 @@ type ReconcileStatefulSet struct {
kruiseClient kruiseclientset.Interface
// control returns an interface capable of syncing a stateful set.
// Abstracted out for testing.
control ControlInterface
control StatefulSetControlInterface
// podControl is used for patching pods.
podControl kubecontroller.PodControlInterface
// podLister is able to list/get pods from a shared informer's store

View File

@ -22,10 +22,11 @@ import (
"path/filepath"
"testing"
"github.com/openkruise/kruise/apis"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"github.com/openkruise/kruise/apis"
)
var cfg *rest.Config

View File

@ -25,15 +25,6 @@ import (
"testing"
"time"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
kruisefake "github.com/openkruise/kruise/pkg/client/clientset/versioned/fake"
kruiseinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions"
kruiseappsinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions/apps/v1beta1"
kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
"github.com/openkruise/kruise/pkg/util/inplaceupdate"
"github.com/openkruise/kruise/pkg/util/lifecycle"
"github.com/openkruise/kruise/pkg/util/revisionadapter"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -56,6 +47,16 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/history"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
kruisefake "github.com/openkruise/kruise/pkg/client/clientset/versioned/fake"
kruiseinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions"
kruiseappsinformers "github.com/openkruise/kruise/pkg/client/informers/externalversions/apps/v1beta1"
kruiseappslisters "github.com/openkruise/kruise/pkg/client/listers/apps/v1beta1"
"github.com/openkruise/kruise/pkg/util/inplaceupdate"
"github.com/openkruise/kruise/pkg/util/lifecycle"
"github.com/openkruise/kruise/pkg/util/revisionadapter"
)
const statefulSetResyncPeriod = 30 * time.Second
@ -142,8 +143,8 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) {
t.Error("StatefulSet does not respect termination")
}
sort.Sort(ascendingOrdinal(pods))
spc.DeleteStatefulPod(set, pods[3])
spc.DeleteStatefulPod(set, pods[4])
spc.DeletePod(pods[3])
spc.DeletePod(pods[4])
*set.Spec.Replicas = 0
if err := scaleDownStatefulSetController(set, ssc, spc); err != nil {
t.Errorf("Failed to turn down StatefulSet : %s", err)
@ -193,7 +194,7 @@ func TestStatefulSetControllerBlocksScaling(t *testing.T) {
t.Error("StatefulSet does not block scaling")
}
sort.Sort(ascendingOrdinal(pods))
spc.DeleteStatefulPod(set, pods[0])
spc.DeletePod(pods[0])
ssc.enqueueStatefulSet(set)
fakeWorker(ssc)
pods, err = spc.podsLister.Pods(set.Namespace).List(selector)
@ -613,13 +614,14 @@ func splitObjects(initialObjects []runtime.Object) ([]runtime.Object, []runtime.
return kubeObjects, kruiseObjects
}
func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSetController, *fakeStatefulPodControl) {
func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSetController, *fakeObjectManager) {
kubeObjects, kruiseObjects := splitObjects(initialObjects)
client := fake.NewSimpleClientset(kubeObjects...)
kruiseClient := kruisefake.NewSimpleClientset(kruiseObjects...)
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
kruiseInformerFactory := kruiseinformers.NewSharedInformerFactory(kruiseClient, controller.NoResyncPeriodFunc())
fpc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), kruiseInformerFactory.Apps().V1beta1().StatefulSets())
om := newFakeObjectManager(informerFactory, kruiseInformerFactory)
fpc := NewStatefulPodControlFromManager(om, &noopRecorder{})
ssu := newFakeStatefulSetStatusUpdater(kruiseInformerFactory.Apps().V1beta1().StatefulSets())
ssc := NewStatefulSetController(
informerFactory.Core().V1().Pods(),
@ -637,7 +639,7 @@ func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSe
lifecycleControl := lifecycle.NewForInformer(informerFactory.Core().V1().Pods())
ssc.control = NewDefaultStatefulSetControl(fpc, inplaceControl, lifecycleControl, ssu, ssh, recorder)
return ssc, fpc
return ssc, om
}
func fakeWorker(ssc *StatefulSetController) {
@ -655,7 +657,7 @@ func getPodAtOrdinal(pods []*v1.Pod, ordinal int) *v1.Pod {
return pods[ordinal]
}
func scaleUpStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSetController, spc *fakeStatefulPodControl) error {
func scaleUpStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSetController, spc *fakeObjectManager) error {
spc.setsIndexer.Add(set)
ssc.enqueueStatefulSet(set)
fakeWorker(ssc)
@ -703,7 +705,7 @@ func scaleUpStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSet
return assertMonotonicInvariants(set, spc)
}
func scaleDownStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSetController, spc *fakeStatefulPodControl) error {
func scaleDownStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulSetController, spc *fakeObjectManager) error {
selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
if err != nil {
return err
@ -726,7 +728,7 @@ func scaleDownStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulS
pod = getPodAtOrdinal(pods, ord)
ssc.updatePod(&prev, pod)
fakeWorker(ssc)
spc.DeleteStatefulPod(set, pod)
spc.DeletePod(pod)
ssc.deletePod(pod)
fakeWorker(ssc)
for set.Status.Replicas > *set.Spec.Replicas {
@ -743,7 +745,7 @@ func scaleDownStatefulSetController(set *appsv1beta1.StatefulSet, ssc *StatefulS
pod = getPodAtOrdinal(pods, ord)
ssc.updatePod(&prev, pod)
fakeWorker(ssc)
spc.DeleteStatefulPod(set, pod)
spc.DeletePod(pod)
ssc.deletePod(pod)
fakeWorker(ssc)
obj, _, err := spc.setsIndexer.Get(set)
@ -787,7 +789,7 @@ func NewStatefulSetController(
ReconcileStatefulSet: ReconcileStatefulSet{
kruiseClient: kruiseClient,
control: NewDefaultStatefulSetControl(
NewRealStatefulPodControl(
NewStatefulPodControl(
kubeClient,
setInformer.Lister(),
podInformer.Lister(),

View File

@ -20,12 +20,6 @@ import (
"context"
"fmt"
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/util/expectations"
imagejobutilfunc "github.com/openkruise/kruise/pkg/util/imagejob/utilfunction"
"github.com/openkruise/kruise/pkg/util/inplaceupdate"
"github.com/openkruise/kruise/pkg/util/revisionadapter"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -35,6 +29,13 @@ import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller/history"
"sigs.k8s.io/controller-runtime/pkg/client"
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/pkg/util/expectations"
imagejobutilfunc "github.com/openkruise/kruise/pkg/util/imagejob/utilfunction"
"github.com/openkruise/kruise/pkg/util/inplaceupdate"
"github.com/openkruise/kruise/pkg/util/revisionadapter"
)
func (dss *defaultStatefulSetControl) createImagePullJobsForInPlaceUpdate(sts *appsv1beta1.StatefulSet, currentRevision, updateRevision *apps.ControllerRevision) error {

View File

@ -79,6 +79,9 @@ const (
// InPlaceUpdateEnvFromMetadata enables Kruise to in-place update a container in Pod
// when its env from labels/annotations changed and pod is in-place updating.
InPlaceUpdateEnvFromMetadata featuregate.Feature = "InPlaceUpdateEnvFromMetadata"
// StatefulSetAutoDeletePVC enables policies that control deletion of PVCs created from a StatefulSet's VolumeClaimTemplates.
StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC"
)
var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
@ -96,6 +99,7 @@ var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
PodUnavailableBudgetUpdateGate: {Default: false, PreRelease: featuregate.Alpha},
TemplateNoDefaults: {Default: false, PreRelease: featuregate.Alpha},
InPlaceUpdateEnvFromMetadata: {Default: false, PreRelease: featuregate.Alpha},
StatefulSetAutoDeletePVC: {Default: false, PreRelease: featuregate.Alpha},
}
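The gate defaults to off (Alpha), so all retention-policy behavior is guarded at its call sites; the pattern used throughout the defaulting and controller code looks like this (a sketch, assuming the usual utilfeature/features import aliases):

if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
	// Honor spec.persistentVolumeClaimRetentionPolicy; with the gate off the
	// field is ignored and PVCs are always retained.
}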
func init() {

View File

@ -0,0 +1,28 @@
package feature
import (
"fmt"
"testing"
"k8s.io/component-base/featuregate"
)
// SetFeatureGateDuringTest sets the specified gate to the specified value, and returns a function that restores the original value.
// Failures to set or restore cause the test to fail.
//
// Example use:
//
// defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, true)()
func SetFeatureGateDuringTest(tb testing.TB, gate featuregate.FeatureGate, f featuregate.Feature, value bool) func() {
originalValue := gate.Enabled(f)
if err := gate.(featuregate.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", f, value)); err != nil {
tb.Errorf("error setting %s=%v: %v", f, value, err)
}
return func() {
if err := gate.(featuregate.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", f, originalValue)); err != nil {
tb.Errorf("error restoring %s=%v: %v", f, originalValue, err)
}
}
}
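A sketch of how a test for the new gate-dependent paths might use this helper (the test name is hypothetical, and it assumes the helper lives in the same package as DefaultFeatureGate, imported here as utilfeature):

func TestAutoDeletePVCGateToggle(t *testing.T) {
	// Enable the alpha gate only for the duration of this test.
	defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)()

	if !utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
		t.Fatal("expected StatefulSetAutoDeletePVC to be enabled")
	}
}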

View File

@ -67,6 +67,26 @@ func validateScaleStrategy(spec *appsv1beta1.StatefulSetSpec, fldPath *field.Pat
return allErrs
}
func ValidatePersistentVolumeClaimRetentionPolicyType(policy appsv1beta1.PersistentVolumeClaimRetentionPolicyType, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
switch policy {
case appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType:
case appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType:
default:
allErrs = append(allErrs, field.NotSupported(fldPath, policy, []string{string(appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType), string(appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType)}))
}
return allErrs
}
func ValidatePersistentVolumeClaimRetentionPolicy(policy *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if policy != nil {
allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicyType(policy.WhenDeleted, fldPath.Child("whenDeleted"))...)
allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicyType(policy.WhenScaled, fldPath.Child("whenScaled"))...)
}
return allErrs
}
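Only Retain and Delete are accepted; anything else yields a NotSupported error on the offending child field. A minimal in-package sketch of calling the validator directly (the test name is made up):

func TestRejectsUnknownRetentionPolicy(t *testing.T) {
	policy := &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: "Recycle", // not a supported value
		WhenScaled:  appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
	}
	errs := ValidatePersistentVolumeClaimRetentionPolicy(policy, field.NewPath("spec", "persistentVolumeClaimRetentionPolicy"))
	if len(errs) != 1 {
		t.Fatalf("expected exactly one NotSupported error, got %v", errs)
	}
}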
func validateOnDeleteStatefulSetStrategyType(spec *appsv1beta1.StatefulSetSpec, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
@ -208,6 +228,7 @@ func validateStatefulSetSpec(spec *appsv1beta1.StatefulSetSpec, fldPath *field.P
allErrs = append(allErrs, validateReserveOrdinals(spec, fldPath)...)
allErrs = append(allErrs, validateScaleStrategy(spec, fldPath)...)
allErrs = append(allErrs, validateUpdateStrategyType(spec, fldPath)...)
allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicy(spec.PersistentVolumeClaimRetentionPolicy, fldPath.Child("persistentVolumeClaimRetentionPolicy"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.Replicas), fldPath.Child("replicas"))...)
@ -291,6 +312,9 @@ func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *appsv1beta1.Stateful
restoreStrategy := statefulSet.Spec.UpdateStrategy
statefulSet.Spec.UpdateStrategy = oldStatefulSet.Spec.UpdateStrategy
restorePersistentVolumeClaimRetentionPolicy := statefulSet.Spec.PersistentVolumeClaimRetentionPolicy
statefulSet.Spec.PersistentVolumeClaimRetentionPolicy = oldStatefulSet.Spec.PersistentVolumeClaimRetentionPolicy
restoreScaleStrategy := statefulSet.Spec.ScaleStrategy
statefulSet.Spec.ScaleStrategy = oldStatefulSet.Spec.ScaleStrategy
@ -300,15 +324,17 @@ func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *appsv1beta1.Stateful
statefulSet.Spec.RevisionHistoryLimit = oldStatefulSet.Spec.RevisionHistoryLimit
if !apiequality.Semantic.DeepEqual(statefulSet.Spec, oldStatefulSet.Spec) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to statefulset spec for fields other than 'replicas', 'template', 'reserveOrdinals', 'lifecycle', 'revisionHistoryLimit' and 'updateStrategy' are forbidden"))
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to statefulset spec for fields other than 'replicas', 'template', 'reserveOrdinals', 'lifecycle', 'revisionHistoryLimit', 'persistentVolumeClaimRetentionPolicy' and 'updateStrategy' are forbidden"))
}
statefulSet.Spec.Replicas = restoreReplicas
statefulSet.Spec.Template = restoreTemplate
statefulSet.Spec.UpdateStrategy = restoreStrategy
statefulSet.Spec.ScaleStrategy = restoreScaleStrategy
statefulSet.Spec.ReserveOrdinals = restoreReserveOrdinals
statefulSet.Spec.PersistentVolumeClaimRetentionPolicy = restorePersistentVolumeClaimRetentionPolicy
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*statefulSet.Spec.Replicas), field.NewPath("spec", "replicas"))...)
allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicy(statefulSet.Spec.PersistentVolumeClaimRetentionPolicy, field.NewPath("spec", "persistentVolumeClaimRetentionPolicy"))...)
return allErrs
}

View File

@ -19,17 +19,13 @@ package apps
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
appspub "github.com/openkruise/kruise/apis/apps/pub"
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
"github.com/openkruise/kruise/test/e2e/framework"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -40,6 +36,13 @@ import (
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
appspub "github.com/openkruise/kruise/apis/apps/pub"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
"github.com/openkruise/kruise/test/e2e/framework"
)
const (
@ -84,7 +87,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
headlessSvcName := "test"
var statefulPodMounts, podMounts []v1.VolumeMount
var ss *appsv1alpha1.StatefulSet
var ss *appsv1beta1.StatefulSet
ginkgo.BeforeEach(func() {
statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
@ -113,7 +116,7 @@ var _ = SIGDescribe("StatefulSet", func() {
sst := framework.NewStatefulSetTester(c, kc)
sst.PauseNewPods(ss)
_, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
_, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Saturating stateful set " + ss.Name)
@ -155,7 +158,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Replace ss with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned ss.
kind := ss.Kind
ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ss.Kind = kind
@ -237,7 +240,7 @@ var _ = SIGDescribe("StatefulSet", func() {
sst := framework.NewStatefulSetTester(c, kc)
sst.PauseNewPods(ss)
_, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
_, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
time.Sleep(time.Minute)
@ -295,17 +298,17 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c, kc)
sst.SetHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{
ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1alpha1.RollingUpdateStatefulSetStrategy {
return &appsv1alpha1.RollingUpdateStatefulSetStrategy{
RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy {
return &appsv1beta1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(3)
return &i
}()}
}(),
}
ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
@ -327,7 +330,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@ -355,21 +358,21 @@ var _ = SIGDescribe("StatefulSet", func() {
}
ginkgo.By("Performing a canary update")
ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{
ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1alpha1.RollingUpdateStatefulSetStrategy {
return &appsv1alpha1.RollingUpdateStatefulSetStrategy{
RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy {
return &appsv1beta1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(2)
return &i
}()}
}(),
}
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
update.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
update.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1alpha1.RollingUpdateStatefulSetStrategy {
return &appsv1alpha1.RollingUpdateStatefulSetStrategy{
RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy {
return &appsv1beta1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(2)
return &i
@ -447,12 +450,12 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Performing a phased rolling update")
for i := int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) - 1; i >= 0; i-- {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
update.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
update.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1alpha1.RollingUpdateStatefulSetStrategy {
RollingUpdate: func() *appsv1beta1.RollingUpdateStatefulSetStrategy {
j := int32(i)
return &appsv1alpha1.RollingUpdateStatefulSetStrategy{
return &appsv1beta1.RollingUpdateStatefulSetStrategy{
Partition: &j,
}
}(),
@ -506,10 +509,10 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c, kc)
sst.SetHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{
ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{
Type: apps.OnDeleteStatefulSetStrategyType,
}
ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
@ -547,7 +550,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@ -590,15 +593,15 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c, kc)
sst.SetHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{
ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: &appsv1alpha1.RollingUpdateStatefulSetStrategy{
PodUpdatePolicy: appsv1alpha1.InPlaceIfPossiblePodUpdateStrategyType,
RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{
PodUpdatePolicy: appsv1beta1.InPlaceIfPossiblePodUpdateStrategyType,
InPlaceUpdateStrategy: &appspub.InPlaceUpdateStrategy{GracePeriodSeconds: 10},
},
}
ss.Spec.Template.Spec.ReadinessGates = append(ss.Spec.Template.Spec.ReadinessGates, v1.PodReadinessGate{ConditionType: appspub.InPlaceUpdateReady})
ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
@ -637,10 +640,10 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
var partition int32 = 3
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
if update.Spec.UpdateStrategy.RollingUpdate == nil {
update.Spec.UpdateStrategy.RollingUpdate = &appsv1alpha1.RollingUpdateStatefulSetStrategy{}
update.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{}
}
update.Spec.UpdateStrategy.RollingUpdate.Partition = &partition
})
@ -651,7 +654,7 @@ var _ = SIGDescribe("StatefulSet", func() {
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
partition = 0
update.Spec.UpdateStrategy.RollingUpdate.Partition = &partition
})
@ -683,10 +686,10 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c, kc)
sst.SetHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1alpha1.StatefulSetUpdateStrategy{
ss.Spec.UpdateStrategy = appsv1beta1.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: &appsv1alpha1.RollingUpdateStatefulSetStrategy{
PodUpdatePolicy: appsv1alpha1.InPlaceIfPossiblePodUpdateStrategyType,
RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{
PodUpdatePolicy: appsv1beta1.InPlaceIfPossiblePodUpdateStrategyType,
InPlaceUpdateStrategy: &appspub.InPlaceUpdateStrategy{GracePeriodSeconds: 10},
},
}
@ -699,7 +702,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['test-env']"}},
})
ss.Spec.Template.Spec.ReadinessGates = append(ss.Spec.Template.Spec.ReadinessGates, v1.PodReadinessGate{ConditionType: appspub.InPlaceUpdateReady})
ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
@ -735,10 +738,10 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Updating stateful set template: update label for env")
var partition int32 = 3
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
update.Spec.Template.ObjectMeta.Labels["test-env"] = "bar"
if update.Spec.UpdateStrategy.RollingUpdate == nil {
update.Spec.UpdateStrategy.RollingUpdate = &appsv1alpha1.RollingUpdateStatefulSetStrategy{}
update.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{}
}
update.Spec.UpdateStrategy.RollingUpdate.Partition = &partition
})
@ -749,7 +752,7 @@ var _ = SIGDescribe("StatefulSet", func() {
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
partition = 0
update.Spec.UpdateStrategy.RollingUpdate.Partition = &partition
})
@ -783,7 +786,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
sst := framework.NewStatefulSetTester(c, kc)
sst.SetHTTPProbe(ss)
ss, err = kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err = kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
@ -864,7 +867,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
sst := framework.NewStatefulSetTester(c, kc)
sst.SetHTTPProbe(ss)
ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
@ -931,7 +934,7 @@ var _ = SIGDescribe("StatefulSet", func() {
statefulPodContainer := &ss.Spec.Template.Spec.Containers[0]
statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort)
ss.Spec.Template.Spec.NodeName = node.Name
_, err = kc.AppsV1alpha1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss, metav1.CreateOptions{})
_, err = kc.AppsV1beta1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
@ -997,13 +1000,13 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
sst := framework.NewStatefulSetTester(c, kc)
sst.SetHTTPProbe(ss)
ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
ginkgo.By("getting scale subresource")
scale, err := kc.AppsV1alpha1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{})
scale, err := kc.AppsV1beta1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get scale subresource: %v", err)
}
@ -1015,14 +1018,14 @@ var _ = SIGDescribe("StatefulSet", func() {
scale.ResourceVersion = "" // indicate the scale update should be unconditional
}
scale.Spec.Replicas = 2
scaleResult, err := kc.AppsV1alpha1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale, metav1.UpdateOptions{})
scaleResult, err := kc.AppsV1beta1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale, metav1.UpdateOptions{})
if err != nil {
framework.Failf("Failed to put scale subresource: %v", err)
}
framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
ginkgo.By("verifying the statefulset Spec.Replicas was modified")
ss, err = kc.AppsV1alpha1().StatefulSets(ns).Get(context.TODO(), ssName, metav1.GetOptions{})
ss, err = kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), ssName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get statefulset resource: %v", err)
}
@ -1030,13 +1033,11 @@ var _ = SIGDescribe("StatefulSet", func() {
})
})
//framework.KruiseDescribe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() {
// var sst *framework.StatefulSetTester
//ginkgo.Describe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() {
// var appTester *clusterAppTester
//
// ginkgo.BeforeEach(func() {
// sst = framework.NewStatefulSetTester(c, kc)
// appTester = &clusterAppTester{tester: sst, ns: ns}
// appTester = &clusterAppTester{client: c, ns: ns}
// })
//
// ginkgo.AfterEach(func() {
@ -1044,37 +1045,248 @@ var _ = SIGDescribe("StatefulSet", func() {
// framework.DumpDebugInfo(c, ns)
// }
// framework.Logf("Deleting all statefulset in ns %v", ns)
// framework.DeleteAllStatefulSets(c, kc, ns)
// e2estatefulset.DeleteAllStatefulSets(c, ns)
// })
//
// // Do not mark this as Conformance.
// // StatefulSet Conformance should not be dependent on specific applications.
// ginkgo.It("should creating a working zookeeper cluster", func() {
// appTester.statefulPod = &zookeeperTester{tester: sst}
// e2epv.SkipIfNoDefaultStorageClass(c)
// appTester.statefulPod = &zookeeperTester{client: c}
// appTester.run()
// })
//
// // Do not mark this as Conformance.
// // StatefulSet Conformance should not be dependent on specific applications.
// ginkgo.It("should creating a working redis cluster", func() {
// appTester.statefulPod = &redisTester{tester: sst}
// e2epv.SkipIfNoDefaultStorageClass(c)
// appTester.statefulPod = &redisTester{client: c}
// appTester.run()
// })
//
// // Do not mark this as Conformance.
// // StatefulSet Conformance should not be dependent on specific applications.
// ginkgo.It("should creating a working mysql cluster", func() {
// appTester.statefulPod = &mysqlGaleraTester{tester: sst}
// e2epv.SkipIfNoDefaultStorageClass(c)
// appTester.statefulPod = &mysqlGaleraTester{client: c}
// appTester.run()
// })
//
// // Do not mark this as Conformance.
// // StatefulSet Conformance should not be dependent on specific applications.
// ginkgo.It("should creating a working CockroachDB cluster", func() {
// appTester.statefulPod = &cockroachDBTester{tester: sst}
// e2epv.SkipIfNoDefaultStorageClass(c)
// appTester.statefulPod = &cockroachDBTester{client: c}
// appTester.run()
// })
//})
//
//// Make sure minReadySeconds is honored
//// Don't mark it as conformance yet
//ginkgo.It("MinReadySeconds should be honored when enabled", func() {
// ssName := "test-ss"
// headlessSvcName := "test"
// // Define StatefulSet Labels
// ssPodLabels := map[string]string{
// "name": "sample-pod",
// "pod": WebserverImageName,
// }
// ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, ssPodLabels)
// setHTTPProbe(ss)
// ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
// framework.ExpectNoError(err)
// e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 1)
//})
//
//ginkgo.It("AvailableReplicas should get updated accordingly when MinReadySeconds is enabled", func() {
// ssName := "test-ss"
// headlessSvcName := "test"
// // Define StatefulSet Labels
// ssPodLabels := map[string]string{
// "name": "sample-pod",
// "pod": WebserverImageName,
// }
// ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, nil, nil, ssPodLabels)
// ss.Spec.MinReadySeconds = 30
// setHTTPProbe(ss)
// ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
// framework.ExpectNoError(err)
// e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 0)
// // let's check that the availableReplicas have still not updated
// time.Sleep(5 * time.Second)
// ss, err = c.AppsV1().StatefulSets(ns).Get(context.TODO(), ss.Name, metav1.GetOptions{})
// framework.ExpectNoError(err)
// if ss.Status.AvailableReplicas != 0 {
// framework.Failf("invalid number of availableReplicas: expected=%v received=%v", 0, ss.Status.AvailableReplicas)
// }
// e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2)
//
// ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
// update.Spec.MinReadySeconds = 3600
// })
// framework.ExpectNoError(err)
// // We don't expect replicas to be updated till 1 hour, so the availableReplicas should be 0
// e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 0)
//
// ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
// update.Spec.MinReadySeconds = 0
// })
// framework.ExpectNoError(err)
// e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2)
//
// ginkgo.By("check availableReplicas are shown in status")
// out, err := framework.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml")
// framework.ExpectNoError(err)
// if !strings.Contains(out, "availableReplicas: 2") {
// framework.Failf("invalid number of availableReplicas: expected=%v received=%v", 2, out)
// }
//})
ginkgo.Describe("Non-retain StatefulSetPersistentVolumeClaimPolicy [Feature:StatefulSetAutoDeletePVC]", func() {
ssName := "ss"
labels := map[string]string{
"foo": "bar",
"baz": "blah",
}
headlessSvcName := "test"
var statefulPodMounts, podMounts []v1.VolumeMount
var ss *appsv1beta1.StatefulSet
ginkgo.BeforeEach(func() {
statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ss = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
_, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err)
})
ginkgo.AfterEach(func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
framework.DeleteAllStatefulSets(c, kc, ns)
})
ginkgo.It("should delete PVCs with a WhenDeleted policy", func() {
if framework.SkipIfNoDefaultStorageClass(c) {
return
}
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
}
_, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Confirming all 3 PVCs exist with their owner refs")
err = verifyStatefulSetPVCsExistWithOwnerRefs(c, kc, ss, []int{0, 1, 2}, true, false)
framework.ExpectNoError(err)
ginkgo.By("Deleting stateful set " + ss.Name)
err = kc.AppsV1beta1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Verifying PVCs deleted")
err = verifyStatefulSetPVCsExist(c, ss, []int{})
framework.ExpectNoError(err)
})
ginkgo.It("should delete PVCs with a OnScaledown policy", func() {
if framework.SkipIfNoDefaultStorageClass(c) {
return
}
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
}
_, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Confirming all 3 PVCs exist")
err = verifyStatefulSetPVCsExist(c, ss, []int{0, 1, 2})
framework.ExpectNoError(err)
ginkgo.By("Scaling stateful set " + ss.Name + " to one replica")
ss, err = framework.NewStatefulSetTester(c, kc).Scale(ss, 1)
framework.ExpectNoError(err)
ginkgo.By("Verifying all but one PVC deleted")
err = verifyStatefulSetPVCsExist(c, ss, []int{0})
framework.ExpectNoError(err)
})
ginkgo.It("should delete PVCs after adopting pod (WhenDeleted)", func() {
if framework.SkipIfNoDefaultStorageClass(c) {
return
}
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
}
_, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Confirming all 3 PVCs exist with their owner refs")
err = verifyStatefulSetPVCsExistWithOwnerRefs(c, kc, ss, []int{0, 1, 2}, true, false)
framework.ExpectNoError(err)
ginkgo.By("Orphaning the 3rd pod")
patch, err := json.Marshal(metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{},
})
framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")
_, err = c.CoreV1().Pods(ns).Patch(context.TODO(), fmt.Sprintf("%s-2", ss.Name), types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "")
framework.ExpectNoError(err, "Could not patch payload")
ginkgo.By("Deleting stateful set " + ss.Name)
err = kc.AppsV1beta1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Verifying PVCs deleted")
err = verifyStatefulSetPVCsExist(c, ss, []int{})
framework.ExpectNoError(err)
})
ginkgo.It("should delete PVCs after adopting pod (WhenScaled) [Feature:StatefulSetAutoDeletePVC]", func() {
if framework.SkipIfNoDefaultStorageClass(c) {
return
}
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenScaled: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
}
_, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Confirming all 3 PVCs exist")
err = verifyStatefulSetPVCsExist(c, ss, []int{0, 1, 2})
framework.ExpectNoError(err)
ginkgo.By("Orphaning the 3rd pod")
patch, err := json.Marshal(metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{},
})
framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")
_, err = c.CoreV1().Pods(ns).Patch(context.TODO(), fmt.Sprintf("%s-2", ss.Name), types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "")
framework.ExpectNoError(err, "Could not patch payload")
ginkgo.By("Scaling stateful set " + ss.Name + " to one replica")
ss, err = framework.NewStatefulSetTester(c, kc).Scale(ss, 1)
framework.ExpectNoError(err)
ginkgo.By("Verifying all but one PVC deleted")
err = verifyStatefulSetPVCsExist(c, ss, []int{0})
framework.ExpectNoError(err)
})
})
})
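For reference, a minimal sketch (not part of the commit itself) of how a caller opts into the new retention behavior on an Advanced StatefulSet, assuming the StatefulSetAutoDeletePVC feature gate is enabled and reusing the `ss`, `kc`, and `ns` variables as they appear in the tests above:
// Minimal sketch: delete the templated PVCs when the StatefulSet itself is deleted.
ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
}
_, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)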
func kubectlExecWithRetries(args ...string) (out string) {
@ -1090,7 +1302,7 @@ func kubectlExecWithRetries(args ...string) (out string) {
}
type statefulPodTester interface {
deploy(ns string) *appsv1alpha1.StatefulSet
deploy(ns string) *appsv1beta1.StatefulSet
write(statefulPodIndex int, kv map[string]string)
read(statefulPodIndex int, key string) string
name() string
@ -1127,7 +1339,7 @@ type statefulPodTester interface {
//}
//
//type zookeeperTester struct {
// ss *appsv1alpha1.StatefulSet
// ss *appsv1beta1.StatefulSet
// tester *framework.StatefulSetTester
//}
//
@ -1135,7 +1347,7 @@ type statefulPodTester interface {
// return "zookeeper"
//}
//
//func (z *zookeeperTester) deploy(ns string) *appsv1alpha1.StatefulSet {
//func (z *zookeeperTester) deploy(ns string) *appsv1beta1.StatefulSet {
// z.ss = z.tester.CreateStatefulSet(zookeeperManifestPath, ns)
// return z.ss
//}
@ -1157,7 +1369,7 @@ type statefulPodTester interface {
//}
//
//type mysqlGaleraTester struct {
// ss *appsv1alpha1.StatefulSet
// ss *appsv1beta1.StatefulSet
// tester *framework.StatefulSetTester
//}
//
@ -1173,7 +1385,7 @@ type statefulPodTester interface {
// return kubectlExecWithRetries(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
//}
//
//func (m *mysqlGaleraTester) deploy(ns string) *appsv1alpha1.StatefulSet {
//func (m *mysqlGaleraTester) deploy(ns string) *appsv1beta1.StatefulSet {
// m.ss = m.tester.CreateStatefulSet(mysqlGaleraManifestPath, ns)
//
// framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
@ -1200,7 +1412,7 @@ type statefulPodTester interface {
//}
//
//type redisTester struct {
// ss *appsv1alpha1.StatefulSet
// ss *appsv1beta1.StatefulSet
// tester *framework.StatefulSetTester
//}
//
@ -1213,7 +1425,7 @@ type statefulPodTester interface {
// return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
//}
//
//func (m *redisTester) deploy(ns string) *appsv1alpha1.StatefulSet {
//func (m *redisTester) deploy(ns string) *appsv1beta1.StatefulSet {
// m.ss = m.tester.CreateStatefulSet(redisManifestPath, ns)
// return m.ss
//}
@ -1231,7 +1443,7 @@ type statefulPodTester interface {
//}
//
//type cockroachDBTester struct {
// ss *appsv1alpha1.StatefulSet
// ss *appsv1beta1.StatefulSet
// tester *framework.StatefulSetTester
//}
//
@ -1244,7 +1456,7 @@ type statefulPodTester interface {
// return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
//}
//
//func (c *cockroachDBTester) deploy(ns string) *appsv1alpha1.StatefulSet {
//func (c *cockroachDBTester) deploy(ns string) *appsv1beta1.StatefulSet {
// c.ss = c.tester.CreateStatefulSet(cockroachDBManifestPath, ns)
// framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
// for _, cmd := range []string{
@ -1292,10 +1504,10 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string, ss *appsv1alpha1.StatefulSet) {
func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string, ss *appsv1beta1.StatefulSet) {
sst := framework.NewStatefulSetTester(c, kc)
sst.SetHTTPProbe(ss)
ss, err := kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
@ -1321,7 +1533,7 @@ func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string
ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@ -1365,7 +1577,7 @@ func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1alpha1.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, func(update *appsv1beta1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@ -1404,3 +1616,111 @@ func rollbackTest(c clientset.Interface, kc kruiseclientset.Interface, ns string
priorRevision))
}
}
// verifyStatefulSetPVCsExist confirms that exactly the PVCs for ss with the specified ids exist. This polls until
// the situation occurs, an error happens, or until timeout (in the latter case an error is also returned).
// Beware that this cannot tell if a PVC will be deleted at some point in the future, so if used to confirm that
// no PVCs are deleted, the caller should wait for some event giving the PVCs a reasonable chance to be deleted
// before calling this function.
func verifyStatefulSetPVCsExist(c clientset.Interface, ss *appsv1beta1.StatefulSet, claimIds []int) error {
idSet := map[int]struct{}{}
for _, id := range claimIds {
idSet[id] = struct{}{}
}
return wait.PollImmediate(framework.StatefulSetPoll, framework.StatefulSetTimeout, func() (bool, error) {
pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()})
if err != nil {
framework.Logf("WARNING: Failed to list pvcs for verification, retrying: %v", err)
return false, nil
}
for _, claim := range ss.Spec.VolumeClaimTemplates {
pvcNameRE := regexp.MustCompile(fmt.Sprintf("^%s-%s-([0-9]+)$", claim.Name, ss.Name))
seenPVCs := map[int]struct{}{}
for _, pvc := range pvcList.Items {
matches := pvcNameRE.FindStringSubmatch(pvc.Name)
if len(matches) != 2 {
continue
}
ordinal, err := strconv.ParseInt(matches[1], 10, 32)
if err != nil {
framework.Logf("ERROR: bad pvc name %s (%v)", pvc.Name, err)
return false, err
}
if _, found := idSet[int(ordinal)]; !found {
return false, nil // Retry until the PVCs are consistent.
} else {
seenPVCs[int(ordinal)] = struct{}{}
}
}
if len(seenPVCs) != len(idSet) {
framework.Logf("Found %d of %d PVCs", len(seenPVCs), len(idSet))
return false, nil // Retry until the PVCs are consistent.
}
}
return true, nil
})
}
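A minimal usage sketch, mirroring the calls in the WhenScaled test above: after scaling down to one replica, only the ordinal-0 PVC is expected to remain (assumes the clientset `c` and StatefulSet `ss` from that test context).
// Sketch: fail the test if any PVC other than ordinal 0 survives the scale-down.
if err := verifyStatefulSetPVCsExist(c, ss, []int{0}); err != nil {
framework.Failf("unexpected set of PVCs after scale-down: %v", err)
}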
// verifyStatefulSetPVCsExistWithOwnerRefs works as verifyStatefulSetPVCsExist, but also waits for the ownerRefs to match.
func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, kc kruiseclientset.Interface, ss *appsv1beta1.StatefulSet, claimIndices []int, wantSetRef, wantPodRef bool) error {
indexSet := map[int]struct{}{}
for _, id := range claimIndices {
indexSet[id] = struct{}{}
}
set, err := kc.AppsV1beta1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
setUID := set.GetUID()
if setUID == "" {
framework.Failf("Statefulset %s missing UID", ss.Name)
}
return wait.PollImmediate(framework.StatefulSetPoll, framework.StatefulSetTimeout, func() (bool, error) {
pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()})
if err != nil {
framework.Logf("WARNING: Failed to list pvcs for verification, retrying: %v", err)
return false, nil
}
for _, claim := range ss.Spec.VolumeClaimTemplates {
pvcNameRE := regexp.MustCompile(fmt.Sprintf("^%s-%s-([0-9]+)$", claim.Name, ss.Name))
seenPVCs := map[int]struct{}{}
for _, pvc := range pvcList.Items {
matches := pvcNameRE.FindStringSubmatch(pvc.Name)
if len(matches) != 2 {
continue
}
ordinal, err := strconv.ParseInt(matches[1], 10, 32)
if err != nil {
framework.Logf("ERROR: bad pvc name %s (%v)", pvc.Name, err)
return false, err
}
if _, found := indexSet[int(ordinal)]; !found {
framework.Logf("Unexpected, retrying")
return false, nil // Retry until the PVCs are consistent.
}
var foundSetRef, foundPodRef bool
for _, ref := range pvc.GetOwnerReferences() {
if ref.Kind == "StatefulSet" && ref.UID == setUID {
foundSetRef = true
}
if ref.Kind == "Pod" {
podName := fmt.Sprintf("%s-%d", ss.Name, ordinal)
pod, err := c.CoreV1().Pods(ss.Namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
framework.Logf("Pod %s not found, retrying (%v)", podName, err)
return false, nil
}
podUID := pod.GetUID()
if podUID == "" {
framework.Failf("Pod %s is missing UID", pod.Name)
}
if ref.UID == podUID {
foundPodRef = true
}
}
}
if foundSetRef == wantSetRef && foundPodRef == wantPodRef {
seenPVCs[int(ordinal)] = struct{}{}
}
}
if len(seenPVCs) != len(indexSet) {
framework.Logf("Only %d PVCs, retrying", len(seenPVCs))
return false, nil // Retry until the PVCs are consistent.
}
}
return true, nil
})
}
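Usage sketch, as in the WhenDeleted test above: wait until the PVCs for ordinals 0-2 each carry a StatefulSet owner reference and no Pod owner reference (assumes `c`, `kc`, and `ss` from that test context).
// Sketch: expect a set-level owner ref (true) but no pod-level owner ref (false) on PVCs 0, 1 and 2.
err := verifyStatefulSetPVCsExistWithOwnerRefs(c, kc, ss, []int{0, 1, 2}, true, false)
framework.ExpectNoError(err)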

View File

@ -20,12 +20,22 @@ package framework
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
)
const (
// isDefaultStorageClassAnnotation represents a StorageClass annotation that
// marks a class as the default StorageClass
isDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"
// betaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation.
// TODO: remove Beta when no longer used
betaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
)
// create the PV resource. Fails test on error.
func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
@ -39,3 +49,49 @@ func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol
func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, pv)
}
// SkipIfNoDefaultStorageClass returns true when no default StorageClass can be found, so callers can skip PVC-dependent tests.
func SkipIfNoDefaultStorageClass(c clientset.Interface) bool {
_, err := GetDefaultStorageClassName(c)
if err != nil {
Logf("error finding default storageClass : %v", err)
return true
}
return false
}
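A usage sketch matching the PVC tests above: the helper returns true when no default StorageClass exists, so callers simply bail out of the test (assumes a test body with a clientset `c` in scope).
// Sketch: skip PVC-dependent checks when the cluster has no default StorageClass.
if framework.SkipIfNoDefaultStorageClass(c) {
return
}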
// GetDefaultStorageClassName returns the name of the default StorageClass, or an error if none or more than one is found.
func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return "", fmt.Errorf("Error listing storage classes: %v", err)
}
var scName string
for _, sc := range list.Items {
if isDefaultAnnotation(sc.ObjectMeta) {
if len(scName) != 0 {
return "", fmt.Errorf("Multiple default storage classes found: %q and %q", scName, sc.Name)
}
scName = sc.Name
}
}
if len(scName) == 0 {
return "", fmt.Errorf("No default storage class found")
}
Logf("Default storage class: %q", scName)
return scName, nil
}
// isDefaultAnnotation reports whether the default storage class
// annotation is set
// TODO: remove Beta when no longer needed
func isDefaultAnnotation(obj metav1.ObjectMeta) bool {
if obj.Annotations[isDefaultStorageClassAnnotation] == "true" {
return true
}
if obj.Annotations[betaIsDefaultStorageClassAnnotation] == "true" {
return true
}
return false
}

View File

@ -30,7 +30,7 @@ import (
"github.com/onsi/gomega"
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
"github.com/openkruise/kruise/test/e2e/manifest"
apps "k8s.io/api/apps/v1"
@ -87,8 +87,8 @@ func NewStatefulSetTester(c clientset.Interface, kc kruiseclientset.Interface) *
}
// GetStatefulSet gets the StatefulSet named name in namespace.
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1alpha1.StatefulSet {
ss, err := s.kc.AppsV1alpha1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1beta1.StatefulSet {
ss, err := s.kc.AppsV1beta1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
}
@ -96,7 +96,7 @@ func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1alpha1
}
// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create.
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1alpha1.StatefulSet {
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1beta1.StatefulSet {
mkpath := func(file string) string {
return filepath.Join(manifestPath, file)
}
@ -113,14 +113,14 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1al
gomega.Expect(err).NotTo(gomega.HaveOccurred())
Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
_, err = s.kc.AppsV1alpha1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
_, err = s.kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
return ss
}
// CheckMount checks that the mount at mountPath is valid for all Pods in ss.
func (s *StatefulSetTester) CheckMount(ss *appsv1alpha1.StatefulSet, mountPath string) error {
func (s *StatefulSetTester) CheckMount(ss *appsv1beta1.StatefulSet, mountPath string) error {
for _, cmd := range []string{
// Print inode, size etc
fmt.Sprintf("ls -idlh %v", mountPath),
@ -137,7 +137,7 @@ func (s *StatefulSetTester) CheckMount(ss *appsv1alpha1.StatefulSet, mountPath s
}
// ExecInStatefulPods executes cmd in all Pods in ss. If an error occurs it is returned and cmd is not executed in any subsequent Pods.
func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1alpha1.StatefulSet, cmd string) error {
func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1beta1.StatefulSet, cmd string) error {
podList := s.GetPodList(ss)
for _, statefulPod := range podList.Items {
stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
@ -150,7 +150,7 @@ func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1alpha1.StatefulSet, cmd
}
// CheckHostname verifies that all Pods in ss have the correct Hostname. If the returned error is not nil, verification failed.
func (s *StatefulSetTester) CheckHostname(ss *appsv1alpha1.StatefulSet) error {
func (s *StatefulSetTester) CheckHostname(ss *appsv1beta1.StatefulSet) error {
cmd := "printf $(hostname)"
podList := s.GetPodList(ss)
for _, statefulPod := range podList.Items {
@ -166,7 +166,7 @@ func (s *StatefulSetTester) CheckHostname(ss *appsv1alpha1.StatefulSet) error {
}
// Saturate waits for all Pods in ss to become Running and Ready.
func (s *StatefulSetTester) Saturate(ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) Saturate(ss *appsv1beta1.StatefulSet) {
var i int32
for i = 0; i < *(ss.Spec.Replicas); i++ {
Logf("Waiting for stateful pod at index %v to enter Running", i)
@ -177,7 +177,7 @@ func (s *StatefulSetTester) Saturate(ss *appsv1alpha1.StatefulSet) {
}
// DeleteStatefulPodAtIndex deletes the Pod with ordinal index in ss.
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1beta1.StatefulSet) {
name := getStatefulSetPodNameAtIndex(index, ss)
noGrace := int64(0)
if err := s.c.CoreV1().Pods(ss.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
@ -189,26 +189,26 @@ func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1alpha1
type VerifyStatefulPodFunc func(*v1.Pod)
// VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *appsv1alpha1.StatefulSet, verify VerifyStatefulPodFunc) {
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *appsv1beta1.StatefulSet, verify VerifyStatefulPodFunc) {
name := getStatefulSetPodNameAtIndex(index, ss)
pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
verify(pod)
}
func getStatefulSetPodNameAtIndex(index int, ss *appsv1alpha1.StatefulSet) string {
func getStatefulSetPodNameAtIndex(index int, ss *appsv1beta1.StatefulSet) string {
// TODO: we won't use "-index" as the name strategy forever,
// pull the name out from an identity mapper.
return fmt.Sprintf("%v-%v", ss.Name, index)
}
// Scale scales ss to count replicas.
func (s *StatefulSetTester) Scale(ss *appsv1alpha1.StatefulSet, count int32) (*appsv1alpha1.StatefulSet, error) {
func (s *StatefulSetTester) Scale(ss *appsv1beta1.StatefulSet, count int32) (*appsv1beta1.StatefulSet, error) {
name := ss.Name
ns := ss.Namespace
Logf("Scaling statefulset %s to %d", name, count)
ss = s.update(ns, name, func(ss *appsv1alpha1.StatefulSet) { *(ss.Spec.Replicas) = count })
ss = s.update(ns, name, func(ss *appsv1beta1.StatefulSet) { *(ss.Spec.Replicas) = count })
var statefulPodList *v1.PodList
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
@ -232,12 +232,12 @@ func (s *StatefulSetTester) Scale(ss *appsv1alpha1.StatefulSet, count int32) (*a
}
// UpdateReplicas updates the replicas of ss to count.
func (s *StatefulSetTester) UpdateReplicas(ss *appsv1alpha1.StatefulSet, count int32) {
s.update(ss.Namespace, ss.Name, func(ss *appsv1alpha1.StatefulSet) { *(ss.Spec.Replicas) = count })
func (s *StatefulSetTester) UpdateReplicas(ss *appsv1beta1.StatefulSet, count int32) {
s.update(ss.Namespace, ss.Name, func(ss *appsv1beta1.StatefulSet) { *(ss.Spec.Replicas) = count })
}
// Restart scales ss to 0 and then back to its previous number of replicas.
func (s *StatefulSetTester) Restart(ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) Restart(ss *appsv1beta1.StatefulSet) {
oldReplicas := *(ss.Spec.Replicas)
ss, err := s.Scale(ss, 0)
ExpectNoError(err)
@ -245,17 +245,17 @@ func (s *StatefulSetTester) Restart(ss *appsv1alpha1.StatefulSet) {
// This way we know the controller has observed all Pod deletions
// before we scale it back up.
s.WaitForStatusReplicas(ss, 0)
s.update(ss.Namespace, ss.Name, func(ss *appsv1alpha1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
s.update(ss.Namespace, ss.Name, func(ss *appsv1beta1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
}
func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1alpha1.StatefulSet)) *appsv1alpha1.StatefulSet {
func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1beta1.StatefulSet)) *appsv1beta1.StatefulSet {
for i := 0; i < 3; i++ {
ss, err := s.kc.AppsV1alpha1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
ss, err := s.kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
Failf("failed to get statefulset %q: %v", name, err)
}
update(ss)
ss, err = s.kc.AppsV1alpha1().StatefulSets(ns).Update(context.TODO(), ss, metav1.UpdateOptions{})
ss, err = s.kc.AppsV1beta1().StatefulSets(ns).Update(context.TODO(), ss, metav1.UpdateOptions{})
if err == nil {
return ss
}
@ -268,7 +268,7 @@ func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1alpha1
}
// GetPodList gets the current Pods in ss.
func (s *StatefulSetTester) GetPodList(ss *appsv1alpha1.StatefulSet) *v1.PodList {
func (s *StatefulSetTester) GetPodList(ss *appsv1beta1.StatefulSet) *v1.PodList {
selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
ExpectNoError(err)
podList, err := s.c.CoreV1().Pods(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
@ -278,7 +278,7 @@ func (s *StatefulSetTester) GetPodList(ss *appsv1alpha1.StatefulSet) *v1.PodList
// ConfirmStatefulPodCount asserts that the current number of Pods in ss is count, waiting up to timeout for ss
// to scale to count.
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1alpha1.StatefulSet, timeout time.Duration, hard bool) {
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1beta1.StatefulSet, timeout time.Duration, hard bool) {
start := time.Now()
deadline := start.Add(timeout)
for t := time.Now(); t.Before(deadline); t = time.Now() {
@ -301,7 +301,7 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1alpha1.
// WaitForRunning waits for numPodsRunning in ss to be Running and for the first
// numPodsReady ordinals to be Ready.
func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *appsv1beta1.StatefulSet) {
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
podList := s.GetPodList(ss)
@ -330,10 +330,10 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
}
// WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
func (s *StatefulSetTester) WaitForState(ss *appsv1alpha1.StatefulSet, until func(*appsv1alpha1.StatefulSet, *v1.PodList) (bool, error)) {
func (s *StatefulSetTester) WaitForState(ss *appsv1beta1.StatefulSet, until func(*appsv1beta1.StatefulSet, *v1.PodList) (bool, error)) {
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.kc.AppsV1alpha1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{})
ssGet, err := s.kc.AppsV1beta1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -347,8 +347,8 @@ func (s *StatefulSetTester) WaitForState(ss *appsv1alpha1.StatefulSet, until fun
// WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
// The returned StatefulSet contains such a StatefulSetStatus
func (s *StatefulSetTester) WaitForStatus(set *appsv1alpha1.StatefulSet) *appsv1alpha1.StatefulSet {
s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods *v1.PodList) (bool, error) {
func (s *StatefulSetTester) WaitForStatus(set *appsv1beta1.StatefulSet) *appsv1beta1.StatefulSet {
s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods *v1.PodList) (bool, error) {
if set2.Status.ObservedGeneration >= set.Generation {
set = set2
return true, nil
@ -359,14 +359,14 @@ func (s *StatefulSetTester) WaitForStatus(set *appsv1alpha1.StatefulSet) *appsv1
}
// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *appsv1beta1.StatefulSet) {
s.WaitForRunning(numStatefulPods, numStatefulPods, ss)
}
// WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition.
func (s *StatefulSetTester) WaitForPodReady(set *appsv1alpha1.StatefulSet, podName string) (*appsv1alpha1.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForPodReady(set *appsv1beta1.StatefulSet, podName string) (*appsv1beta1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
for i := range pods.Items {
@ -381,9 +381,9 @@ func (s *StatefulSetTester) WaitForPodReady(set *appsv1alpha1.StatefulSet, podNa
}
// WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1alpha1.StatefulSet, podName string) (*appsv1alpha1.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1beta1.StatefulSet, podName string) (*appsv1beta1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
for i := range pods.Items {
@ -399,7 +399,7 @@ func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1alpha1.StatefulSet, po
// WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
// complete. set must have a RollingUpdateStatefulSetStrategyType.
func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1alpha1.StatefulSet) (*appsv1alpha1.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1beta1.StatefulSet) (*appsv1beta1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
@ -407,7 +407,7 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1alpha1.StatefulSet)
set.Name,
set.Spec.UpdateStrategy.Type)
}
s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
if len(pods.Items) < int(*set.Spec.Replicas) {
@ -439,7 +439,7 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1alpha1.StatefulSet)
// a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
// than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
// at its update revision.
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1alpha1.StatefulSet) (*appsv1alpha1.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1beta1.StatefulSet) (*appsv1beta1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
@ -452,7 +452,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1alpha1.St
set.Namespace,
set.Name)
}
s.WaitForState(set, func(set2 *appsv1alpha1.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1beta1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@ -492,7 +492,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1alpha1.St
}
// WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *appsv1beta1.StatefulSet) {
s.WaitForRunning(numStatefulPods, 0, ss)
}
@ -511,12 +511,12 @@ var httpProbe = &v1.Probe{
// SetHTTPProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers.
// This probe can then be controlled with BreakHTTPProbe() and RestoreHTTPProbe().
// Note that this cannot be used together with PauseNewPods().
func (s *StatefulSetTester) SetHTTPProbe(ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) SetHTTPProbe(ss *appsv1beta1.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe
}
// BreakHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1alpha1.StatefulSet) error {
func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1beta1.StatefulSet) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -527,7 +527,7 @@ func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1alpha1.StatefulSet) error {
}
// BreakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1alpha1.StatefulSet, pod *v1.Pod) error {
func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1beta1.StatefulSet, pod *v1.Pod) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -540,7 +540,7 @@ func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1alpha1.StatefulSet, pod
}
// RestoreHTTPProbe restores the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1alpha1.StatefulSet) error {
func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1beta1.StatefulSet) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -551,7 +551,7 @@ func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1alpha1.StatefulSet) error
}
// RestorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.
func (s *StatefulSetTester) RestorePodHTTPProbe(ss *appsv1alpha1.StatefulSet, pod *v1.Pod) error {
func (s *StatefulSetTester) RestorePodHTTPProbe(ss *appsv1beta1.StatefulSet, pod *v1.Pod) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -581,7 +581,7 @@ func hasPauseProbe(pod *v1.Pod) bool {
// This causes all newly-created Pods to stay Unready until they are manually resumed
// with ResumeNextPod().
// Note that this cannot be used together with SetHTTPProbe().
func (s *StatefulSetTester) PauseNewPods(ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) PauseNewPods(ss *appsv1beta1.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe
}
@ -590,7 +590,7 @@ func (s *StatefulSetTester) PauseNewPods(ss *appsv1alpha1.StatefulSet) {
// It fails the test if it finds any pods that are not in phase Running,
// or if it finds more than one paused Pod existing at the same time.
// This is a no-op if there are no paused pods.
func (s *StatefulSetTester) ResumeNextPod(ss *appsv1alpha1.StatefulSet) {
func (s *StatefulSetTester) ResumeNextPod(ss *appsv1beta1.StatefulSet) {
podList := s.GetPodList(ss)
resumedPod := ""
for _, pod := range podList.Items {
@ -611,13 +611,13 @@ func (s *StatefulSetTester) ResumeNextPod(ss *appsv1alpha1.StatefulSet) {
}
// WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1alpha1.StatefulSet, expectedReplicas int32) {
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1beta1.StatefulSet, expectedReplicas int32) {
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.kc.AppsV1alpha1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
ssGet, err := s.kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -636,13 +636,13 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1alpha1.Stateful
}
// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1alpha1.StatefulSet, expectedReplicas int32) {
func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1beta1.StatefulSet, expectedReplicas int32) {
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.kc.AppsV1alpha1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
ssGet, err := s.kc.AppsV1beta1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -661,7 +661,7 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1alpha1.StatefulSet,
}
// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
func (s *StatefulSetTester) CheckServiceName(ss *appsv1alpha1.StatefulSet, expectedServiceName string) error {
func (s *StatefulSetTester) CheckServiceName(ss *appsv1beta1.StatefulSet, expectedServiceName string) error {
Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
if expectedServiceName != ss.Spec.ServiceName {
@ -680,7 +680,7 @@ func (s *StatefulSetTester) SortStatefulPods(pods *v1.PodList) {
// DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
func DeleteAllStatefulSets(c clientset.Interface, kc kruiseclientset.Interface, ns string) {
sst := &StatefulSetTester{c: c, kc: kc}
ssList, err := kc.AppsV1alpha1().StatefulSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
ssList, err := kc.AppsV1beta1().StatefulSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
ExpectNoError(err)
// Scale down each statefulset, then delete it completely.
@ -696,7 +696,7 @@ func DeleteAllStatefulSets(c clientset.Interface, kc kruiseclientset.Interface,
Logf("Deleting statefulset %v", ss.Name)
// Use OrphanDependents=false so it's deleted synchronously.
// We already made sure the Pods are gone inside Scale().
if err := kc.AppsV1alpha1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
if err := kc.AppsV1beta1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
}
@ -773,7 +773,7 @@ func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
// NewStatefulSet creates a new NGINX StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
// statefulPodMounts are the mounts that will be backed by PVs. podMounts are the mounts that are mounted directly
// to the Pod. labels are the labels that will be used for the StatefulSet selector.
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1alpha1.StatefulSet {
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1beta1.StatefulSet {
mounts := append(statefulPodMounts, podMounts...)
claims := []v1.PersistentVolumeClaim{}
for _, m := range statefulPodMounts {
@ -792,7 +792,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
})
}
return &appsv1alpha1.StatefulSet{
return &appsv1beta1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps.kruise.io/v1alpha1",
@ -801,7 +801,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Name: name,
Namespace: ns,
},
Spec: appsv1alpha1.StatefulSetSpec{
Spec: appsv1beta1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
@ -814,15 +814,16 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.Nginx),
VolumeMounts: mounts,
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.Nginx),
VolumeMounts: mounts,
ImagePullPolicy: v1.PullIfNotPresent,
},
},
Volumes: vols,
},
},
UpdateStrategy: appsv1alpha1.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
UpdateStrategy: appsv1beta1.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
VolumeClaimTemplates: claims,
ServiceName: governingSvcName,
},
@ -830,7 +831,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
}
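Usage sketch, matching the BeforeEach of the PVC tests above: mounts passed as statefulPodMounts become PVC-backed VolumeClaimTemplates, while podMounts are mounted directly on the Pod (assumes a test namespace `ns` and a `labels` map as in those tests).
// Sketch: build a 2-replica NGINX set with one PV-backed mount and one plain mount.
statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ss := framework.NewStatefulSet("ss", ns, "test", 2, statefulPodMounts, podMounts, labels)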
// NewStatefulSetScale creates a new StatefulSet scale subresource and returns it
func NewStatefulSetScale(ss *appsv1alpha1.StatefulSet) *appsV1beta2.Scale {
func NewStatefulSetScale(ss *appsv1beta1.StatefulSet) *appsV1beta2.Scale {
return &appsV1beta2.Scale{
// TODO: Create a variant of ObjectMeta type that only contains the fields below.
ObjectMeta: metav1.ObjectMeta{
@ -874,11 +875,11 @@ func (sp statefulPodsByOrdinal) Less(i, j int) bool {
return getStatefulPodOrdinal(&sp[i]) < getStatefulPodOrdinal(&sp[j])
}
type updateStatefulSetFunc func(*appsv1alpha1.StatefulSet)
type updateStatefulSetFunc func(*appsv1beta1.StatefulSet)
// UpdateStatefulSetWithRetries update StatefulSet with retries
func UpdateStatefulSetWithRetries(kc kruiseclientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1alpha1.StatefulSet, err error) {
statefulSets := kc.AppsV1alpha1().StatefulSets(namespace)
func UpdateStatefulSetWithRetries(kc kruiseclientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1beta1.StatefulSet, err error) {
statefulSets := kc.AppsV1beta1().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if statefulSet, err = statefulSets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil {

View File

@ -18,7 +18,7 @@ limitations under the License.
package manifest
import (
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1"
"github.com/openkruise/kruise/test/e2e/framework/testfiles"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -28,8 +28,8 @@ import (
)
// StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns.
func StatefulSetFromManifest(fileName, ns string) (*appsv1alpha1.StatefulSet, error) {
var ss appsv1alpha1.StatefulSet
func StatefulSetFromManifest(fileName, ns string) (*appsv1beta1.StatefulSet, error) {
var ss appsv1beta1.StatefulSet
data, err := testfiles.Read(fileName)
if err != nil {
return nil, err