Merge pull request #112905 from alexzielenski/kubectl-apply-csa-migration

kubectl: `apply --server-side` managed fields migration

Kubernetes-commit: 2f837dc113ba35f84e7012a6d1b06b075b349353
Commit 3814a9af80 by Kubernetes Publisher, 2022-11-07 17:08:21 -08:00
5 changed files with 488 additions and 5 deletions
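The core of this change is the new k8s.io/client-go/util/csaupgrade helpers that kubectl apply wires in below. As a rough standalone sketch (the helper name migrationPatchFor and the packaging are assumptions, not part of this PR), the migration step amounts to finding the Update-operation managers that own the last-applied-configuration annotation and asking csaupgrade for a JSON patch that moves their fields under the default kubectl SSA manager:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/util/csaupgrade"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// Same field path the PR uses to locate the last-applied-configuration
// annotation inside managed fields.
var lastApplied = fieldpath.NewSet(
	fieldpath.MakePathOrDie("metadata", "annotations",
		"kubectl.kubernetes.io/last-applied-configuration"),
)

// migrationPatchFor returns a JSON patch that moves client-side-apply
// ("Update") field ownership under the server-side-apply manager "kubectl",
// or nil if the object has no client-side-apply fields left to migrate.
func migrationPatchFor(obj runtime.Object) ([]byte, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return nil, err
	}
	// Every Update-operation manager owning the annotation is a
	// client-side-apply manager that should be migrated.
	owners := csaupgrade.FindFieldsOwners(
		accessor.GetManagedFields(),
		metav1.ManagedFieldsOperationUpdate,
		lastApplied,
	)
	managers := sets.New[string]()
	for _, o := range owners {
		managers.Insert(o.Manager)
	}
	return csaupgrade.UpgradeManagedFieldsPatch(obj, managers, "kubectl")
}

func main() {
	// An object with no client-side-apply managed fields yields a nil patch,
	// meaning there is nothing to migrate.
	patch, err := migrationPatchFor(&unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
	}})
	fmt.Printf("patch=%v err=%v\n", patch, err)
}
```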

go.mod

@@ -33,7 +33,7 @@ require (
k8s.io/api v0.0.0-20221108053747-3f61c95cab71
k8s.io/apimachinery v0.0.0-20221108052757-4fe4321a9d5e
k8s.io/cli-runtime v0.0.0-20221108072842-e556445586e6
- k8s.io/client-go v0.0.0-20221108054908-3daf180aa6b1
+ k8s.io/client-go v0.0.0-20221108054910-ea9ec9169797
k8s.io/component-base v0.0.0-20221108061007-abdc0eb56a1d
k8s.io/component-helpers v0.0.0-20221108061658-aa222c251f8e
k8s.io/klog/v2 v2.80.1
@@ -43,6 +43,7 @@ require (
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2
sigs.k8s.io/kustomize/kustomize/v4 v4.5.7
sigs.k8s.io/kustomize/kyaml v0.13.9
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.3
sigs.k8s.io/yaml v1.3.0
)
@@ -87,14 +88,13 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/kustomize/api v0.12.1 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
replace (
k8s.io/api => k8s.io/api v0.0.0-20221108053747-3f61c95cab71
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20221108052757-4fe4321a9d5e
k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20221108072842-e556445586e6
- k8s.io/client-go => k8s.io/client-go v0.0.0-20221108054908-3daf180aa6b1
+ k8s.io/client-go => k8s.io/client-go v0.0.0-20221108054910-ea9ec9169797
k8s.io/code-generator => k8s.io/code-generator v0.0.0-20221108000200-7429fbb99432
k8s.io/component-base => k8s.io/component-base v0.0.0-20221108061007-abdc0eb56a1d
k8s.io/component-helpers => k8s.io/component-helpers v0.0.0-20221108061658-aa222c251f8e

go.sum

@@ -546,8 +546,8 @@ k8s.io/apimachinery v0.0.0-20221108052757-4fe4321a9d5e h1:zX/AC2CNrYwngyVHVHcsCL
k8s.io/apimachinery v0.0.0-20221108052757-4fe4321a9d5e/go.mod h1:VXMmlsE7YRJ5vyAyWpkKIfFkEbDNpVs0ObpkuQf1WfM=
k8s.io/cli-runtime v0.0.0-20221108072842-e556445586e6 h1:O5RHkYLsqflfhvXClaKFHb4Gp8++bIIY+lYJG/Q5ZhU=
k8s.io/cli-runtime v0.0.0-20221108072842-e556445586e6/go.mod h1:+qJo1vrpfIpPsTFvZ5thfxazTEZCRmSPdhC0bxQ51/w=
- k8s.io/client-go v0.0.0-20221108054908-3daf180aa6b1 h1:0kFOheClgHsssAuquQ5SUM5vDTS8tSaSjv+9J1UXTnI=
- k8s.io/client-go v0.0.0-20221108054908-3daf180aa6b1/go.mod h1:nLJ8OQ/Q/icQcfjnVrKZyCgc/CPXX7o8Hlqh70Oo6Jk=
+ k8s.io/client-go v0.0.0-20221108054910-ea9ec9169797 h1:T9QsUDmalrZVfj0rQztOzzRnrkm0XY0GZm176jmV2Fo=
+ k8s.io/client-go v0.0.0-20221108054910-ea9ec9169797/go.mod h1:nLJ8OQ/Q/icQcfjnVrKZyCgc/CPXX7o8Hlqh70Oo6Jk=
k8s.io/component-base v0.0.0-20221108061007-abdc0eb56a1d h1:IF51IULm49B+VT4PJOBd4z/SxtU0WVfInKrnykCG/t0=
k8s.io/component-base v0.0.0-20221108061007-abdc0eb56a1d/go.mod h1:yN3pjWt18ANoCwZlZZaB+9OdFe2a6rgTUE51kThAe3Q=
k8s.io/component-helpers v0.0.0-20221108061658-aa222c251f8e h1:v2TulZDdzLZe3sB78qO1wn3l+7UTM6lpofZf4u8cmms=

pkg/cmd/apply/apply.go

@@ -22,6 +22,7 @@ import (
"net/http"
"github.com/spf13/cobra"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -36,6 +37,7 @@ import (
"k8s.io/cli-runtime/pkg/printers"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/util/csaupgrade"
"k8s.io/klog/v2"
"k8s.io/kubectl/pkg/cmd/delete"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -163,6 +165,9 @@ var (
warningNoLastAppliedConfigAnnotation = "Warning: resource %[1]s is missing the %[2]s annotation which is required by %[3]s apply. %[3]s apply should only be used on resources created declaratively by either %[3]s create --save-config or %[3]s apply. The missing annotation will be patched automatically.\n"
warningChangesOnDeletingResource = "Warning: Detected changes to resource %[1]s which is currently being deleted.\n"
warningMigrationLastAppliedFailed = "Warning: failed to migrate kubectl.kubernetes.io/last-applied-configuration for Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
warningMigrationPatchFailed = "Warning: server rejected managed fields migration to Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
warningMigrationReapplyFailed = "Warning: failed to re-apply configuration after performing Server-Side Apply migration. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
)
// NewApplyFlags returns a default ApplyFlags
@@ -542,6 +547,40 @@ See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`
info.Refresh(obj, true)
// Migrate managed fields if necessary.
//
// By checking afterward rather than fetching the object beforehand, we
// make 3 network requests in the rare case that migration is needed and
// only 1 request when it is not.
//
// Checking beforehand would mean 2 requests for most operations, and 3
// requests in the worst case.
if err = o.saveLastApplyAnnotationIfNecessary(helper, info); err != nil {
fmt.Fprintf(o.ErrOut, warningMigrationLastAppliedFailed, err.Error())
} else if performedMigration, err := o.migrateToSSAIfNecessary(helper, info); err != nil {
// Print the error as a warning.
// This is non-fatal: the object was successfully applied above, but its
// managed fields may be left unmigrated since the migration failed.
//
// The migration will be re-attempted, if still necessary, on the next
// apply.
fmt.Fprintf(o.ErrOut, warningMigrationPatchFailed, err.Error())
} else if performedMigration {
// Re-send the original SSA patch; this allows fields that were dropped
// during the migration to finally be removed.
if obj, err = helper.Patch(
info.Namespace,
info.Name,
types.ApplyPatchType,
data,
&options,
); err != nil {
fmt.Fprintf(o.ErrOut, warningMigrationReapplyFailed, err.Error())
} else {
info.Refresh(obj, false)
}
}
WarnIfDeleting(info.Object, o.ErrOut)
if err := o.MarkObjectVisited(info); err != nil {
@@ -660,6 +699,183 @@ See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`
return nil
}
// Saves the last-applied-configuration annotation in a separate SSA field manager
// to prevent it from being dropped by users who have transitioned to SSA.
//
// If this operation is not performed, the last-applied-configuration annotation
// would be dropped from the object on the first SSA usage. We want to keep it
// around for a few releases, since it is required to downgrade from
// server-side apply back to client-side apply per [1] and [2]. This code should
// be removed once the annotation is deprecated.
//
// - [1] https://kubernetes.io/docs/reference/using-api/server-side-apply/#downgrading-from-server-side-apply-to-client-side-apply
// - [2] https://github.com/kubernetes/kubernetes/pull/90187
//
// If the annotation is not already present, or if it is already managed by the
// separate SSA field manager, this is a no-op.
func (o *ApplyOptions) saveLastApplyAnnotationIfNecessary(
helper *resource.Helper,
info *resource.Info,
) error {
if o.FieldManager != fieldManagerServerSideApply {
// There is no point in preserving the annotation if the field manager
// is not the default, because the server only keeps the annotation
// up to date for the default manager.
return nil
}
// Send an apply patch containing only the last-applied-configuration
// annotation so that it is not orphaned by SSA in the following patch:
accessor, err := meta.Accessor(info.Object)
if err != nil {
return err
}
// Get the current annotations from the object.
annots := accessor.GetAnnotations()
if annots == nil {
annots = map[string]string{}
}
fieldManager := fieldManagerLastAppliedAnnotation
originalAnnotation, hasAnnotation := annots[corev1.LastAppliedConfigAnnotation]
// If the annotation does not already exist, we do not do anything
if !hasAnnotation {
return nil
}
// If there is already an SSA field manager which owns the field, then there
// is nothing to do here.
if owners := csaupgrade.FindFieldsOwners(
accessor.GetManagedFields(),
metav1.ManagedFieldsOperationApply,
lastAppliedAnnotationFieldPath,
); len(owners) > 0 {
return nil
}
justAnnotation := &unstructured.Unstructured{}
justAnnotation.SetGroupVersionKind(info.Mapping.GroupVersionKind)
justAnnotation.SetName(accessor.GetName())
justAnnotation.SetNamespace(accessor.GetNamespace())
justAnnotation.SetAnnotations(map[string]string{
corev1.LastAppliedConfigAnnotation: originalAnnotation,
})
modified, err := runtime.Encode(unstructured.UnstructuredJSONScheme, justAnnotation)
if err != nil {
return err
}
helperCopy := *helper
newObj, err := helperCopy.WithFieldManager(fieldManager).Patch(
info.Namespace,
info.Name,
types.ApplyPatchType,
modified,
nil,
)
if err != nil {
return err
}
return info.Refresh(newObj, false)
}
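For reference, the annotation-only apply above sends a body carrying just the object identity and the last-applied-configuration annotation. A minimal sketch of what that body looks like when encoded (the ReplicationController name and namespace mirror the testdata file further down; the annotation value is shortened for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Build an object containing nothing but identity fields and the
	// last-applied-configuration annotation.
	justAnnotation := &unstructured.Unstructured{}
	justAnnotation.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"})
	justAnnotation.SetName("test-rc")
	justAnnotation.SetNamespace("test")
	justAnnotation.SetAnnotations(map[string]string{
		"kubectl.kubernetes.io/last-applied-configuration": `{"apiVersion":"v1","kind":"ReplicationController"}`,
	})

	// This is the body kubectl sends as an ApplyPatchType patch under the
	// "kubectl-last-applied" field manager, so that manager ends up owning
	// only the annotation.
	body, err := runtime.Encode(unstructured.UnstructuredJSONScheme, justAnnotation)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```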
// Check if the returned object needs to have its kubectl-client-side-apply
// managed fields migrated to server-side apply.
//
// Field ownership metadata is stored in three places:
// - server-side managed fields
// - client-side managed fields
// - the last-applied-configuration annotation.
//
// The migration merges the client-side-managed fields into the
// server-side-managed fields, leaving the last-applied-configuration
// annotation in place. The server will keep the annotation up to date
// after every server-side apply where the following conditions are met:
//
// 1. the field manager is 'kubectl'
// 2. the annotation already exists
func (o *ApplyOptions) migrateToSSAIfNecessary(
helper *resource.Helper,
info *resource.Info,
) (migrated bool, err error) {
accessor, err := meta.Accessor(info.Object)
if err != nil {
return false, err
}
// To determine which field managers were used by kubectl for client-side apply,
// we search for managers used in `Update` operations which own the
// last-applied-configuration annotation.
//
// These are the client-side-apply managers which last changed the field.
//
// There may be multiple owners if multiple managers wrote the exact same
// configuration; in that case we migrate them all.
csaManagers := csaupgrade.FindFieldsOwners(
accessor.GetManagedFields(),
metav1.ManagedFieldsOperationUpdate,
lastAppliedAnnotationFieldPath)
managerNames := sets.New[string]()
for _, entry := range csaManagers {
managerNames.Insert(entry.Manager)
}
// Re-attempt the patch as long as it conflicts due to the ResourceVersion
// precondition test failing.
for i := 0; i < maxPatchRetry; i++ {
var patchData []byte
var obj runtime.Object
patchData, err = csaupgrade.UpgradeManagedFieldsPatch(
info.Object, managerNames, o.FieldManager)
if err != nil {
// If patch generation failed there was likely a bug.
return false, err
} else if patchData == nil {
// nil patch data means nothing to do - object is already migrated
return false, nil
}
// Send the patch to upgrade the managed fields if it is non-nil
obj, err = helper.Patch(
info.Namespace,
info.Name,
types.JSONPatchType,
patchData,
nil,
)
if err == nil {
// Stop retrying upon success.
info.Refresh(obj, false)
return true, nil
} else if !errors.IsConflict(err) {
// Only retry if there was a conflict
return false, err
}
// Refresh the object for next iteration
err = info.Get()
if err != nil {
// If there was an error fetching, return error
return false, err
}
}
// Reaching this point with a non-nil error means the patch kept
// conflicting and the retry limit was reached.
// Return the last error observed (which will be a conflict error).
return false, err
}
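The loop above retries by hand around maxPatchRetry because the generated JSON patch carries a resourceVersion test that the server rejects with a Conflict whenever the object changed in between. For comparison, the same shape written against client-go's generic retry helper and dynamic client might look like the sketch below (an illustration only, not what this PR uses; migrateWithRetry is a made-up name):

```go
package migrate

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/util/csaupgrade"
	"k8s.io/client-go/util/retry"
)

// migrateWithRetry regenerates the upgrade patch from a fresh copy of the
// object on every attempt, since a stale resourceVersion precondition in the
// patch is reported by the server as a Conflict.
func migrateWithRetry(ctx context.Context, client dynamic.ResourceInterface, name string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		live, err := client.Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		patch, err := csaupgrade.UpgradeManagedFieldsPatch(
			live, sets.New("kubectl-client-side-apply"), "kubectl")
		if err != nil || patch == nil {
			// Either patch generation failed, or a nil patch means the
			// object is already migrated.
			return err
		}
		_, err = client.Patch(ctx, name, types.JSONPatchType, patch, metav1.PatchOptions{})
		return err
	})
}
```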
func (o *ApplyOptions) shouldPrintObject() bool {
// Print object only if output format other than "name" is specified
shouldPrint := false
@@ -766,6 +982,16 @@ const (
// for backward compatibility to not conflict with old versions
// of kubectl server-side apply where `kubectl` has already been the field manager.
fieldManagerServerSideApply = "kubectl"
fieldManagerLastAppliedAnnotation = "kubectl-last-applied"
)
var (
lastAppliedAnnotationFieldPath = fieldpath.NewSet(
fieldpath.MakePathOrDie(
"metadata", "annotations",
corev1.LastAppliedConfigAnnotation),
)
)
// GetApplyFieldManagerFlag gets the field manager for kubectl apply

pkg/cmd/apply/apply_test.go

@@ -31,6 +31,7 @@ import (
openapi_v2 "github.com/google/gnostic/openapiv2"
"github.com/spf13/cobra"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -40,6 +41,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
sptest "k8s.io/apimachinery/pkg/util/strategicpatch/testing"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
@@ -47,6 +49,7 @@ import (
dynamicfakeclient "k8s.io/client-go/dynamic/fake"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/rest/fake"
"k8s.io/client-go/util/csaupgrade"
cmdtesting "k8s.io/kubectl/pkg/cmd/testing"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/scheme"
@@ -185,6 +188,7 @@ const (
filenameRCLastAppliedArgs = "../../../testdata/apply/rc-lastapplied-args.yaml"
filenameRCNoAnnotation = "../../../testdata/apply/rc-no-annotation.yaml"
filenameRCLASTAPPLIED = "../../../testdata/apply/rc-lastapplied.yaml"
filenameRCManagedFieldsLA = "../../../testdata/apply/rc-managedfields-lastapplied.yaml"
filenameSVC = "../../../testdata/apply/service.yaml"
filenameRCSVC = "../../../testdata/apply/rc-service.yaml"
filenameNoExistRC = "../../../testdata/apply/rc-noexist.yaml"
@@ -710,6 +714,157 @@ func TestApplyPruneObjects(t *testing.T) {
}
}
// Tests that applying an object in need of CSA migration results in a
// patch call to migrate its managed fields.
func TestApplyCSAMigration(t *testing.T) {
cmdtesting.InitTestErrorHandler(t)
nameRC, rcWithManagedFields := readAndAnnotateReplicationController(t, filenameRCManagedFieldsLA)
pathRC := "/namespaces/test/replicationcontrollers/" + nameRC
tf := cmdtesting.NewTestFactory().WithNamespace("test")
defer tf.Cleanup()
// The object after patch should be equivalent to the output of
// csaupgrade.UpgradeManagedFields
//
// Parse object into unstructured, apply patch
postPatchObj := &unstructured.Unstructured{}
err := json.Unmarshal(rcWithManagedFields, &postPatchObj.Object)
require.NoError(t, err)
expectedPatch, err := csaupgrade.UpgradeManagedFieldsPatch(postPatchObj, sets.New(FieldManagerClientSideApply), "kubectl")
require.NoError(t, err)
err = csaupgrade.UpgradeManagedFields(postPatchObj, sets.New("kubectl-client-side-apply"), "kubectl")
require.NoError(t, err)
postPatchData, err := json.Marshal(postPatchObj)
require.NoError(t, err)
patches := 0
targetPatches := 2
applies := 0
tf.UnstructuredClient = &fake.RESTClient{
NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
switch p, m := req.URL.Path, req.Method; {
case p == pathRC && m == "GET":
// During the retry loop a GET is performed after each conflicting patch;
// keep returning the unchanged data.
if patches < targetPatches {
bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
}
t.Fatalf("should not do a fetch in serverside-apply")
return nil, nil
case p == pathRC && m == "PATCH":
if got := req.Header.Get("Content-Type"); got == string(types.ApplyPatchType) {
defer func() {
applies += 1
}()
switch applies {
case 0:
// initial apply.
// Just return the same object but with managed fields
bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
case 1:
// The second apply should contain only the last-applied annotation, unmodified.
// Return the same object.
// NOTE: a real server would also update the managed fields here; we just
// return the object unchanged. It is not important for this test that the
// last-applied annotation shows up under the new field manager in the
// response, only that the client asks the server to do it.
bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
case 2, 3:
// By the time of the final applies the JSON patch has been formed, so
// reply with the upgraded object.
bodyRC := ioutil.NopCloser(bytes.NewReader(postPatchData))
return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
default:
require.Fail(t, "sent more apply requests than expected")
return &http.Response{StatusCode: http.StatusBadRequest, Header: cmdtesting.DefaultHeader()}, nil
}
} else if got == string(types.JSONPatchType) {
defer func() {
patches += 1
}()
// Require that the patch is equal to what is expected
body, err := ioutil.ReadAll(req.Body)
require.NoError(t, err)
require.Equal(t, expectedPatch, body)
switch patches {
case targetPatches - 1:
bodyRC := ioutil.NopCloser(bytes.NewReader(postPatchData))
return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
default:
// Return conflict until the client has retried enough times
return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader()}, nil
}
} else {
t.Fatalf("unexpected content-type: %s\n", got)
return nil, nil
}
default:
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
return nil, nil
}
}),
}
tf.OpenAPISchemaFunc = FakeOpenAPISchema.OpenAPISchemaFn
tf.FakeOpenAPIGetter = FakeOpenAPISchema.OpenAPIGetter
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams()
cmd := NewCmdApply("kubectl", tf, ioStreams)
cmd.Flags().Set("filename", filenameRC)
cmd.Flags().Set("output", "yaml")
cmd.Flags().Set("server-side", "true")
cmd.Flags().Set("show-managed-fields", "true")
cmd.Run(cmd, []string{})
// JSONPatch should have been attempted exactly the given number of times
require.Equal(t, targetPatches, patches, "should retry as many times as a conflict was returned")
require.Equal(t, 3, applies, "should perform specified # of apply calls upon migration")
require.Empty(t, errBuf.String())
// ensure that in the future there will be no migrations necessary
// (by showing migration is a no-op)
rc := &corev1.ReplicationController{}
if err := runtime.DecodeInto(codec, buf.Bytes(), rc); err != nil {
t.Fatal(err)
}
upgradedRC := rc.DeepCopyObject()
err = csaupgrade.UpgradeManagedFields(upgradedRC, sets.New("kubectl-client-side-apply"), "kubectl")
require.NoError(t, err)
require.NotEmpty(t, rc.ManagedFields)
require.Equal(t, rc, upgradedRC, "upgrading should be no-op in future")
// Apply the upgraded object.
// Expect only a single PATCH call to apiserver
ioStreams, _, _, errBuf = genericclioptions.NewTestIOStreams()
cmd = NewCmdApply("kubectl", tf, ioStreams)
cmd.Flags().Set("filename", filenameRC)
cmd.Flags().Set("output", "yaml")
cmd.Flags().Set("server-side", "true")
cmd.Flags().Set("show-managed-fields", "true")
cmd.Run(cmd, []string{})
require.Empty(t, errBuf.String())
require.Equal(t, 4, applies, "the second apply should add only a single server-side apply call (4 total)")
require.Equal(t, targetPatches, patches, "no more json patches should have been needed")
}
func TestApplyObjectOutput(t *testing.T) {
cmdtesting.InitTestErrorHandler(t)
nameRC, currentRC := readAndAnnotateReplicationController(t, filenameRC)

testdata/apply/rc-managedfields-lastapplied.yaml

@@ -0,0 +1,102 @@
apiVersion: v1
kind: ReplicationController
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"v1","kind":"ReplicationController","metadata":{"annotations":{},"labels":{"name":"test-rc"},"name":"test-rc","namespace":"test"},"spec":{"replicas":1,"template":{"metadata":{"labels":{"name":"test-rc"}},"spec":{"containers":[{"image":"nginx","name":"test-rc","ports":[{"containerPort":80}]}]}}}}
creationTimestamp: "2022-10-06T20:46:22Z"
generation: 1
labels:
name: test-rc
managedFields:
- apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:status:
f:fullyLabeledReplicas: {}
f:observedGeneration: {}
f:replicas: {}
manager: kube-controller-manager
operation: Update
subresource: status
time: "2022-10-06T20:46:22Z"
- apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
.: {}
f:kubectl.kubernetes.io/last-applied-configuration: {}
f:labels:
.: {}
f:name: {}
f:spec:
f:replicas: {}
f:selector: {}
f:template:
.: {}
f:metadata:
.: {}
f:creationTimestamp: {}
f:labels:
.: {}
f:name: {}
f:spec:
.: {}
f:containers:
.: {}
k:{"name":"test-rc"}:
.: {}
f:image: {}
f:imagePullPolicy: {}
f:name: {}
f:ports:
.: {}
k:{"containerPort":80,"protocol":"TCP"}:
.: {}
f:containerPort: {}
f:protocol: {}
f:resources: {}
f:terminationMessagePath: {}
f:terminationMessagePolicy: {}
f:dnsPolicy: {}
f:restartPolicy: {}
f:schedulerName: {}
f:securityContext: {}
f:terminationGracePeriodSeconds: {}
manager: kubectl-client-side-apply
operation: Update
time: "2022-10-06T20:46:22Z"
name: test-rc
namespace: test
resourceVersion: "290"
uid: ad68b34c-d6c5-4d09-b50d-ef49c109778d
spec:
replicas: 1
selector:
name: test-rc
template:
metadata:
creationTimestamp: null
labels:
name: test-rc
spec:
containers:
- image: nginx
imagePullPolicy: Always
name: test-rc
ports:
- containerPort: 80
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
fullyLabeledReplicas: 1
observedGeneration: 1
replicas: 1
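Once migrated, the kubectl-client-side-apply Update entry above is expected to be folded into an Apply entry owned by the default kubectl manager, with the last-applied-configuration annotation left in place. Sketched in Go (an approximation of the expected shape, not literal server output):

```go
package migrate

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// expectedPostMigrationEntry sketches the managed fields entry that replaces
// the kubectl-client-side-apply Update entry once the CSA migration has run.
var expectedPostMigrationEntry = metav1.ManagedFieldsEntry{
	Manager:    "kubectl",
	Operation:  metav1.ManagedFieldsOperationApply,
	APIVersion: "v1",
	FieldsType: "FieldsV1",
	// FieldsV1 keeps the same f:metadata / f:spec leaves that the Update
	// entry owned before the migration.
}
```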