Merge pull request #319 from seans3/final-inventory-fix

Fix for final inventory update
Kubernetes Prow Robot 2021-02-01 15:30:28 -08:00 committed by GitHub
commit 152b41c82b
6 changed files with 251 additions and 132 deletions

go.sum

@@ -703,8 +703,6 @@ sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvO
sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo=
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/kustomize/kyaml v0.10.5 h1:PbJcsZsEM7O3hHtUWTR+4WkHVbQRW9crSy75or1gRbI=
sigs.k8s.io/kustomize/kyaml v0.10.5/go.mod h1:P6Oy/ah/GZMKzJMIJA2a3/bc8YrBkuL5kJji13PSIzY=
sigs.k8s.io/kustomize/kyaml v0.10.6 h1:xUJxc/k8JoWqHUahaB8DTqY0KwEPxTbTGStvW8TOcDc=
sigs.k8s.io/kustomize/kyaml v0.10.6/go.mod h1:K9yg1k/HB/6xNOf5VH3LhTo1DK9/5ykSZO5uIv+Y/1k=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=


@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog"
"sigs.k8s.io/cli-utils/pkg/apply/event"
"sigs.k8s.io/cli-utils/pkg/apply/info"
"sigs.k8s.io/cli-utils/pkg/apply/poller"
@@ -89,6 +90,7 @@ func (a *Applier) Initialize() error {
// resources for the subsequent apply. Returns the sorted resources to
// apply as well as the objects for the prune, or an error if one occurred.
func (a *Applier) prepareObjects(localInv inventory.InventoryInfo, localObjs []*unstructured.Unstructured) (*ResourceObjects, error) {
klog.V(4).Infof("applier preparing %d objects", len(localObjs))
if localInv == nil {
return nil, fmt.Errorf("the local inventory can't be nil")
}
@@ -97,20 +99,22 @@ func (a *Applier) prepareObjects(localInv inventory.InventoryInfo, localObjs []*
}
// Ensures the namespace exists before applying the inventory object into it.
if invNamespace := inventoryNamespaceInSet(localInv, localObjs); invNamespace != nil {
klog.V(4).Infof("applier prepareObjects applying namespace %s", invNamespace.GetName())
if err := a.invClient.ApplyInventoryNamespace(invNamespace); err != nil {
return nil, err
}
}
klog.V(4).Infof("applier merging %d objects into inventory", len(localObjs))
currentObjs := object.UnstructuredsToObjMetas(localObjs)
// returns the objects (pruneIds) to prune after apply. The prune
// algorithm requires stopping if the merge is not successful. Otherwise,
// the stored objects in inventory could become inconsistent.
pruneIds, err := a.invClient.Merge(localInv, currentObjs)
if err != nil {
return nil, err
}
klog.V(4).Infof("after inventory merge; %d objects to prune", len(pruneIds))
// Sort order for applied resources.
sort.Sort(ordering.SortableUnstructureds(localObjs))
@@ -173,6 +177,7 @@ func (r *ResourceObjects) AllIds() []object.ObjMetadata {
// cancellation or timeout will only affect how long we Wait for the
// resources to become current.
func (a *Applier) Run(ctx context.Context, invInfo inventory.InventoryInfo, objects []*unstructured.Unstructured, options Options) <-chan event.Event {
klog.V(4).Infof("apply run for %d objects", len(objects))
eventChannel := make(chan event.Event)
setDefaults(&options)
a.invClient.SetDryRunStrategy(options.DryRunStrategy) // client shared with prune, so sets dry-run for prune too.
@@ -195,6 +200,7 @@ func (a *Applier) Run(ctx context.Context, invInfo inventory.InventoryInfo, obje
}
// Fetch the queue (channel) of tasks that should be executed.
klog.V(4).Infoln("applier building task queue...")
taskQueue := (&solver.TaskQueueSolver{
PruneOptions: a.PruneOptions,
Factory: a.provider.Factory(),
@@ -229,7 +235,9 @@ func (a *Applier) Run(ctx context.Context, invInfo inventory.InventoryInfo, obje
}
// Create a new TaskStatusRunner to execute the taskQueue.
klog.V(4).Infoln("applier building TaskStatusRunner...")
runner := taskrunner.NewTaskStatusRunner(resourceObjects.AllIds(), a.StatusPoller)
klog.V(4).Infoln("applier running TaskStatusRunner...")
err = runner.Run(ctx, taskQueue, eventChannel, taskrunner.Options{
PollInterval: options.PollInterval,
UseCache: true,
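
Aside on the merge step in prepareObjects above: Merge effectively does set arithmetic over object metadata, unioning the locally applied objects into the cluster inventory and handing back the leftovers as prune candidates (pruneIds). A minimal, self-contained sketch of that arithmetic; the objMeta type and merge helper below are illustrative stand-ins, not the cli-utils API:

```go
package main

import "fmt"

// objMeta is an illustrative stand-in for object.ObjMetadata:
// just enough identity to key a set.
type objMeta struct {
	Group, Kind, Namespace, Name string
}

// merge unions the local objects into the cluster inventory and
// returns the ids present in the cluster inventory but absent from
// the local set -- the candidates for pruning.
func merge(clusterInv, local []objMeta) (union, pruneIds []objMeta) {
	inLocal := map[objMeta]bool{}
	for _, id := range local {
		inLocal[id] = true
	}
	seen := map[objMeta]bool{}
	for _, id := range append(append([]objMeta{}, clusterInv...), local...) {
		if !seen[id] {
			seen[id] = true
			union = append(union, id)
		}
	}
	for _, id := range clusterInv {
		if !inLocal[id] {
			pruneIds = append(pruneIds, id)
		}
	}
	return union, pruneIds
}

func main() {
	cluster := []objMeta{{"apps", "Deployment", "default", "a"}, {"", "ConfigMap", "default", "b"}}
	local := []objMeta{{"apps", "Deployment", "default", "a"}}
	union, prune := merge(cluster, local)
	fmt.Println(len(union), prune) // 2 [{ ConfigMap default b}]
}
```

Stopping on a failed merge, as the comment in prepareObjects notes, keeps the stored inventory from drifting out of sync with what was actually applied.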


@@ -83,114 +83,174 @@ type Options struct {
}
// Prune deletes the set of resources which were previously applied
// (retrieved from previous inventory objects) but omitted in
// the current apply. Prune also deletes all previous inventory
// objects. Returns an error if there was a problem.
func (po *PruneOptions) Prune(localInv inventory.InventoryInfo, localObjs []*unstructured.Unstructured, currentUIDs sets.String,
taskContext *taskrunner.TaskContext, o Options) error {
// but omitted in the current apply. Calculates the set of objects
// to prune by removing the currently applied objects from the union
// set of the previously applied objects and currently applied objects
// stored in the cluster inventory. As a final step, stores the current
// inventory which is all the successfully applied objects and the
// prune failures. Does not stop when encountering prune failures.
// Returns an error for unrecoverable errors.
//
// Parameters:
// localInv - locally read inventory object
// localObjs - locally read, currently applied (attempted) objects
// currentUIDs - UIDs for successfully applied objects
// taskContext - task for apply/prune
func (po *PruneOptions) Prune(localInv inventory.InventoryInfo,
localObjs []*unstructured.Unstructured,
currentUIDs sets.String,
taskContext *taskrunner.TaskContext,
o Options) error {
// Validate parameters
if localInv == nil {
return fmt.Errorf("the local inventory object can't be nil")
}
invNamespace := localInv.Namespace()
klog.V(4).Infof("prune local inventory object: %s/%s", invNamespace, localInv.Name())
// Get the list of Object Meta from the local objects.
localIds := object.UnstructuredsToObjMetas(localObjs)
// Create the set of namespaces for currently (locally) applied objects, including
// the namespace the inventory object lives in (if it's not cluster-scoped). When
// pruning, check this set of namespaces to ensure these namespaces are not deleted.
localNamespaces := mergeObjNamespaces(localObjs)
if invNamespace != "" {
localNamespaces.Insert(invNamespace)
}
clusterObjs, err := po.InvClient.GetClusterObjs(localInv)
localNamespaces := localNamespaces(localInv, localIds)
clusterInv, err := po.InvClient.GetClusterObjs(localInv)
if err != nil {
return err
}
klog.V(4).Infof("prune %d currently applied objects", len(currentUIDs))
klog.V(4).Infof("prune %d previously applied objects", len(clusterObjs))
klog.V(4).Infof("prune: %d objects attempted to apply", len(localIds))
klog.V(4).Infof("prune: %d objects successfully applied", len(currentUIDs))
klog.V(4).Infof("prune: %d union objects stored in cluster inventory", len(clusterInv))
pruneObjs := object.SetDiff(clusterInv, localIds)
klog.V(4).Infof("prune: %d objects to prune (clusterInv - localIds)", len(pruneObjs))
// Sort the resources in reverse order using the same rules as are
// used for apply.
sort.Sort(sort.Reverse(ordering.SortableMetas(clusterObjs)))
for _, clusterObj := range clusterObjs {
mapping, err := po.mapper.RESTMapping(clusterObj.GroupKind)
if err != nil {
localIds = append(localIds, clusterObj)
taskContext.EventChannel() <- createPruneFailedEvent(clusterObj, err)
taskContext.CaptureResourceFailure(clusterObj)
continue
}
namespacedClient := po.client.Resource(mapping.Resource).Namespace(clusterObj.Namespace)
obj, err := namespacedClient.Get(context.TODO(), clusterObj.Name, metav1.GetOptions{})
sort.Sort(sort.Reverse(ordering.SortableMetas(pruneObjs)))
// Store prune failures to ensure they remain in the inventory.
pruneFailures := []object.ObjMetadata{}
for _, pruneObj := range pruneObjs {
klog.V(5).Infof("attempting prune: %s", pruneObj)
obj, err := po.getObject(pruneObj)
if err != nil {
// Object not found in cluster, so no need to delete it; skip to next object.
if apierrors.IsNotFound(err) {
klog.V(5).Infof("%s/%s not found in cluster--skipping",
pruneObj.Namespace, pruneObj.Name)
continue
}
localIds = append(localIds, clusterObj)
taskContext.EventChannel() <- createPruneFailedEvent(clusterObj, err)
taskContext.CaptureResourceFailure(clusterObj)
if klog.V(5) {
klog.Errorf("prune obj (%s/%s) UID retrival error: %s",
pruneObj.Namespace, pruneObj.Name, err)
}
pruneFailures = append(pruneFailures, pruneObj)
taskContext.EventChannel() <- createPruneFailedEvent(pruneObj, err)
taskContext.CaptureResourceFailure(pruneObj)
continue
}
metadata, err := meta.Accessor(obj)
if err != nil {
localIds = append(localIds, clusterObj)
taskContext.EventChannel() <- createPruneFailedEvent(clusterObj, err)
taskContext.CaptureResourceFailure(clusterObj)
continue
}
// If this cluster object is not also a currently applied
// object, then it has been omitted--prune it. If the cluster
// object is part of the local apply set, skip it.
uid := string(metadata.GetUID())
klog.V(7).Infof("prune previously applied object UID: %s", uid)
// Do not prune objects that are in the set of currently applied objects.
uid := string(obj.GetUID())
if currentUIDs.Has(uid) {
klog.V(7).Infof("prune object in current apply; do not prune: %s", uid)
klog.V(5).Infof("prune object in current apply; do not prune: %s", uid)
continue
}
// Handle lifecycle directive preventing deletion.
if !canPrune(localInv, obj, o.InventoryPolicy, uid) {
taskContext.EventChannel() <- createPruneEvent(clusterObj, obj, event.PruneSkipped)
localIds = append(localIds, clusterObj)
klog.V(4).Infof("skip prune for lifecycle directive %s/%s", pruneObj.Namespace, pruneObj.Name)
taskContext.EventChannel() <- createPruneEvent(pruneObj, obj, event.PruneSkipped)
pruneFailures = append(pruneFailures, pruneObj)
continue
}
// If regular pruning (not destroying), skip deleting namespace containing
// currently applied objects.
if !po.Destroy {
if clusterObj.GroupKind == object.CoreV1Namespace.GroupKind() &&
localNamespaces.Has(clusterObj.Name) {
klog.V(4).Infof("skip pruning namespace: %s", clusterObj.Name)
taskContext.EventChannel() <- createPruneEvent(clusterObj, obj, event.PruneSkipped)
localIds = append(localIds, clusterObj)
taskContext.CaptureResourceFailure(clusterObj)
if pruneObj.GroupKind == object.CoreV1Namespace.GroupKind() &&
localNamespaces.Has(pruneObj.Name) {
klog.V(4).Infof("skip pruning namespace: %s", pruneObj.Name)
taskContext.EventChannel() <- createPruneEvent(pruneObj, obj, event.PruneSkipped)
pruneFailures = append(pruneFailures, pruneObj)
taskContext.CaptureResourceFailure(pruneObj)
continue
}
}
if !o.DryRunStrategy.ClientOrServerDryRun() {
klog.V(4).Infof("prune object delete: %s/%s", clusterObj.Namespace, clusterObj.Name)
err = namespacedClient.Delete(context.TODO(), clusterObj.Name, metav1.DeleteOptions{})
klog.V(4).Infof("prune object delete: %s/%s", pruneObj.Namespace, pruneObj.Name)
namespacedClient, err := po.namespacedClient(pruneObj)
if err != nil {
taskContext.EventChannel() <- createPruneFailedEvent(clusterObj, err)
localIds = append(localIds, clusterObj)
taskContext.CaptureResourceFailure(clusterObj)
if klog.V(4) {
klog.Errorf("prune failed for %s/%s (%s)", pruneObj.Namespace, pruneObj.Name, err)
}
taskContext.EventChannel() <- createPruneFailedEvent(pruneObj, err)
pruneFailures = append(pruneFailures, pruneObj)
taskContext.CaptureResourceFailure(pruneObj)
continue
}
err = namespacedClient.Delete(context.TODO(), pruneObj.Name, metav1.DeleteOptions{})
if err != nil {
if klog.V(4) {
klog.Errorf("prune failed for %s/%s (%s)", pruneObj.Namespace, pruneObj.Name, err)
}
taskContext.EventChannel() <- createPruneFailedEvent(pruneObj, err)
pruneFailures = append(pruneFailures, pruneObj)
taskContext.CaptureResourceFailure(pruneObj)
continue
}
}
taskContext.EventChannel() <- createPruneEvent(clusterObj, obj, event.Pruned)
taskContext.EventChannel() <- createPruneEvent(pruneObj, obj, event.Pruned)
}
return po.InvClient.Replace(localInv, localIds)
// Calculate final inventory items, ensuring only successfully applied
// objects are in the inventory along with prune failures.
finalInventory := []object.ObjMetadata{}
for _, localObj := range localIds {
obj, err := po.getObject(localObj)
if err != nil {
if klog.V(4) {
klog.Errorf("error retrieving object for inventory determination: %s", err)
}
continue
}
uid := string(obj.GetUID())
if currentUIDs.Has(uid) {
klog.V(5).Infof("adding final inventory object %s/%s", localObj.Namespace, localObj.Name)
finalInventory = append(finalInventory, localObj)
} else {
klog.V(5).Infof("uid not found (%s); not adding final inventory obj %s/%s",
uid, localObj.Namespace, localObj.Name)
}
}
klog.V(4).Infof("final inventory %d successfully applied objects", len(finalInventory))
finalInventory = append(finalInventory, pruneFailures...)
klog.V(4).Infof("final inventory %d objects after appending prune failures", len(finalInventory))
return po.InvClient.Replace(localInv, finalInventory)
}
// mergeObjNamespaces returns a set of strings of all the namespaces
// for non cluster-scoped objects. These namespaces are forced to
// lower-case.
func mergeObjNamespaces(objs []*unstructured.Unstructured) sets.String {
func (po *PruneOptions) namespacedClient(obj object.ObjMetadata) (dynamic.ResourceInterface, error) {
mapping, err := po.mapper.RESTMapping(obj.GroupKind)
if err != nil {
return nil, err
}
return po.client.Resource(mapping.Resource).Namespace(obj.Namespace), nil
}
func (po *PruneOptions) getObject(obj object.ObjMetadata) (*unstructured.Unstructured, error) {
namespacedClient, err := po.namespacedClient(obj)
if err != nil {
return nil, err
}
return namespacedClient.Get(context.TODO(), obj.Name, metav1.GetOptions{})
}
// localNamespaces returns a set of strings of all the namespaces
// for the passed non-cluster-scoped localObjs, plus the namespace
// of the passed inventory object.
func localNamespaces(localInv inventory.InventoryInfo, localObjs []object.ObjMetadata) sets.String {
namespaces := sets.NewString()
for _, obj := range objs {
namespace := strings.TrimSpace(strings.ToLower(obj.GetNamespace()))
for _, obj := range localObjs {
namespace := strings.TrimSpace(strings.ToLower(obj.Namespace))
if namespace != "" {
namespaces.Insert(namespace)
}
}
invNamespace := strings.TrimSpace(strings.ToLower(localInv.Namespace()))
if invNamespace != "" {
namespaces.Insert(invNamespace)
}
return namespaces
}
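
The net effect of the rewritten Prune is a final-inventory rule: keep an attempted object only when its live UID is in the successfully-applied set, then re-add every prune failure so nothing silently disappears from the inventory. A compact sketch of just that rule, assuming the live UIDs have already been looked up; objMeta, uidOf, and finalInventory here are illustrative, not the package's types:

```go
package main

import "fmt"

type objMeta struct{ Namespace, Name string }

// finalInventory keeps only the locally applied objects whose live UID
// is in the successfully-applied set, then appends the prune failures,
// mirroring the bookkeeping at the end of Prune above.
func finalInventory(localIds []objMeta, uidOf map[objMeta]string,
	appliedUIDs map[string]bool, pruneFailures []objMeta) []objMeta {
	out := []objMeta{}
	for _, id := range localIds {
		if appliedUIDs[uidOf[id]] {
			out = append(out, id)
		}
	}
	return append(out, pruneFailures...)
}

func main() {
	a, b, c := objMeta{"default", "a"}, objMeta{"default", "b"}, objMeta{"default", "c"}
	inv := finalInventory(
		[]objMeta{a, b},                      // attempted applies
		map[objMeta]string{a: "u1", b: "u2"}, // live UIDs
		map[string]bool{"u1": true},          // only a succeeded
		[]objMeta{c},                         // c failed to prune
	)
	fmt.Println(inv) // [{default a} {default c}]
}
```

Replacing the inventory with this computed set, rather than with localIds as the old Replace call did, is the behavioral change behind the PR's "final inventory" fix.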


@@ -174,60 +174,62 @@ func TestPrune(t *testing.T) {
tests := map[string]struct {
// pastObjs/currentObjs do NOT contain the inventory object.
// Inventory object is generated from these past/current objects.
pastObjs []*unstructured.Unstructured
currentObjs []*unstructured.Unstructured
prunedObjs []*unstructured.Unstructured
pruneEventObjs []*unstructured.Unstructured
isError bool
pastObjs []*unstructured.Unstructured
currentObjs []*unstructured.Unstructured
prunedObjs []*unstructured.Unstructured
// finalClusterObjs are the objects in cluster at the end of prune,
// and the objects which should be stored in the inventory object.
finalClusterObjs []*unstructured.Unstructured
pruneEventObjs []*unstructured.Unstructured
}{
"Past and current objects are empty; no pruned objects": {
pastObjs: []*unstructured.Unstructured{},
currentObjs: []*unstructured.Unstructured{},
prunedObjs: []*unstructured.Unstructured{},
pruneEventObjs: []*unstructured.Unstructured{},
isError: false,
pastObjs: []*unstructured.Unstructured{},
currentObjs: []*unstructured.Unstructured{},
prunedObjs: []*unstructured.Unstructured{},
finalClusterObjs: []*unstructured.Unstructured{},
pruneEventObjs: []*unstructured.Unstructured{},
},
"Past and current objects are the same; no pruned objects": {
pastObjs: []*unstructured.Unstructured{namespace, pdb},
currentObjs: []*unstructured.Unstructured{pdb, namespace},
prunedObjs: []*unstructured.Unstructured{},
pruneEventObjs: []*unstructured.Unstructured{},
isError: false,
pastObjs: []*unstructured.Unstructured{namespace, pdb},
currentObjs: []*unstructured.Unstructured{pdb, namespace},
prunedObjs: []*unstructured.Unstructured{},
finalClusterObjs: []*unstructured.Unstructured{namespace, pdb},
pruneEventObjs: []*unstructured.Unstructured{},
},
"No past objects; no pruned objects": {
pastObjs: []*unstructured.Unstructured{},
currentObjs: []*unstructured.Unstructured{pdb, namespace},
pruneEventObjs: []*unstructured.Unstructured{},
prunedObjs: []*unstructured.Unstructured{},
isError: false,
pastObjs: []*unstructured.Unstructured{},
currentObjs: []*unstructured.Unstructured{pdb, namespace},
pruneEventObjs: []*unstructured.Unstructured{},
finalClusterObjs: []*unstructured.Unstructured{pdb, namespace},
prunedObjs: []*unstructured.Unstructured{},
},
"No current objects; all previous objects pruned in correct order": {
pastObjs: []*unstructured.Unstructured{namespace, pdb, role},
currentObjs: []*unstructured.Unstructured{},
prunedObjs: []*unstructured.Unstructured{pdb, role, namespace},
pruneEventObjs: []*unstructured.Unstructured{pdb, role, namespace},
isError: false,
pastObjs: []*unstructured.Unstructured{pdb, role, pod},
currentObjs: []*unstructured.Unstructured{},
prunedObjs: []*unstructured.Unstructured{pod, pdb, role},
finalClusterObjs: []*unstructured.Unstructured{},
pruneEventObjs: []*unstructured.Unstructured{pod, pdb, role},
},
"Omitted object is pruned": {
pastObjs: []*unstructured.Unstructured{namespace, pdb},
currentObjs: []*unstructured.Unstructured{pdb, role},
prunedObjs: []*unstructured.Unstructured{namespace},
pruneEventObjs: []*unstructured.Unstructured{namespace},
isError: false,
pastObjs: []*unstructured.Unstructured{pdb, role},
currentObjs: []*unstructured.Unstructured{pdb},
prunedObjs: []*unstructured.Unstructured{role},
finalClusterObjs: []*unstructured.Unstructured{pdb},
pruneEventObjs: []*unstructured.Unstructured{role},
},
"Prevent delete lifecycle annotation stops pruning": {
pastObjs: []*unstructured.Unstructured{preventDelete, pdb},
currentObjs: []*unstructured.Unstructured{pdb, role},
prunedObjs: []*unstructured.Unstructured{},
pruneEventObjs: []*unstructured.Unstructured{preventDelete},
isError: false,
pastObjs: []*unstructured.Unstructured{preventDelete, pdb},
currentObjs: []*unstructured.Unstructured{pdb, role},
prunedObjs: []*unstructured.Unstructured{},
finalClusterObjs: []*unstructured.Unstructured{preventDelete, pdb, role},
pruneEventObjs: []*unstructured.Unstructured{preventDelete},
},
"Namespace not pruned if objects are still in it": {
pastObjs: []*unstructured.Unstructured{namespace, pdb, pod},
currentObjs: []*unstructured.Unstructured{pod},
prunedObjs: []*unstructured.Unstructured{pdb},
pruneEventObjs: []*unstructured.Unstructured{pdb, namespace},
isError: false,
pastObjs: []*unstructured.Unstructured{namespace, pdb, pod},
currentObjs: []*unstructured.Unstructured{pod},
prunedObjs: []*unstructured.Unstructured{pdb},
finalClusterObjs: []*unstructured.Unstructured{namespace, pod},
pruneEventObjs: []*unstructured.Unstructured{pdb, namespace},
},
}
for name, tc := range tests {
@@ -235,14 +237,18 @@ func TestPrune(t *testing.T) {
drs := common.Strategies[i]
t.Run(name, func(t *testing.T) {
po := NewPruneOptions()
// Set up the previously applied objects.
// Set up the union of previously applied objects and the
// currently applied objects as the current inventory items.
clusterObjs := object.UnstructuredsToObjMetas(tc.pastObjs)
po.InvClient = inventory.NewFakeInventoryClient(clusterObjs)
// Set up the currently applied objects.
currentInventory := createInventoryInfo(tc.currentObjs...)
currentObjs := object.UnstructuredsToObjMetas(tc.currentObjs)
fakeInvClient := inventory.NewFakeInventoryClient(object.Union(clusterObjs, currentObjs))
po.InvClient = fakeInvClient
// Set up the current inventory with union of objects.
unionObjs := unionObjects(tc.pastObjs, tc.currentObjs)
currentInventory := createInventoryInfo(unionObjs...)
// Set up the fake dynamic client to recognize all objects, and the RESTMapper.
objs := []runtime.Object{}
for _, obj := range tc.pastObjs {
for _, obj := range unionObjs {
objs = append(objs, obj)
}
po.client = fake.NewSimpleDynamicClient(scheme.Scheme, objs...)
@@ -259,35 +265,57 @@ func TestPrune(t *testing.T) {
DryRunStrategy: drs,
})
}()
if !tc.isError {
if err != nil {
t.Fatalf("Unexpected error during Prune(): %#v", err)
}
var actualPruneEvents []event.Event
for e := range eventChannel {
actualPruneEvents = append(actualPruneEvents, e)
}
if want, got := len(tc.pruneEventObjs), len(actualPruneEvents); want != got {
t.Errorf("Expected (%d) prune events, got (%d)", want, got)
}
if err != nil {
t.Fatalf("Unexpected error during Prune(): %#v", err)
}
for i, obj := range tc.pruneEventObjs {
e := actualPruneEvents[i]
expKind := obj.GetObjectKind().GroupVersionKind().Kind
actKind := e.PruneEvent.Identifier.GroupKind.Kind
if expKind != actKind {
t.Errorf("Expected kind %s, got %s", expKind, actKind)
}
// Test that the correct inventory objects are stored at the end of the prune.
actualObjs := fakeInvClient.Objs
expectedObjs := object.UnstructuredsToObjMetas(tc.finalClusterObjs)
if !object.SetEquals(expectedObjs, actualObjs) {
t.Errorf("expected inventory objs (%s), got (%s)", expectedObjs, actualObjs)
}
var actualPruneEvents []event.Event
for e := range eventChannel {
actualPruneEvents = append(actualPruneEvents, e)
}
if want, got := len(tc.pruneEventObjs), len(actualPruneEvents); want != got {
t.Errorf("Expected (%d) prune events, got (%d)", want, got)
}
for i, obj := range tc.pruneEventObjs {
e := actualPruneEvents[i]
expKind := obj.GetObjectKind().GroupVersionKind().Kind
actKind := e.PruneEvent.Identifier.GroupKind.Kind
if expKind != actKind {
t.Errorf("Expected kind %s, got %s", expKind, actKind)
}
} else if err == nil {
t.Fatalf("Expected error during Prune() but received none")
}
})
}
}
}
// unionObjects returns the union of sliceA and sliceB as a slice of unstructured objects.
func unionObjects(sliceA []*unstructured.Unstructured, sliceB []*unstructured.Unstructured) []*unstructured.Unstructured {
m := map[string]*unstructured.Unstructured{}
for _, a := range sliceA {
metadata := object.UnstructuredToObjMeta(a)
m[metadata.String()] = a
}
for _, b := range sliceB {
metadata := object.UnstructuredToObjMeta(b)
m[metadata.String()] = b
}
union := []*unstructured.Unstructured{}
for _, u := range m {
union = append(union, u)
}
return union
}
// populateObjectIds returns a set of strings containing
// the UIDs of the passed objects.
func populateObjectIds(objs []*unstructured.Unstructured, t *testing.T) sets.String {


@@ -15,6 +15,7 @@ import (
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic"
"k8s.io/klog"
"k8s.io/kubectl/pkg/cmd/apply"
"k8s.io/kubectl/pkg/cmd/delete"
"k8s.io/kubectl/pkg/cmd/util"
@@ -73,6 +74,7 @@ var getClusterObj = getClusterObject
func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
go func() {
objects := a.Objects
klog.V(4).Infof("apply task starting; attempting to apply %d objects", len(objects))
// If this is a dry run, we need to handle situations where
// we have a CRD and a CR in the same resource set, but the CRD
@@ -102,6 +104,7 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
// for that here. It could happen if this is dry-run and we removed
// all resources in the previous step.
if len(objects) == 0 {
klog.V(4).Infoln("no objects to apply after dry-run filtering--returning")
a.sendTaskResult(taskContext)
return
}
@@ -111,11 +114,15 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
ao, dynamic, err := applyOptionsFactoryFunc(taskContext.EventChannel(),
a.ServerSideOptions, a.DryRunStrategy, a.Factory)
if err != nil {
if klog.V(4) {
klog.Errorf("error creating ApplyOptions (%s)--returning", err)
}
sendBatchApplyEvents(taskContext, objects, err)
a.sendTaskResult(taskContext)
return
}
klog.V(4).Infof("attempting to apply %d remaining objects", len(objects))
var infos []*resource.Info
for _, obj := range objects {
// Set the client and mapping fields on the provided
@@ -123,6 +130,10 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
info, err := a.InfoHelper.BuildInfo(obj)
id := object.UnstructuredToObjMeta(obj)
if err != nil {
if klog.V(4) {
klog.Errorf("unable to convert obj to info for %s/%s (%s)--continue",
obj.GetNamespace(), obj.GetName(), err)
}
taskContext.EventChannel() <- createApplyEvent(
id, event.Failed, applyerror.NewUnknownTypeError(err))
taskContext.CaptureResourceFailure(id)
@@ -132,6 +143,10 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
clusterObj, err := getClusterObj(dynamic, info)
if err != nil {
if !apierrors.IsNotFound(err) {
if klog.V(4) {
klog.Errorf("error retrieving %s/%s from cluster--continue",
info.Namespace, info.Name)
}
taskContext.EventChannel() <- createApplyEvent(
id,
event.Unchanged,
@@ -143,6 +158,8 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
infos = append(infos, info)
canApply, err := inventory.CanApply(a.InvInfo, clusterObj, a.InventoryPolicy)
if !canApply {
klog.V(5).Infof("can not apply %s/%s--continue",
clusterObj.GetNamespace(), clusterObj.GetName())
taskContext.EventChannel() <- createApplyEvent(
id,
event.Unchanged,
@@ -153,8 +170,12 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
// add the inventory annotation to the resource being applied.
inventory.AddInventoryIDAnnotation(obj, a.InvInfo)
ao.SetObjects([]*resource.Info{info})
klog.V(5).Infof("applying %s/%s...", info.Namespace, info.Name)
err = ao.Run()
if err != nil {
if klog.V(4) {
klog.Errorf("error applying (%s/%s) %s", info.Namespace, info.Name, err)
}
taskContext.EventChannel() <- createApplyEvent(
id, event.Failed, applyerror.NewApplyRunError(err))
taskContext.CaptureResourceFailure(id)
@@ -173,9 +194,12 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
if err != nil {
continue
}
// Only add a resource if it was successfully applied.
uid := acc.GetUID()
gen := acc.GetGeneration()
taskContext.ResourceApplied(id, uid, gen)
if string(uid) != "" {
gen := acc.GetGeneration()
taskContext.ResourceApplied(id, uid, gen)
}
}
}
a.sendTaskResult(taskContext)
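
The UID guard added before ResourceApplied matters because an apply that did not actually persist the object (for example, a dry run of a not-yet-created object) can come back with an empty UID; recording it would later fail the live-UID check that builds the final inventory in Prune. A trivial illustration of the guard; the applied type is a hypothetical stand-in for TaskContext's bookkeeping, not the real type:

```go
package main

import "fmt"

// applied mirrors, in spirit, the applied-resource bookkeeping that
// TaskContext performs; it is a hypothetical stand-in.
type applied map[string]bool

// record only marks a resource as applied when the server returned a
// non-empty UID, matching the guard added to ApplyTask above.
func (a applied) record(uid string) {
	if uid != "" {
		a[uid] = true
	}
}

func main() {
	a := applied{}
	a.record("uid-123") // real apply result: recorded
	a.record("")        // no UID returned: ignored
	fmt.Println(len(a)) // 1
}
```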


@@ -440,6 +440,7 @@ var deployment = toUnstructured(map[string]interface{}{
"metadata": map[string]interface{}{
"name": "deploy",
"namespace": "default",
"uid": "uid-deployment",
},
})