mirror of https://github.com/fluxcd/cli-utils.git

Improve the event hierarchy

parent a051c61f67
commit ae80e561e2
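The hunks below flatten the event hierarchy: the per-event `*EventType` enums and their `*EventCompleted`/`*EventFailed` values disappear, the formatter methods lose their stats/collector parameters, and a new `event.ActionGroupEvent` (with `Started`/`Finished`) takes over start and finish signalling. As a reading aid, here is a minimal sketch of what consuming the event channel looks like after the change. It is not code from this commit: the field and constant names are taken from usages elsewhere in the diff, and the surrounding plumbing (the channel, error handling, the `example` package name) is assumed.

```go
package example

import (
	"fmt"

	"sigs.k8s.io/cli-utils/pkg/apply/event"
)

// handleEvents sketches the shape of the new hierarchy; it is not one of the
// printer implementations changed in this commit.
func handleEvents(ch <-chan event.Event) {
	for e := range ch {
		switch e.Type {
		case event.InitType:
			// e.InitEvent.ActionGroups now lists the planned task groups
			// (replacing the old fixed apply/prune ResourceGroups pair).
		case event.ActionGroupType:
			age := e.ActionGroupEvent
			if age.Action == event.ApplyAction && age.Type == event.Finished {
				// Replaces the old ApplyEventCompleted signal.
				fmt.Println("apply phase finished")
			}
		case event.ApplyType:
			// Flattened: Identifier, Operation, Error; no ApplyEventType anymore.
			ae := e.ApplyEvent
			if ae.Error != nil {
				fmt.Printf("%s apply failed: %v\n", ae.Identifier.Name, ae.Error)
			}
		case event.StatusType:
			// Flattened: Identifier plus PollResourceInfo from the status poller.
			se := e.StatusEvent
			fmt.Println(se.Identifier.Name, se.PollResourceInfo.Status)
		}
	}
}
```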
@@ -8,8 +8,6 @@ import (
	"io"
	"strings"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"sigs.k8s.io/cli-utils/pkg/apply/event"

@@ -29,78 +27,57 @@ type formatter struct {
|
|||
print printFunc
|
||||
}
|
||||
|
||||
func (ef *formatter) FormatApplyEvent(ae event.ApplyEvent, as *list.ApplyStats, c list.Collector) error {
|
||||
switch ae.Type {
|
||||
case event.ApplyEventCompleted:
|
||||
output := fmt.Sprintf("%d resource(s) applied. %d created, %d unchanged, %d configured, %d failed",
|
||||
as.Sum(), as.Created, as.Unchanged, as.Configured, as.Failed)
|
||||
// Only print information about serverside apply if some of the
|
||||
// resources actually were applied serverside.
|
||||
if as.ServersideApplied > 0 {
|
||||
output += fmt.Sprintf(", %d serverside applied", as.ServersideApplied)
|
||||
}
|
||||
ef.print(output)
|
||||
for id, se := range c.LatestStatus() {
|
||||
ef.printResourceStatus(id, se)
|
||||
}
|
||||
case event.ApplyEventResourceUpdate:
|
||||
gk := ae.Identifier.GroupKind
|
||||
name := ae.Identifier.Name
|
||||
if ae.Error != nil {
|
||||
ef.print("%s failed: %s", resourceIDToString(gk, name),
|
||||
ae.Error.Error())
|
||||
} else {
|
||||
ef.print("%s %s", resourceIDToString(gk, name),
|
||||
strings.ToLower(ae.Operation.String()))
|
||||
}
|
||||
func (ef *formatter) FormatApplyEvent(ae event.ApplyEvent) error {
|
||||
gk := ae.Identifier.GroupKind
|
||||
name := ae.Identifier.Name
|
||||
if ae.Error != nil {
|
||||
ef.print("%s apply failed: %s", resourceIDToString(gk, name),
|
||||
ae.Error.Error())
|
||||
} else {
|
||||
ef.print("%s %s", resourceIDToString(gk, name),
|
||||
strings.ToLower(ae.Operation.String()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ef *formatter) FormatStatusEvent(se event.StatusEvent, _ list.Collector) error {
|
||||
if se.Type == event.StatusEventResourceUpdate {
|
||||
id := se.Resource.Identifier
|
||||
ef.printResourceStatus(id, se)
|
||||
}
|
||||
func (ef *formatter) FormatStatusEvent(se event.StatusEvent) error {
|
||||
id := se.Identifier
|
||||
ef.printResourceStatus(id, se)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ef *formatter) FormatPruneEvent(pe event.PruneEvent, ps *list.PruneStats) error {
|
||||
switch pe.Type {
|
||||
case event.PruneEventCompleted:
|
||||
ef.print("%d resource(s) pruned, %d skipped, %d failed", ps.Pruned, ps.Skipped, ps.Failed)
|
||||
case event.PruneEventResourceUpdate:
|
||||
gk := pe.Identifier.GroupKind
|
||||
switch pe.Operation {
|
||||
case event.Pruned:
|
||||
ef.print("%s pruned", resourceIDToString(gk, pe.Identifier.Name))
|
||||
case event.PruneSkipped:
|
||||
ef.print("%s prune skipped", resourceIDToString(gk, pe.Identifier.Name))
|
||||
}
|
||||
case event.PruneEventFailed:
|
||||
ef.print("%s prune failed: %s", resourceIDToString(pe.Identifier.GroupKind, pe.Identifier.Name),
|
||||
func (ef *formatter) FormatPruneEvent(pe event.PruneEvent) error {
|
||||
gk := pe.Identifier.GroupKind
|
||||
if pe.Error != nil {
|
||||
ef.print("%s prune failed: %s", resourceIDToString(gk, pe.Identifier.Name),
|
||||
pe.Error.Error())
|
||||
return nil
|
||||
}
|
||||
|
||||
switch pe.Operation {
|
||||
case event.Pruned:
|
||||
ef.print("%s pruned", resourceIDToString(gk, pe.Identifier.Name))
|
||||
case event.PruneSkipped:
|
||||
ef.print("%s prune skipped", resourceIDToString(gk, pe.Identifier.Name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ef *formatter) FormatDeleteEvent(de event.DeleteEvent, ds *list.DeleteStats) error {
|
||||
switch de.Type {
|
||||
case event.DeleteEventCompleted:
|
||||
ef.print("%d resource(s) deleted, %d skipped", ds.Deleted, ds.Skipped)
|
||||
case event.DeleteEventResourceUpdate:
|
||||
obj := de.Object
|
||||
gvk := obj.GetObjectKind().GroupVersionKind()
|
||||
name := getName(obj)
|
||||
switch de.Operation {
|
||||
case event.Deleted:
|
||||
ef.print("%s deleted", resourceIDToString(gvk.GroupKind(), name))
|
||||
case event.DeleteSkipped:
|
||||
ef.print("%s delete skipped", resourceIDToString(gvk.GroupKind(), name))
|
||||
}
|
||||
case event.DeleteEventFailed:
|
||||
ef.print("%s deletion failed: %s", resourceIDToString(de.Identifier.GroupKind, de.Identifier.Name),
|
||||
func (ef *formatter) FormatDeleteEvent(de event.DeleteEvent) error {
|
||||
gk := de.Identifier.GroupKind
|
||||
name := de.Identifier.Name
|
||||
|
||||
if de.Error != nil {
|
||||
ef.print("%s deletion failed: %s", resourceIDToString(gk, name),
|
||||
de.Error.Error())
|
||||
return nil
|
||||
}
|
||||
|
||||
switch de.Operation {
|
||||
case event.Deleted:
|
||||
ef.print("%s deleted", resourceIDToString(gk, name))
|
||||
case event.DeleteSkipped:
|
||||
ef.print("%s delete skipped", resourceIDToString(gk, name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@@ -109,18 +86,46 @@ func (ef *formatter) FormatErrorEvent(_ event.ErrorEvent) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (ef *formatter) printResourceStatus(id object.ObjMetadata, se event.StatusEvent) {
|
||||
ef.print("%s is %s: %s", resourceIDToString(id.GroupKind, id.Name),
|
||||
se.Resource.Status.String(), se.Resource.Message)
|
||||
}
|
||||
func (ef *formatter) FormatActionGroupEvent(age event.ActionGroupEvent, ags []event.ActionGroup,
|
||||
as *list.ApplyStats, ps *list.PruneStats, ds *list.DeleteStats, c list.Collector) error {
|
||||
if age.Action == event.ApplyAction && age.Type == event.Finished {
|
||||
output := fmt.Sprintf("%d resource(s) applied. %d created, %d unchanged, %d configured, %d failed",
|
||||
as.Sum(), as.Created, as.Unchanged, as.Configured, as.Failed)
|
||||
// Only print information about serverside apply if some of the
|
||||
// resources actually were applied serverside.
|
||||
if as.ServersideApplied > 0 {
|
||||
output += fmt.Sprintf(", %d serverside applied", as.ServersideApplied)
|
||||
}
|
||||
ef.print(output)
|
||||
}
|
||||
|
||||
func getName(obj runtime.Object) string {
|
||||
if acc, err := meta.Accessor(obj); err == nil {
|
||||
if n := acc.GetName(); len(n) > 0 {
|
||||
return n
|
||||
if age.Action == event.PruneAction && age.Type == event.Finished {
|
||||
ef.print("%d resource(s) pruned, %d skipped, %d failed", ps.Pruned, ps.Skipped, ps.Failed)
|
||||
}
|
||||
|
||||
if age.Action == event.DeleteAction && age.Type == event.Finished {
|
||||
ef.print("%d resource(s) deleted, %d skipped", ds.Deleted, ds.Skipped)
|
||||
}
|
||||
|
||||
if age.Action == event.WaitAction && age.Type == event.Started {
|
||||
ag, found := list.ActionGroupByName(age.GroupName, ags)
|
||||
if !found {
|
||||
panic(fmt.Errorf("unknown action group name %q", age.GroupName))
|
||||
}
|
||||
for id, se := range c.LatestStatus() {
|
||||
// Only print information about objects that we actually care about
|
||||
// for this wait task.
|
||||
if found := object.ObjMetas(ag.Identifiers).Contains(id); found {
|
||||
ef.printResourceStatus(id, se)
|
||||
}
|
||||
}
|
||||
}
|
||||
return "<unknown>"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ef *formatter) printResourceStatus(id object.ObjMetadata, se event.StatusEvent) {
|
||||
ef.print("%s is %s: %s", resourceIDToString(id.GroupKind, id.Name),
|
||||
se.PollResourceInfo.Status.String(), se.PollResourceInfo.Message)
|
||||
}
|
||||
|
||||
// resourceIDToString returns the string representation of a GroupKind and a resource name.
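Collected together, the new signatures in the hunk above give the human-readable formatter the following method set. This is an illustrative sketch assembled from this diff, not the canonical interface definition (which lives in the `list` printer package); the import paths are assumptions.

```go
package example

import (
	"sigs.k8s.io/cli-utils/pkg/apply/event"
	"sigs.k8s.io/cli-utils/pkg/print/list" // assumed import path for the list package
)

// Formatter mirrors the method set visible in the hunk above: per-resource
// events are formatted without stats or collectors, and the aggregate
// information moves to FormatActionGroupEvent.
type Formatter interface {
	FormatApplyEvent(ae event.ApplyEvent) error
	FormatStatusEvent(se event.StatusEvent) error
	FormatPruneEvent(pe event.PruneEvent) error
	FormatDeleteEvent(de event.DeleteEvent) error
	FormatErrorEvent(ee event.ErrorEvent) error
	FormatActionGroupEvent(age event.ActionGroupEvent, ags []event.ActionGroup,
		as *list.ApplyStats, ps *list.PruneStats, ds *list.DeleteStats, c list.Collector) error
}
```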
@@ -32,7 +32,6 @@ func TestFormatter_FormatApplyEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunNone,
|
||||
event: event.ApplyEvent{
|
||||
Operation: event.Created,
|
||||
Type: event.ApplyEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "default", "my-dep"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep created",
|
||||
|
|
@ -41,7 +40,6 @@ func TestFormatter_FormatApplyEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunClient,
|
||||
event: event.ApplyEvent{
|
||||
Operation: event.Configured,
|
||||
Type: event.ApplyEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep configured (preview)",
|
||||
|
|
@ -50,7 +48,6 @@ func TestFormatter_FormatApplyEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunServer,
|
||||
event: event.ApplyEvent{
|
||||
Operation: event.Configured,
|
||||
Type: event.ApplyEventResourceUpdate,
|
||||
Identifier: createIdentifier("batch", "CronJob", "foo", "my-cron"),
|
||||
},
|
||||
expected: "cronjob.batch/my-cron configured (preview-server)",
|
||||
|
|
@ -58,42 +55,10 @@ func TestFormatter_FormatApplyEvent(t *testing.T) {
|
|||
"apply event with error should display the error": {
|
||||
previewStrategy: common.DryRunServer,
|
||||
event: event.ApplyEvent{
|
||||
Operation: event.Failed,
|
||||
Type: event.ApplyEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
Error: fmt.Errorf("this is a test error"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep failed: this is a test error (preview-server)",
|
||||
},
|
||||
"completed event": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.ApplyEvent{
|
||||
Type: event.ApplyEventCompleted,
|
||||
},
|
||||
applyStats: &list.ApplyStats{
|
||||
ServersideApplied: 1,
|
||||
},
|
||||
statusCollector: &fakeCollector{
|
||||
m: map[object.ObjMetadata]event.StatusEvent{
|
||||
object.ObjMetadata{ //nolint:gofmt
|
||||
GroupKind: schema.GroupKind{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Namespace: "foo",
|
||||
Name: "my-dep",
|
||||
}: {
|
||||
Resource: &pollevent.ResourceStatus{
|
||||
Status: status.CurrentStatus,
|
||||
Message: "Resource is Current",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: `
|
||||
1 resource(s) applied. 0 created, 0 unchanged, 0 configured, 0 failed, 1 serverside applied
|
||||
deployment.apps/my-dep is Current: Resource is Current
|
||||
`,
|
||||
expected: "deployment.apps/my-dep apply failed: this is a test error (preview-server)",
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -101,7 +66,7 @@ deployment.apps/my-dep is Current: Resource is Current
|
|||
t.Run(tn, func(t *testing.T) {
|
||||
ioStreams, _, out, _ := genericclioptions.NewTestIOStreams() //nolint:dogsled
|
||||
formatter := NewFormatter(ioStreams, tc.previewStrategy)
|
||||
err := formatter.FormatApplyEvent(tc.event, tc.applyStats, tc.statusCollector)
|
||||
err := formatter.FormatApplyEvent(tc.event)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, strings.TrimSpace(tc.expected), strings.TrimSpace(out.String()))
|
||||
|
|
@ -119,8 +84,15 @@ func TestFormatter_FormatStatusEvent(t *testing.T) {
|
|||
"resource update with Current status": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.StatusEvent{
|
||||
Type: event.StatusEventResourceUpdate,
|
||||
Resource: &pollevent.ResourceStatus{
|
||||
Identifier: object.ObjMetadata{
|
||||
GroupKind: schema.GroupKind{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Namespace: "foo",
|
||||
Name: "bar",
|
||||
},
|
||||
PollResourceInfo: &pollevent.ResourceStatus{
|
||||
Identifier: object.ObjMetadata{
|
||||
GroupKind: schema.GroupKind{
|
||||
Group: "apps",
|
||||
|
|
@ -141,7 +113,7 @@ func TestFormatter_FormatStatusEvent(t *testing.T) {
|
|||
t.Run(tn, func(t *testing.T) {
|
||||
ioStreams, _, out, _ := genericclioptions.NewTestIOStreams() //nolint:dogsled
|
||||
formatter := NewFormatter(ioStreams, tc.previewStrategy)
|
||||
err := formatter.FormatStatusEvent(tc.event, tc.statusCollector)
|
||||
err := formatter.FormatStatusEvent(tc.event)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, strings.TrimSpace(tc.expected), strings.TrimSpace(out.String()))
|
||||
|
|
@ -160,7 +132,6 @@ func TestFormatter_FormatPruneEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunNone,
|
||||
event: event.PruneEvent{
|
||||
Operation: event.Pruned,
|
||||
Type: event.PruneEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "default", "my-dep"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep pruned",
|
||||
|
|
@ -169,7 +140,6 @@ func TestFormatter_FormatPruneEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunClient,
|
||||
event: event.PruneEvent{
|
||||
Operation: event.PruneSkipped,
|
||||
Type: event.PruneEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep prune skipped (preview)",
|
||||
|
|
@ -177,30 +147,18 @@ func TestFormatter_FormatPruneEvent(t *testing.T) {
|
|||
"resource with prune error": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.PruneEvent{
|
||||
Type: event.PruneEventFailed,
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
Error: fmt.Errorf("this is a test"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep prune failed: this is a test",
|
||||
},
|
||||
"prune event with completed status": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.PruneEvent{
|
||||
Type: event.PruneEventCompleted,
|
||||
},
|
||||
pruneStats: &list.PruneStats{
|
||||
Pruned: 1,
|
||||
Skipped: 2,
|
||||
},
|
||||
expected: "1 resource(s) pruned, 2 skipped, 0 failed",
|
||||
},
|
||||
}
|
||||
|
||||
for tn, tc := range testCases {
|
||||
t.Run(tn, func(t *testing.T) {
|
||||
ioStreams, _, out, _ := genericclioptions.NewTestIOStreams() //nolint:dogsled
|
||||
formatter := NewFormatter(ioStreams, tc.previewStrategy)
|
||||
err := formatter.FormatPruneEvent(tc.event, tc.pruneStats)
|
||||
err := formatter.FormatPruneEvent(tc.event)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, strings.TrimSpace(tc.expected), strings.TrimSpace(out.String()))
|
||||
|
|
@ -219,49 +177,37 @@ func TestFormatter_FormatDeleteEvent(t *testing.T) {
|
|||
"resource deleted without no dryrun": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.DeleteEvent{
|
||||
Operation: event.Deleted,
|
||||
Type: event.DeleteEventResourceUpdate,
|
||||
Object: createObject("apps", "Deployment", "default", "my-dep"),
|
||||
Operation: event.Deleted,
|
||||
Identifier: createIdentifier("apps", "Deployment", "default", "my-dep"),
|
||||
Object: createObject("apps", "Deployment", "default", "my-dep"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep deleted",
|
||||
},
|
||||
"resource skipped with client dryrun": {
|
||||
previewStrategy: common.DryRunClient,
|
||||
event: event.DeleteEvent{
|
||||
Operation: event.DeleteSkipped,
|
||||
Type: event.DeleteEventResourceUpdate,
|
||||
Object: createObject("apps", "Deployment", "", "my-dep"),
|
||||
Operation: event.DeleteSkipped,
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
Object: createObject("apps", "Deployment", "", "my-dep"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep delete skipped (preview)",
|
||||
},
|
||||
"resource with delete error": {
|
||||
previewStrategy: common.DryRunServer,
|
||||
event: event.DeleteEvent{
|
||||
Type: event.DeleteEventFailed,
|
||||
Object: createObject("apps", "Deployment", "", "my-dep"),
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
Error: fmt.Errorf("this is a test"),
|
||||
},
|
||||
expected: "deployment.apps/my-dep deletion failed: this is a test (preview-server)",
|
||||
},
|
||||
"delete event with completed status": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.DeleteEvent{
|
||||
Type: event.DeleteEventCompleted,
|
||||
},
|
||||
deleteStats: &list.DeleteStats{
|
||||
Deleted: 1,
|
||||
Skipped: 2,
|
||||
},
|
||||
expected: "1 resource(s) deleted, 2 skipped",
|
||||
},
|
||||
}
|
||||
|
||||
for tn, tc := range testCases {
|
||||
t.Run(tn, func(t *testing.T) {
|
||||
ioStreams, _, out, _ := genericclioptions.NewTestIOStreams() //nolint:dogsled
|
||||
formatter := NewFormatter(ioStreams, tc.previewStrategy)
|
||||
err := formatter.FormatDeleteEvent(tc.event, tc.deleteStats)
|
||||
err := formatter.FormatDeleteEvent(tc.event)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, strings.TrimSpace(tc.expected), strings.TrimSpace(out.String()))
|
||||
|
|
@ -292,11 +238,3 @@ func createIdentifier(group, kind, namespace, name string) object.ObjMetadata {
|
|||
},
|
||||
}
|
||||
}
|
||||
|
||||
type fakeCollector struct {
|
||||
m map[object.ObjMetadata]event.StatusEvent
|
||||
}
|
||||
|
||||
func (f *fakeCollector) LatestStatus() map[object.ObjMetadata]event.StatusEvent {
|
||||
return f.m
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -28,9 +28,56 @@ type formatter struct {
|
|||
ioStreams genericclioptions.IOStreams
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatApplyEvent(ae event.ApplyEvent, as *list.ApplyStats, c list.Collector) error {
|
||||
switch ae.Type {
|
||||
case event.ApplyEventCompleted:
|
||||
func (jf *formatter) FormatApplyEvent(ae event.ApplyEvent) error {
|
||||
eventInfo := jf.baseResourceEvent(ae.Identifier)
|
||||
if ae.Error != nil {
|
||||
eventInfo["error"] = ae.Error.Error()
|
||||
return jf.printEvent("apply", "resourceFailed", eventInfo)
|
||||
}
|
||||
eventInfo["operation"] = ae.Operation.String()
|
||||
return jf.printEvent("apply", "resourceApplied", eventInfo)
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatStatusEvent(se event.StatusEvent) error {
|
||||
return jf.printResourceStatus(se)
|
||||
}
|
||||
|
||||
func (jf *formatter) printResourceStatus(se event.StatusEvent) error {
|
||||
eventInfo := jf.baseResourceEvent(se.Identifier)
|
||||
eventInfo["status"] = se.PollResourceInfo.Status.String()
|
||||
eventInfo["message"] = se.PollResourceInfo.Message
|
||||
return jf.printEvent("status", "resourceStatus", eventInfo)
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatPruneEvent(pe event.PruneEvent) error {
|
||||
eventInfo := jf.baseResourceEvent(pe.Identifier)
|
||||
if pe.Error != nil {
|
||||
eventInfo["error"] = pe.Error.Error()
|
||||
return jf.printEvent("prune", "resourceFailed", eventInfo)
|
||||
}
|
||||
eventInfo["operation"] = pe.Operation.String()
|
||||
return jf.printEvent("prune", "resourcePruned", eventInfo)
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatDeleteEvent(de event.DeleteEvent) error {
|
||||
eventInfo := jf.baseResourceEvent(de.Identifier)
|
||||
if de.Error != nil {
|
||||
eventInfo["error"] = de.Error.Error()
|
||||
return jf.printEvent("delete", "resourceFailed", eventInfo)
|
||||
}
|
||||
eventInfo["operation"] = de.Operation.String()
|
||||
return jf.printEvent("delete", "resourceDeleted", eventInfo)
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatErrorEvent(ee event.ErrorEvent) error {
|
||||
return jf.printEvent("error", "error", map[string]interface{}{
|
||||
"error": ee.Err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatActionGroupEvent(age event.ActionGroupEvent, ags []event.ActionGroup,
|
||||
as *list.ApplyStats, ps *list.PruneStats, ds *list.DeleteStats, c list.Collector) error {
|
||||
if age.Action == event.ApplyAction && age.Type == event.Finished {
|
||||
if err := jf.printEvent("apply", "completed", map[string]interface{}{
|
||||
"count": as.Sum(),
|
||||
"createdCount": as.Created,
|
||||
|
|
@@ -41,112 +88,47 @@ func (jf *formatter) FormatApplyEvent(ae event.ApplyEvent, as *list.ApplyStats,
|
|||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for id, se := range c.LatestStatus() {
|
||||
if err := jf.printResourceStatus(id, se); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case event.ApplyEventResourceUpdate:
|
||||
gk := ae.Identifier.GroupKind
|
||||
eventInfo := map[string]interface{}{
|
||||
"group": gk.Group,
|
||||
"kind": gk.Kind,
|
||||
"namespace": ae.Identifier.Namespace,
|
||||
"name": ae.Identifier.Name,
|
||||
"operation": ae.Operation.String(),
|
||||
}
|
||||
if ae.Error != nil {
|
||||
eventInfo["error"] = ae.Error.Error()
|
||||
}
|
||||
|
||||
return jf.printEvent("apply", "resourceApplied", eventInfo)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatStatusEvent(se event.StatusEvent, _ list.Collector) error {
|
||||
if se.Type == event.StatusEventResourceUpdate {
|
||||
id := se.Resource.Identifier
|
||||
return jf.printResourceStatus(id, se)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (jf *formatter) printResourceStatus(id object.ObjMetadata, se event.StatusEvent) error {
|
||||
return jf.printEvent("status", "resourceStatus",
|
||||
map[string]interface{}{
|
||||
"group": id.GroupKind.Group,
|
||||
"kind": id.GroupKind.Kind,
|
||||
"namespace": id.Namespace,
|
||||
"name": id.Name,
|
||||
"status": se.Resource.Status.String(),
|
||||
"message": se.Resource.Message,
|
||||
})
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatPruneEvent(pe event.PruneEvent, ps *list.PruneStats) error {
|
||||
switch pe.Type {
|
||||
case event.PruneEventCompleted:
|
||||
if age.Action == event.PruneAction && age.Type == event.Finished {
|
||||
return jf.printEvent("prune", "completed", map[string]interface{}{
|
||||
"pruned": ps.Pruned,
|
||||
"skipped": ps.Skipped,
|
||||
})
|
||||
case event.PruneEventResourceUpdate:
|
||||
gk := pe.Identifier.GroupKind
|
||||
return jf.printEvent("prune", "resourcePruned", map[string]interface{}{
|
||||
"group": gk.Group,
|
||||
"kind": gk.Kind,
|
||||
"namespace": pe.Identifier.Namespace,
|
||||
"name": pe.Identifier.Name,
|
||||
"operation": pe.Operation.String(),
|
||||
})
|
||||
case event.PruneEventFailed:
|
||||
gk := pe.Identifier.GroupKind
|
||||
return jf.printEvent("prune", "resourceFailed", map[string]interface{}{
|
||||
"group": gk.Group,
|
||||
"kind": gk.Kind,
|
||||
"namespace": pe.Identifier.Namespace,
|
||||
"name": pe.Identifier.Name,
|
||||
"error": pe.Error.Error(),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatDeleteEvent(de event.DeleteEvent, ds *list.DeleteStats) error {
|
||||
switch de.Type {
|
||||
case event.DeleteEventCompleted:
|
||||
if age.Action == event.DeleteAction && age.Type == event.Finished {
|
||||
return jf.printEvent("delete", "completed", map[string]interface{}{
|
||||
"deleted": ds.Deleted,
|
||||
"skipped": ds.Skipped,
|
||||
})
|
||||
case event.DeleteEventResourceUpdate:
|
||||
gk := de.Identifier.GroupKind
|
||||
return jf.printEvent("delete", "resourceDeleted", map[string]interface{}{
|
||||
"group": gk.Group,
|
||||
"kind": gk.Kind,
|
||||
"namespace": de.Identifier.Namespace,
|
||||
"name": de.Identifier.Name,
|
||||
"operation": de.Operation.String(),
|
||||
})
|
||||
case event.DeleteEventFailed:
|
||||
gk := de.Identifier.GroupKind
|
||||
return jf.printEvent("delete", "resourceFailed", map[string]interface{}{
|
||||
"group": gk.Group,
|
||||
"kind": gk.Kind,
|
||||
"namespace": de.Identifier.Namespace,
|
||||
"name": de.Identifier.Name,
|
||||
"error": de.Error.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
if age.Action == event.WaitAction && age.Type == event.Started {
|
||||
ag, found := list.ActionGroupByName(age.GroupName, ags)
|
||||
if !found {
|
||||
panic(fmt.Errorf("unknown action group name %q", age.GroupName))
|
||||
}
|
||||
for id, se := range c.LatestStatus() {
|
||||
// Only print information about objects that we actually care about
|
||||
// for this wait task.
|
||||
if found := object.ObjMetas(ag.Identifiers).Contains(id); found {
|
||||
if err := jf.printResourceStatus(se); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (jf *formatter) FormatErrorEvent(ee event.ErrorEvent) error {
|
||||
return jf.printEvent("error", "error", map[string]interface{}{
|
||||
"error": ee.Err.Error(),
|
||||
})
|
||||
func (jf *formatter) baseResourceEvent(identifier object.ObjMetadata) map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"group": identifier.GroupKind.Group,
|
||||
"kind": identifier.GroupKind.Kind,
|
||||
"namespace": identifier.Namespace,
|
||||
"name": identifier.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func (jf *formatter) printEvent(t, eventType string, content map[string]interface{}) error {
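For orientation, the JSON formatter prints one JSON object per line. The sketch below reconstructs roughly what a `printEvent("status", "resourceStatus", eventInfo)` line looks like, using only keys asserted in the tests further down; the `type`, `eventType` and `timestamp` keys are assumed to be added by `printEvent` itself, and the timestamp value here is a placeholder.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative reconstruction of one output line, not taken verbatim
	// from the implementation.
	line := map[string]interface{}{
		"type":      "status",
		"eventType": "resourceStatus",
		"group":     "apps",
		"kind":      "Deployment",
		"namespace": "foo",
		"name":      "my-dep",
		"status":    "Current",
		"message":   "Resource is Current",
		"timestamp": "2020-01-01T00:00:00Z", // placeholder; the real format comes from printEvent
	}
	b, _ := json.Marshal(line)
	fmt.Println(string(b))
}
```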
|
||||
|
|
|
|||
|
|
@@ -23,15 +23,12 @@ func TestFormatter_FormatApplyEvent(t *testing.T) {
|
|||
testCases := map[string]struct {
|
||||
previewStrategy common.DryRunStrategy
|
||||
event event.ApplyEvent
|
||||
applyStats *list.ApplyStats
|
||||
statusCollector list.Collector
|
||||
expected []map[string]interface{}
|
||||
}{
|
||||
"resource created without dryrun": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.ApplyEvent{
|
||||
Operation: event.Created,
|
||||
Type: event.ApplyEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "default", "my-dep"),
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
|
|
@ -51,7 +48,6 @@ func TestFormatter_FormatApplyEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunClient,
|
||||
event: event.ApplyEvent{
|
||||
Operation: event.Configured,
|
||||
Type: event.ApplyEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
|
|
@ -71,7 +67,6 @@ func TestFormatter_FormatApplyEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunServer,
|
||||
event: event.ApplyEvent{
|
||||
Operation: event.Configured,
|
||||
Type: event.ApplyEventResourceUpdate,
|
||||
Identifier: createIdentifier("batch", "CronJob", "foo", "my-cron"),
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
|
|
@ -87,63 +82,13 @@ func TestFormatter_FormatApplyEvent(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
"completed event": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.ApplyEvent{
|
||||
Type: event.ApplyEventCompleted,
|
||||
},
|
||||
applyStats: &list.ApplyStats{
|
||||
ServersideApplied: 1,
|
||||
},
|
||||
statusCollector: &fakeCollector{
|
||||
m: map[object.ObjMetadata]event.StatusEvent{
|
||||
object.ObjMetadata{ //nolint:gofmt
|
||||
GroupKind: schema.GroupKind{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Namespace: "foo",
|
||||
Name: "my-dep",
|
||||
}: {
|
||||
Resource: &pollevent.ResourceStatus{
|
||||
Status: status.CurrentStatus,
|
||||
Message: "Resource is Current",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"configuredCount": 0,
|
||||
"count": 1,
|
||||
"createdCount": 0,
|
||||
"eventType": "completed",
|
||||
"failedCount": 0,
|
||||
"serverSideCount": 1,
|
||||
"type": "apply",
|
||||
"unchangedCount": 0,
|
||||
"timestamp": "",
|
||||
},
|
||||
{
|
||||
"eventType": "resourceStatus",
|
||||
"group": "apps",
|
||||
"kind": "Deployment",
|
||||
"message": "Resource is Current",
|
||||
"name": "my-dep",
|
||||
"namespace": "foo",
|
||||
"status": "Current",
|
||||
"timestamp": "",
|
||||
"type": "status",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for tn, tc := range testCases {
|
||||
t.Run(tn, func(t *testing.T) {
|
||||
ioStreams, _, out, _ := genericclioptions.NewTestIOStreams() //nolint:dogsled
|
||||
formatter := NewFormatter(ioStreams, tc.previewStrategy)
|
||||
err := formatter.FormatApplyEvent(tc.event, tc.applyStats, tc.statusCollector)
|
||||
err := formatter.FormatApplyEvent(tc.event)
|
||||
assert.NoError(t, err)
|
||||
|
||||
objects := strings.Split(strings.TrimSpace(out.String()), "\n")
|
||||
|
|
@ -162,14 +107,20 @@ func TestFormatter_FormatStatusEvent(t *testing.T) {
|
|||
testCases := map[string]struct {
|
||||
previewStrategy common.DryRunStrategy
|
||||
event event.StatusEvent
|
||||
statusCollector list.Collector
|
||||
expected map[string]interface{}
|
||||
}{
|
||||
"resource update with Current status": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.StatusEvent{
|
||||
Type: event.StatusEventResourceUpdate,
|
||||
Resource: &pollevent.ResourceStatus{
|
||||
Identifier: object.ObjMetadata{
|
||||
GroupKind: schema.GroupKind{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
Namespace: "foo",
|
||||
Name: "bar",
|
||||
},
|
||||
PollResourceInfo: &pollevent.ResourceStatus{
|
||||
Identifier: object.ObjMetadata{
|
||||
GroupKind: schema.GroupKind{
|
||||
Group: "apps",
|
||||
|
|
@ -200,7 +151,7 @@ func TestFormatter_FormatStatusEvent(t *testing.T) {
|
|||
t.Run(tn, func(t *testing.T) {
|
||||
ioStreams, _, out, _ := genericclioptions.NewTestIOStreams() //nolint:dogsled
|
||||
formatter := NewFormatter(ioStreams, tc.previewStrategy)
|
||||
err := formatter.FormatStatusEvent(tc.event, tc.statusCollector)
|
||||
err := formatter.FormatStatusEvent(tc.event)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assertOutput(t, tc.expected, out.String())
|
||||
|
|
@ -219,7 +170,6 @@ func TestFormatter_FormatPruneEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunNone,
|
||||
event: event.PruneEvent{
|
||||
Operation: event.Pruned,
|
||||
Type: event.PruneEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "default", "my-dep"),
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
|
|
@ -237,7 +187,6 @@ func TestFormatter_FormatPruneEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunClient,
|
||||
event: event.PruneEvent{
|
||||
Operation: event.PruneSkipped,
|
||||
Type: event.PruneEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
|
|
@ -251,30 +200,13 @@ func TestFormatter_FormatPruneEvent(t *testing.T) {
|
|||
"type": "prune",
|
||||
},
|
||||
},
|
||||
"prune event with completed status": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.PruneEvent{
|
||||
Type: event.PruneEventCompleted,
|
||||
},
|
||||
pruneStats: &list.PruneStats{
|
||||
Pruned: 1,
|
||||
Skipped: 2,
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"eventType": "completed",
|
||||
"skipped": 2,
|
||||
"pruned": 1,
|
||||
"timestamp": "",
|
||||
"type": "prune",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for tn, tc := range testCases {
|
||||
t.Run(tn, func(t *testing.T) {
|
||||
ioStreams, _, out, _ := genericclioptions.NewTestIOStreams() //nolint:dogsled
|
||||
formatter := NewFormatter(ioStreams, tc.previewStrategy)
|
||||
err := formatter.FormatPruneEvent(tc.event, tc.pruneStats)
|
||||
err := formatter.FormatPruneEvent(tc.event)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assertOutput(t, tc.expected, out.String())
|
||||
|
|
@ -294,7 +226,6 @@ func TestFormatter_FormatDeleteEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunNone,
|
||||
event: event.DeleteEvent{
|
||||
Operation: event.Deleted,
|
||||
Type: event.DeleteEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "default", "my-dep"),
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
|
|
@ -312,7 +243,6 @@ func TestFormatter_FormatDeleteEvent(t *testing.T) {
|
|||
previewStrategy: common.DryRunClient,
|
||||
event: event.DeleteEvent{
|
||||
Operation: event.DeleteSkipped,
|
||||
Type: event.DeleteEventResourceUpdate,
|
||||
Identifier: createIdentifier("apps", "Deployment", "", "my-dep"),
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
|
|
@ -326,30 +256,13 @@ func TestFormatter_FormatDeleteEvent(t *testing.T) {
|
|||
"type": "delete",
|
||||
},
|
||||
},
|
||||
"delete event with completed status": {
|
||||
previewStrategy: common.DryRunNone,
|
||||
event: event.DeleteEvent{
|
||||
Type: event.DeleteEventCompleted,
|
||||
},
|
||||
deleteStats: &list.DeleteStats{
|
||||
Deleted: 1,
|
||||
Skipped: 2,
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"deleted": 1,
|
||||
"eventType": "completed",
|
||||
"skipped": 2,
|
||||
"timestamp": "",
|
||||
"type": "delete",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for tn, tc := range testCases {
|
||||
t.Run(tn, func(t *testing.T) {
|
||||
ioStreams, _, out, _ := genericclioptions.NewTestIOStreams() //nolint:dogsled
|
||||
formatter := NewFormatter(ioStreams, tc.previewStrategy)
|
||||
err := formatter.FormatDeleteEvent(tc.event, tc.deleteStats)
|
||||
err := formatter.FormatDeleteEvent(tc.event)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assertOutput(t, tc.expected, out.String())
|
||||
|
|
@ -394,11 +307,3 @@ func createIdentifier(group, kind, namespace, name string) object.ObjMetadata {
|
|||
},
|
||||
}
|
||||
}
|
||||
|
||||
type fakeCollector struct {
|
||||
m map[object.ObjMetadata]event.StatusEvent
|
||||
}
|
||||
|
||||
func (f *fakeCollector) LatestStatus() map[object.ObjMetadata]event.StatusEvent {
|
||||
return f.m
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ import (
|
|||
"sigs.k8s.io/cli-utils/pkg/print/table"
|
||||
)
|
||||
|
||||
func newResourceStateCollector(resourceGroups []event.ResourceGroup) *ResourceStateCollector {
|
||||
func newResourceStateCollector(resourceGroups []event.ActionGroup) *ResourceStateCollector {
|
||||
resourceInfos := make(map[object.ObjMetadata]*ResourceInfo)
|
||||
for _, group := range resourceGroups {
|
||||
action := group.Action
|
||||
|
|
@ -70,15 +70,15 @@ type ResourceInfo struct {
|
|||
|
||||
// ApplyOpResult contains the result after
|
||||
// a resource has been applied to the cluster.
|
||||
ApplyOpResult *event.ApplyEventOperation
|
||||
ApplyOpResult event.ApplyEventOperation
|
||||
|
||||
// PruneOpResult contains the result after
|
||||
// a prune operation on a resource
|
||||
PruneOpResult *event.PruneEventOperation
|
||||
PruneOpResult event.PruneEventOperation
|
||||
|
||||
// DeleteOpResult contains the result after
|
||||
// a delete operation on a resource
|
||||
DeleteOpResult *event.DeleteEventOperation
|
||||
DeleteOpResult event.DeleteEventOperation
|
||||
}
|
||||
|
||||
// Identifier returns the identifier for the given resource.
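The `*OpResult` fields above change from pointers to plain values, so "no result yet" is now the enum's zero value instead of `nil`; the table printer later in this commit compares against `event.ApplyUnspecified` and `event.PruneUnspecified` accordingly. A small sketch of the consumer-side difference, assuming (as those comparisons imply) that the zero values are the `*Unspecified` constants:

```go
package example

import "sigs.k8s.io/cli-utils/pkg/apply/event"

// hasApplyResult sketches the new check: with a value-typed field, "unset"
// is the zero value rather than a nil pointer.
func hasApplyResult(op event.ApplyEventOperation) bool {
	return op != event.ApplyUnspecified
}
```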
|
||||
|
|
@ -182,48 +182,37 @@ func (r *ResourceStateCollector) processEvent(ev event.Event) error {
|
|||
// processStatusEvent handles events pertaining to a status
|
||||
// update for a resource.
|
||||
func (r *ResourceStateCollector) processStatusEvent(e event.StatusEvent) {
|
||||
if e.Type == event.StatusEventResourceUpdate {
|
||||
klog.V(7).Infoln("processing status event")
|
||||
resource := e.Resource
|
||||
if resource == nil {
|
||||
klog.V(4).Infoln("status event missing Resource field; no processing")
|
||||
return
|
||||
}
|
||||
previous, found := r.resourceInfos[resource.Identifier]
|
||||
if !found {
|
||||
klog.V(4).Infof("%s status event not found in ResourceInfos; no processing", resource.Identifier)
|
||||
return
|
||||
}
|
||||
previous.resourceStatus = e.Resource
|
||||
klog.V(7).Infoln("processing status event")
|
||||
previous, found := r.resourceInfos[e.Identifier]
|
||||
if !found {
|
||||
klog.V(4).Infof("%s status event not found in ResourceInfos; no processing", e.Identifier)
|
||||
return
|
||||
}
|
||||
previous.resourceStatus = e.PollResourceInfo
|
||||
}
|
||||
|
||||
// processApplyEvent handles events relating to apply operations
|
||||
func (r *ResourceStateCollector) processApplyEvent(e event.ApplyEvent) {
|
||||
if e.Type == event.ApplyEventResourceUpdate {
|
||||
identifier := e.Identifier
|
||||
klog.V(7).Infof("processing apply event for %s", identifier)
|
||||
previous, found := r.resourceInfos[identifier]
|
||||
if !found {
|
||||
klog.V(4).Infof("%s apply event not found in ResourceInfos; no processing", identifier)
|
||||
return
|
||||
}
|
||||
previous.ApplyOpResult = &e.Operation
|
||||
identifier := e.Identifier
|
||||
klog.V(7).Infof("processing apply event for %s", identifier)
|
||||
previous, found := r.resourceInfos[identifier]
|
||||
if !found {
|
||||
klog.V(4).Infof("%s apply event not found in ResourceInfos; no processing", identifier)
|
||||
return
|
||||
}
|
||||
previous.ApplyOpResult = e.Operation
|
||||
}
|
||||
|
||||
// processPruneEvent handles event related to prune operations.
|
||||
func (r *ResourceStateCollector) processPruneEvent(e event.PruneEvent) {
|
||||
if e.Type == event.PruneEventResourceUpdate {
|
||||
identifier := e.Identifier
|
||||
klog.V(7).Infof("processing prune event for %s", identifier)
|
||||
previous, found := r.resourceInfos[identifier]
|
||||
if !found {
|
||||
klog.V(4).Infof("%s prune event not found in ResourceInfos; no processing", identifier)
|
||||
return
|
||||
}
|
||||
previous.PruneOpResult = &e.Operation
|
||||
identifier := e.Identifier
|
||||
klog.V(7).Infof("processing prune event for %s", identifier)
|
||||
previous, found := r.resourceInfos[identifier]
|
||||
if !found {
|
||||
klog.V(4).Infof("%s prune event not found in ResourceInfos; no processing", identifier)
|
||||
return
|
||||
}
|
||||
previous.PruneOpResult = e.Operation
|
||||
}
|
||||
|
||||
// ResourceState contains the latest state for all the resources.
|
||||
|
|
|
|||
|
|
@ -35,15 +35,15 @@ const testMessage = "test message for ResourceStatus"
|
|||
|
||||
func TestResourceStateCollector_New(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
resourceGroups []event.ResourceGroup
|
||||
resourceGroups []event.ActionGroup
|
||||
resourceInfos map[object.ObjMetadata]*ResourceInfo
|
||||
}{
|
||||
"no resources": {
|
||||
resourceGroups: []event.ResourceGroup{},
|
||||
resourceGroups: []event.ActionGroup{},
|
||||
resourceInfos: map[object.ObjMetadata]*ResourceInfo{},
|
||||
},
|
||||
"several resources for apply": {
|
||||
resourceGroups: []event.ResourceGroup{
|
||||
resourceGroups: []event.ActionGroup{
|
||||
{
|
||||
Action: event.ApplyAction,
|
||||
Identifiers: []object.ObjMetadata{
|
||||
|
|
@ -61,7 +61,7 @@ func TestResourceStateCollector_New(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"several resources for prune": {
|
||||
resourceGroups: []event.ResourceGroup{
|
||||
resourceGroups: []event.ActionGroup{
|
||||
{
|
||||
Action: event.ApplyAction,
|
||||
Identifiers: []object.ObjMetadata{
|
||||
|
|
@ -104,39 +104,30 @@ func TestResourceStateCollector_New(t *testing.T) {
|
|||
|
||||
func TestResourceStateCollector_ProcessStatusEvent(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
resourceGroups []event.ResourceGroup
|
||||
resourceGroups []event.ActionGroup
|
||||
statusEvent event.StatusEvent
|
||||
}{
|
||||
"nil StatusEvent.Resource does not crash": {
|
||||
resourceGroups: []event.ResourceGroup{},
|
||||
resourceGroups: []event.ActionGroup{},
|
||||
statusEvent: event.StatusEvent{
|
||||
Type: event.StatusEventResourceUpdate,
|
||||
Resource: nil,
|
||||
},
|
||||
},
|
||||
"type StatusEventCompleted does nothing": {
|
||||
resourceGroups: []event.ResourceGroup{},
|
||||
statusEvent: event.StatusEvent{
|
||||
Type: event.StatusEventCompleted,
|
||||
Resource: nil,
|
||||
},
|
||||
},
|
||||
"unfound Resource identifier does not crash": {
|
||||
resourceGroups: []event.ResourceGroup{
|
||||
resourceGroups: []event.ActionGroup{
|
||||
{
|
||||
Action: event.ApplyAction,
|
||||
Identifiers: []object.ObjMetadata{depID},
|
||||
},
|
||||
},
|
||||
statusEvent: event.StatusEvent{
|
||||
Type: event.StatusEventResourceUpdate,
|
||||
Resource: &pe.ResourceStatus{
|
||||
PollResourceInfo: &pe.ResourceStatus{
|
||||
Identifier: customID, // Does not match identifier in resourceGroups
|
||||
},
|
||||
},
|
||||
},
|
||||
"basic status event for applying two resources updates resourceStatus": {
|
||||
resourceGroups: []event.ResourceGroup{
|
||||
resourceGroups: []event.ActionGroup{
|
||||
{
|
||||
Action: event.ApplyAction,
|
||||
Identifiers: []object.ObjMetadata{
|
||||
|
|
@ -145,15 +136,14 @@ func TestResourceStateCollector_ProcessStatusEvent(t *testing.T) {
|
|||
},
|
||||
},
|
||||
statusEvent: event.StatusEvent{
|
||||
Type: event.StatusEventResourceUpdate,
|
||||
Resource: &pe.ResourceStatus{
|
||||
PollResourceInfo: &pe.ResourceStatus{
|
||||
Identifier: depID,
|
||||
Message: testMessage,
|
||||
},
|
||||
},
|
||||
},
|
||||
"several resources for prune": {
|
||||
resourceGroups: []event.ResourceGroup{
|
||||
resourceGroups: []event.ActionGroup{
|
||||
{
|
||||
Action: event.ApplyAction,
|
||||
Identifiers: []object.ObjMetadata{
|
||||
|
|
@ -168,8 +158,7 @@ func TestResourceStateCollector_ProcessStatusEvent(t *testing.T) {
|
|||
},
|
||||
},
|
||||
statusEvent: event.StatusEvent{
|
||||
Type: event.StatusEventResourceUpdate,
|
||||
Resource: &pe.ResourceStatus{
|
||||
PollResourceInfo: &pe.ResourceStatus{
|
||||
Identifier: depID,
|
||||
Message: testMessage,
|
||||
},
|
||||
|
|
@ -186,7 +175,7 @@ func TestResourceStateCollector_ProcessStatusEvent(t *testing.T) {
|
|||
resourceInfo, found := rsc.resourceInfos[id]
|
||||
if found {
|
||||
// Validate the ResourceStatus was set from StatusEvent
|
||||
if resourceInfo.resourceStatus != tc.statusEvent.Resource {
|
||||
if resourceInfo.resourceStatus != tc.statusEvent.PollResourceInfo {
|
||||
t.Errorf("status event not processed for %s", id)
|
||||
}
|
||||
}
|
||||
|
|
@ -199,5 +188,5 @@ func getID(e event.StatusEvent) (object.ObjMetadata, bool) {
|
|||
if e.Resource == nil {
|
||||
return object.ObjMetadata{}, false
|
||||
}
|
||||
return e.Resource.Identifier, true
|
||||
return e.Identifier, true
|
||||
}
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ func (t *Printer) Print(ch <-chan event.Event, _ common.DryRunStrategy) error {
|
|||
}
|
||||
// Create a new collector and initialize it with the resources
|
||||
// we are interested in.
|
||||
coll := newResourceStateCollector(initEvent.ResourceGroups)
|
||||
coll := newResourceStateCollector(initEvent.ActionGroups)
|
||||
|
||||
stop := make(chan struct{})
|
||||
|
||||
|
|
@ -89,11 +89,11 @@ var (
|
|||
var text string
|
||||
switch resInfo.ResourceAction {
|
||||
case event.ApplyAction:
|
||||
if resInfo.ApplyOpResult != nil {
|
||||
if resInfo.ApplyOpResult != event.ApplyUnspecified {
|
||||
text = resInfo.ApplyOpResult.String()
|
||||
}
|
||||
case event.PruneAction:
|
||||
if resInfo.PruneOpResult != nil {
|
||||
if resInfo.PruneOpResult != event.PruneUnspecified {
|
||||
text = resInfo.PruneOpResult.String()
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ func TestActionColumnDef(t *testing.T) {
|
|||
"applied": {
|
||||
resource: &ResourceInfo{
|
||||
ResourceAction: event.ApplyAction,
|
||||
ApplyOpResult: &createdOpResult,
|
||||
ApplyOpResult: createdOpResult,
|
||||
},
|
||||
columnWidth: 15,
|
||||
expectedOutput: "Created",
|
||||
|
|
@ -43,7 +43,7 @@ func TestActionColumnDef(t *testing.T) {
|
|||
"pruned": {
|
||||
resource: &ResourceInfo{
|
||||
ResourceAction: event.PruneAction,
|
||||
PruneOpResult: &prunedOpResult,
|
||||
PruneOpResult: prunedOpResult,
|
||||
},
|
||||
columnWidth: 15,
|
||||
expectedOutput: "Pruned",
|
||||
|
|
@ -51,7 +51,7 @@ func TestActionColumnDef(t *testing.T) {
|
|||
"trimmed output": {
|
||||
resource: &ResourceInfo{
|
||||
ResourceAction: event.ApplyAction,
|
||||
ApplyOpResult: &createdOpResult,
|
||||
ApplyOpResult: createdOpResult,
|
||||
},
|
||||
columnWidth: 5,
|
||||
expectedOutput: "Creat",
|
||||
|
|
|
|||
|
|
@@ -124,8 +124,6 @@ Use the `kapply` binary in `MYGOBIN` to apply both the CRD and the CR.
```
kapply apply $BASE --reconcile-timeout=1m > $OUTPUT/status

expectedOutputLine "customresourcedefinition.apiextensions.k8s.io/foos.custom.io is Current: CRD is established"

expectedOutputLine "foo.custom.io/example-foo is Current: Resource is current"

kubectl get crd --no-headers | awk '{print $1}' > $OUTPUT/status
|
|
|
|||
|
|
@@ -259,16 +259,7 @@ func (a *Applier) Run(ctx context.Context, invInfo inventory.InventoryInfo, obje
|
|||
eventChannel <- event.Event{
|
||||
Type: event.InitType,
|
||||
InitEvent: event.InitEvent{
|
||||
ResourceGroups: []event.ResourceGroup{
|
||||
{
|
||||
Action: event.ApplyAction,
|
||||
Identifiers: resourceObjects.IdsForApply(),
|
||||
},
|
||||
{
|
||||
Action: event.PruneAction,
|
||||
Identifiers: resourceObjects.IdsForPrune(),
|
||||
},
|
||||
},
|
||||
ActionGroups: taskQueue.ToActionGroups(),
|
||||
},
|
||||
}
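The `InitEvent` above no longer advertises a hard-coded apply/prune pair; it carries whatever groups the task queue produced. A minimal sketch of how a consumer can pre-register the identifiers it should expect, which is roughly what `newResourceStateCollector(initEvent.ActionGroups)` does in the table printer. The `Action` and `Identifiers` fields are taken from fixtures elsewhere in this diff; the `event.ResourceAction` type name is an assumption.

```go
package example

import (
	"sigs.k8s.io/cli-utils/pkg/apply/event"
	"sigs.k8s.io/cli-utils/pkg/object"
)

// identifiersByAction flattens the announced action groups into a lookup of
// which action is planned for each object.
func identifiersByAction(groups []event.ActionGroup) map[object.ObjMetadata]event.ResourceAction {
	out := make(map[object.ObjMetadata]event.ResourceAction)
	for _, ag := range groups {
		for _, id := range ag.Identifiers {
			out[id] = ag.Action
		}
	}
	return out
}
```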
|
||||
|
||||
|
|
@ -276,7 +267,7 @@ func (a *Applier) Run(ctx context.Context, invInfo inventory.InventoryInfo, obje
|
|||
klog.V(4).Infoln("applier building TaskStatusRunner...")
|
||||
runner := taskrunner.NewTaskStatusRunner(resourceObjects.AllIds(), a.StatusPoller)
|
||||
klog.V(4).Infoln("applier running TaskStatusRunner...")
|
||||
err = runner.Run(ctx, taskQueue, eventChannel, taskrunner.Options{
|
||||
err = runner.Run(ctx, taskQueue.ToChannel(), eventChannel, taskrunner.Options{
|
||||
PollInterval: options.PollInterval,
|
||||
UseCache: true,
|
||||
EmitStatusEvents: options.EmitStatusEvents,
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@ import (
|
|||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
|
@ -35,6 +34,7 @@ import (
|
|||
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
|
||||
"sigs.k8s.io/cli-utils/pkg/object"
|
||||
"sigs.k8s.io/cli-utils/pkg/provider"
|
||||
"sigs.k8s.io/cli-utils/pkg/testutil"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -138,30 +138,18 @@ func (i inventoryInfo) toWrapped() inventory.InventoryInfo {
|
|||
return inventory.WrapInventoryInfoObj(inv)
|
||||
}
|
||||
|
||||
type expectedEvent struct {
|
||||
eventType event.Type
|
||||
|
||||
applyEventType event.ApplyEventType
|
||||
statusEventType event.StatusEventType
|
||||
pruneEventType event.PruneEventType
|
||||
deleteEventType event.DeleteEventType
|
||||
|
||||
applyErrorType error
|
||||
pruneEventOp event.PruneEventOperation
|
||||
}
|
||||
|
||||
func TestApplier(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
namespace string
|
||||
resources []*unstructured.Unstructured
|
||||
invInfo inventoryInfo
|
||||
clusterObjs []*unstructured.Unstructured
|
||||
handlers []handler
|
||||
reconcileTimeout time.Duration
|
||||
prune bool
|
||||
inventoryPolicy inventory.InventoryPolicy
|
||||
statusEvents []pollevent.Event
|
||||
expectedEventTypes []expectedEvent
|
||||
namespace string
|
||||
resources []*unstructured.Unstructured
|
||||
invInfo inventoryInfo
|
||||
clusterObjs []*unstructured.Unstructured
|
||||
handlers []handler
|
||||
reconcileTimeout time.Duration
|
||||
prune bool
|
||||
inventoryPolicy inventory.InventoryPolicy
|
||||
statusEvents []pollevent.Event
|
||||
expectedEvents []testutil.ExpEvent
|
||||
}{
|
||||
"initial apply without status or prune": {
|
||||
namespace: "default",
|
||||
|
|
@ -177,17 +165,18 @@ func TestApplier(t *testing.T) {
|
|||
reconcileTimeout: time.Duration(0),
|
||||
prune: false,
|
||||
inventoryPolicy: inventory.InventoryPolicyMustMatch,
|
||||
expectedEventTypes: []expectedEvent{
|
||||
expectedEvents: []testutil.ExpEvent{
|
||||
{
|
||||
eventType: event.InitType,
|
||||
EventType: event.InitType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventResourceUpdate,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventCompleted,
|
||||
EventType: event.ApplyType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -232,41 +221,42 @@ func TestApplier(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
expectedEventTypes: []expectedEvent{
|
||||
expectedEvents: []testutil.ExpEvent{
|
||||
{
|
||||
eventType: event.InitType,
|
||||
EventType: event.InitType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventResourceUpdate,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventResourceUpdate,
|
||||
EventType: event.ApplyType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventCompleted,
|
||||
EventType: event.ApplyType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventResourceUpdate,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventResourceUpdate,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventResourceUpdate,
|
||||
EventType: event.StatusType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventCompleted,
|
||||
EventType: event.StatusType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventCompleted,
|
||||
EventType: event.StatusType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -310,37 +300,39 @@ func TestApplier(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
expectedEventTypes: []expectedEvent{
|
||||
expectedEvents: []testutil.ExpEvent{
|
||||
{
|
||||
eventType: event.InitType,
|
||||
EventType: event.InitType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventResourceUpdate,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventResourceUpdate,
|
||||
EventType: event.ApplyType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventCompleted,
|
||||
EventType: event.ApplyType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventResourceUpdate,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventResourceUpdate,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventCompleted,
|
||||
EventType: event.StatusType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventCompleted,
|
||||
EventType: event.StatusType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -368,31 +360,39 @@ func TestApplier(t *testing.T) {
|
|||
prune: true,
|
||||
inventoryPolicy: inventory.InventoryPolicyMustMatch,
|
||||
statusEvents: []pollevent.Event{},
|
||||
expectedEventTypes: []expectedEvent{
|
||||
expectedEvents: []testutil.ExpEvent{
|
||||
{
|
||||
eventType: event.InitType,
|
||||
EventType: event.InitType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventResourceUpdate,
|
||||
pruneEventOp: event.Pruned,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventResourceUpdate,
|
||||
pruneEventOp: event.Pruned,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.PruneType,
|
||||
PruneEvent: &testutil.ExpPruneEvent{
|
||||
Operation: event.Pruned,
|
||||
},
|
||||
},
|
||||
{
|
||||
EventType: event.PruneType,
|
||||
PruneEvent: &testutil.ExpPruneEvent{
|
||||
Operation: event.Pruned,
|
||||
},
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -412,27 +412,49 @@ func TestApplier(t *testing.T) {
|
|||
reconcileTimeout: time.Minute,
|
||||
prune: true,
|
||||
inventoryPolicy: inventory.InventoryPolicyMustMatch,
|
||||
statusEvents: []pollevent.Event{},
|
||||
expectedEventTypes: []expectedEvent{
|
||||
statusEvents: []pollevent.Event{
|
||||
{
|
||||
eventType: event.InitType,
|
||||
EventType: pollevent.ResourceUpdateEvent,
|
||||
Resource: &pollevent.ResourceStatus{
|
||||
Identifier: toIdentifier(t, resources["deployment"]),
|
||||
Status: status.InProgressStatus,
|
||||
},
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventResourceUpdate,
|
||||
applyErrorType: inventory.NewInventoryOverlapError(fmt.Errorf("")),
|
||||
EventType: pollevent.ResourceUpdateEvent,
|
||||
Resource: &pollevent.ResourceStatus{
|
||||
Identifier: toIdentifier(t, resources["deployment"]),
|
||||
Status: status.CurrentStatus,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedEvents: []testutil.ExpEvent{
|
||||
{
|
||||
EventType: event.InitType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.StatusType,
|
||||
statusEventType: event.StatusEventCompleted,
|
||||
EventType: event.ApplyType,
|
||||
ApplyEvent: &testutil.ExpApplyEvent{
|
||||
Error: inventory.NewInventoryOverlapError(fmt.Errorf("")),
|
||||
},
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -455,22 +477,27 @@ func TestApplier(t *testing.T) {
|
|||
reconcileTimeout: 0,
|
||||
prune: true,
|
||||
inventoryPolicy: inventory.InventoryPolicyMustMatch,
|
||||
expectedEventTypes: []expectedEvent{
|
||||
expectedEvents: []testutil.ExpEvent{
|
||||
{
|
||||
eventType: event.InitType,
|
||||
EventType: event.InitType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventResourceUpdate,
|
||||
pruneEventOp: event.PruneSkipped,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.PruneType,
|
||||
PruneEvent: &testutil.ExpPruneEvent{
|
||||
Operation: event.PruneSkipped,
|
||||
},
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -493,22 +520,27 @@ func TestApplier(t *testing.T) {
|
|||
reconcileTimeout: 0,
|
||||
prune: true,
|
||||
inventoryPolicy: inventory.InventoryPolicyMustMatch,
|
||||
expectedEventTypes: []expectedEvent{
|
||||
expectedEvents: []testutil.ExpEvent{
|
||||
{
|
||||
eventType: event.InitType,
|
||||
EventType: event.InitType,
|
||||
},
|
||||
{
|
||||
eventType: event.ApplyType,
|
||||
applyEventType: event.ApplyEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventResourceUpdate,
|
||||
pruneEventOp: event.Pruned,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
eventType: event.PruneType,
|
||||
pruneEventType: event.PruneEventCompleted,
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
{
|
||||
EventType: event.PruneType,
|
||||
PruneEvent: &testutil.ExpPruneEvent{
|
||||
Operation: event.Pruned,
|
||||
},
|
||||
},
|
||||
{
|
||||
EventType: event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@@ -590,39 +622,29 @@ func TestApplier(t *testing.T) {
        })

        var events []event.Event
        for e := range eventChannel {
            if e.Type == event.ApplyType && e.ApplyEvent.Type == event.ApplyEventCompleted {
                close(poller.start)
            }
            events = append(events, e)
        }
        timer := time.NewTimer(30 * time.Second)

        if !assert.Equal(t, len(tc.expectedEventTypes), len(events)) {
            t.FailNow()
        }

        for i, e := range events {
            expected := tc.expectedEventTypes[i]
            if !assert.Equal(t, expected.eventType.String(), e.Type.String()) {
                t.FailNow()
            }

            switch expected.eventType {
            case event.InitType:
            case event.ApplyType:
                assert.Equal(t, expected.applyEventType.String(), e.ApplyEvent.Type.String())
                assert.Equal(t, reflect.TypeOf(expected.applyErrorType), reflect.TypeOf(e.ApplyEvent.Error))
            case event.StatusType:
                assert.Equal(t, expected.statusEventType.String(), e.StatusEvent.Type.String())
            case event.PruneType:
                assert.Equal(t, expected.pruneEventType.String(), e.PruneEvent.Type.String())
                assert.Equal(t, expected.pruneEventOp.String(), e.PruneEvent.Operation.String())
            case event.DeleteType:
                assert.Equal(t, expected.deleteEventType.String(), e.DeleteEvent.Type.String())
            default:
                assert.Fail(t, "unexpected event type %s", expected.eventType.String())
    loop:
        for {
            select {
            case e, ok := <-eventChannel:
                if !ok {
                    break loop
                }
                if e.Type == event.ActionGroupType &&
                    e.ActionGroupEvent.Action == event.ApplyAction &&
                    e.ActionGroupEvent.Type == event.Finished {
                    close(poller.start)
                }
                events = append(events, e)
            case <-timer.C:
                t.Errorf("timeout")
                break loop
            }
        }

        err = testutil.VerifyEvents(tc.expectedEvents, events)
        assert.NoError(t, err)
        })
    }
}
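The loop above now collects events under a timeout and hands them to testutil.VerifyEvents instead of asserting on the removed per-event sub-types. A sketch of the expectation style it consumes (field names taken from the test cases above; illustrative only, not code from the commit):

package example

import (
    "sigs.k8s.io/cli-utils/pkg/apply/event"
    "sigs.k8s.io/cli-utils/pkg/testutil"
)

// expected matches on the event type and, optionally, on nested details
// such as the prune operation.
var expected = []testutil.ExpEvent{
    {EventType: event.InitType},
    {EventType: event.ActionGroupType},
    {
        EventType: event.PruneType,
        PruneEvent: &testutil.ExpPruneEvent{
            Operation: event.Pruned,
        },
    },
    {EventType: event.ActionGroupType},
}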
@@ -110,9 +110,11 @@ func (d *Destroyer) Run(inv inventory.InventoryInfo, option *DestroyerOption) <-
        // events and shut down before we continue.
        <-completedChannel
        ch <- event.Event{
            Type: event.DeleteType,
            DeleteEvent: event.DeleteEvent{
                Type: event.DeleteEventCompleted,
            Type: event.ActionGroupType,
            ActionGroupEvent: event.ActionGroupEvent{
                GroupName: "destroyer",
                Type: event.Finished,
                Action: event.DeleteAction,
            },
        }
    }()
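The destroyer's final event is now a generic ActionGroupEvent rather than a DeleteEventCompleted. A hedged sketch of a caller draining the channel until that event arrives (the helper name and wiring are hypothetical):

package example

import "sigs.k8s.io/cli-utils/pkg/apply/event"

// waitForDestroy drains the destroyer's event channel until the final
// "destroyer" group reports Finished, or an error event is seen.
func waitForDestroy(ch <-chan event.Event) error {
    for e := range ch {
        switch e.Type {
        case event.ErrorType:
            return e.ErrorEvent.Err
        case event.ActionGroupType:
            age := e.ActionGroupEvent
            if age.GroupName == "destroyer" &&
                age.Action == event.DeleteAction &&
                age.Type == event.Finished {
                return nil
            }
        }
    }
    return nil
}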
@@ -135,19 +137,9 @@ func runPruneEventTransformer(eventChannel chan event.Event) (chan event.Event,
        if msg.Type != event.PruneType {
            eventChannel <- msg
        } else {
            var deleteEventType event.DeleteEventType
            switch msg.PruneEvent.Type {
            case event.PruneEventFailed:
                deleteEventType = event.DeleteEventFailed
            case event.PruneEventCompleted:
                deleteEventType = event.DeleteEventCompleted
            case event.PruneEventResourceUpdate:
                deleteEventType = event.DeleteEventResourceUpdate
            }
            eventChannel <- event.Event{
                Type: event.DeleteType,
                DeleteEvent: event.DeleteEvent{
                    Type: deleteEventType,
                    Operation: transformPruneOperation(msg.PruneEvent.Operation),
                    Object: msg.PruneEvent.Object,
                    Identifier: msg.PruneEvent.Identifier,
@@ -161,12 +153,16 @@ func runPruneEventTransformer(eventChannel chan event.Event) (chan event.Event,
}

func transformPruneOperation(pruneOp event.PruneEventOperation) event.DeleteEventOperation {
    var deleteOp event.DeleteEventOperation
    switch pruneOp {
    case event.PruneSkipped:
        return event.DeleteSkipped
        deleteOp = event.DeleteSkipped
    case event.Pruned:
        return event.Deleted
        deleteOp = event.Deleted
    case event.PruneUnspecified:
        deleteOp = event.DeleteUnspecified
    default:
        panic(fmt.Errorf("unknown prune operation %s", pruneOp.String()))
    }
    return deleteOp
}
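The prune-to-delete mapping now covers the new Unspecified value and panics on anything unknown. A minimal in-package test sketch (not part of this commit; it assumes the helper lives in the same package and that the file already imports testing and the event package) could exercise it like this:

// Hypothetical test; transformPruneOperation is the unexported helper above.
func TestTransformPruneOperation(t *testing.T) {
    cases := map[event.PruneEventOperation]event.DeleteEventOperation{
        event.PruneUnspecified: event.DeleteUnspecified,
        event.Pruned:           event.Deleted,
        event.PruneSkipped:     event.DeleteSkipped,
    }
    for in, want := range cases {
        if got := transformPruneOperation(in); got != want {
            t.Errorf("transformPruneOperation(%s) = %s, want %s", in, got, want)
        }
    }
}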
@@ -0,0 +1,24 @@
// Code generated by "stringer -type=ActionGroupEventType"; DO NOT EDIT.

package event

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[Started-0]
    _ = x[Finished-1]
}

const _ActionGroupEventType_name = "StartedFinished"

var _ActionGroupEventType_index = [...]uint8{0, 7, 15}

func (i ActionGroupEventType) String() string {
    if i < 0 || i >= ActionGroupEventType(len(_ActionGroupEventType_index)-1) {
        return "ActionGroupEventType(" + strconv.FormatInt(int64(i), 10) + ")"
    }
    return _ActionGroupEventType_name[_ActionGroupEventType_index[i]:_ActionGroupEventType_index[i+1]]
}
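The generated Stringer gives the two new values readable names, which the printers and tests below rely on. A quick, illustrative demonstration of its behaviour (not part of the commit):

package main

import (
    "fmt"

    "sigs.k8s.io/cli-utils/pkg/apply/event"
)

func main() {
    fmt.Println(event.Started)                 // Started
    fmt.Println(event.Finished)                // Finished
    fmt.Println(event.ActionGroupEventType(7)) // ActionGroupEventType(7)
}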
|
@ -1,6 +1,3 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=ApplyEventOperation"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
|
@ -11,16 +8,16 @@ func _() {
|
|||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[ServersideApplied-0]
|
||||
_ = x[Created-1]
|
||||
_ = x[Unchanged-2]
|
||||
_ = x[Configured-3]
|
||||
_ = x[Failed-4]
|
||||
_ = x[ApplyUnspecified-0]
|
||||
_ = x[ServersideApplied-1]
|
||||
_ = x[Created-2]
|
||||
_ = x[Unchanged-3]
|
||||
_ = x[Configured-4]
|
||||
}
|
||||
|
||||
const _ApplyEventOperation_name = "ServersideAppliedCreatedUnchangedConfiguredFailed"
|
||||
const _ApplyEventOperation_name = "ApplyUnspecifiedServersideAppliedCreatedUnchangedConfigured"
|
||||
|
||||
var _ApplyEventOperation_index = [...]uint8{0, 17, 24, 33, 43, 49}
|
||||
var _ApplyEventOperation_index = [...]uint8{0, 16, 33, 40, 49, 59}
|
||||
|
||||
func (i ApplyEventOperation) String() string {
|
||||
if i < 0 || i >= ApplyEventOperation(len(_ApplyEventOperation_index)-1) {
|
||||
|
|
|
|||
|
|
@ -1,27 +0,0 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=ApplyEventType"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[ApplyEventResourceUpdate-0]
|
||||
_ = x[ApplyEventCompleted-1]
|
||||
}
|
||||
|
||||
const _ApplyEventType_name = "ApplyEventResourceUpdateApplyEventCompleted"
|
||||
|
||||
var _ApplyEventType_index = [...]uint8{0, 24, 43}
|
||||
|
||||
func (i ApplyEventType) String() string {
|
||||
if i < 0 || i >= ApplyEventType(len(_ApplyEventType_index)-1) {
|
||||
return "ApplyEventType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _ApplyEventType_name[_ApplyEventType_index[i]:_ApplyEventType_index[i+1]]
|
||||
}
|
||||
|
|
@ -1,6 +1,3 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=DeleteEventOperation"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
|
@ -11,13 +8,14 @@ func _() {
|
|||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[Deleted-0]
|
||||
_ = x[DeleteSkipped-1]
|
||||
_ = x[DeleteUnspecified-0]
|
||||
_ = x[Deleted-1]
|
||||
_ = x[DeleteSkipped-2]
|
||||
}
|
||||
|
||||
const _DeleteEventOperation_name = "DeletedDeleteSkipped"
|
||||
const _DeleteEventOperation_name = "DeleteUnspecifiedDeletedDeleteSkipped"
|
||||
|
||||
var _DeleteEventOperation_index = [...]uint8{0, 7, 20}
|
||||
var _DeleteEventOperation_index = [...]uint8{0, 17, 24, 37}
|
||||
|
||||
func (i DeleteEventOperation) String() string {
|
||||
if i < 0 || i >= DeleteEventOperation(len(_DeleteEventOperation_index)-1) {
|
||||
|
|
|
|||
|
|
@ -1,28 +0,0 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=DeleteEventType"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[DeleteEventResourceUpdate-0]
|
||||
_ = x[DeleteEventCompleted-1]
|
||||
_ = x[DeleteEventFailed-2]
|
||||
}
|
||||
|
||||
const _DeleteEventType_name = "DeleteEventResourceUpdateDeleteEventCompletedDeleteEventFailed"
|
||||
|
||||
var _DeleteEventType_index = [...]uint8{0, 25, 45, 62}
|
||||
|
||||
func (i DeleteEventType) String() string {
|
||||
if i < 0 || i >= DeleteEventType(len(_DeleteEventType_index)-1) {
|
||||
return "DeleteEventType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _DeleteEventType_name[_DeleteEventType_index[i]:_DeleteEventType_index[i+1]]
|
||||
}
|
||||
|
|
@@ -16,6 +16,7 @@ type Type int
const (
    InitType Type = iota
    ErrorType
    ActionGroupType
    ApplyType
    StatusType
    PruneType
@@ -37,6 +38,11 @@ type Event struct {
    // ErrorEvent contains information about any errors encountered.
    ErrorEvent ErrorEvent

    // ActionGroupEvent contains information about the progression of tasks
    // to apply, prune, and destroy resources, and tasks that involves waiting
    // for a set of resources to reach a specific state.
    ActionGroupEvent ActionGroupEvent

    // ApplyEvent contains information about progress pertaining to
    // applying a resource to the cluster.
    ApplyEvent ApplyEvent
@@ -55,7 +61,7 @@ type Event struct {
}

type InitEvent struct {
    ResourceGroups []ResourceGroup
    ActionGroups []ActionGroup
}

//go:generate stringer -type=ResourceAction
@@ -64,9 +70,12 @@ type ResourceAction int
const (
    ApplyAction ResourceAction = iota
    PruneAction
    DeleteAction
    WaitAction
)

type ResourceGroup struct {
type ActionGroup struct {
    Name string
    Action ResourceAction
    Identifiers []object.ObjMetadata
}
@@ -75,92 +84,73 @@ type ErrorEvent struct {
    Err error
}

//go:generate stringer -type=ApplyEventType
type ApplyEventType int
//go:generate stringer -type=ActionGroupEventType
type ActionGroupEventType int

const (
    ApplyEventResourceUpdate ApplyEventType = iota
    ApplyEventCompleted
    Started ActionGroupEventType = iota
    Finished
)

type ActionGroupEvent struct {
    GroupName string
    Action ResourceAction
    Type ActionGroupEventType
}

//go:generate stringer -type=ApplyEventOperation
type ApplyEventOperation int

const (
    ServersideApplied ApplyEventOperation = iota
    ApplyUnspecified ApplyEventOperation = iota
    ServersideApplied
    Created
    Unchanged
    Configured
    Failed
)

type ApplyEvent struct {
    Type ApplyEventType
    Operation ApplyEventOperation
    Object *unstructured.Unstructured
    Identifier object.ObjMetadata
    Operation ApplyEventOperation
    Resource *unstructured.Unstructured
    Error error
}

//go:generate stringer -type=StatusEventType
type StatusEventType int

const (
    StatusEventResourceUpdate StatusEventType = iota
    StatusEventCompleted
)

type StatusEvent struct {
    Type StatusEventType
    Resource *pollevent.ResourceStatus
    Identifier object.ObjMetadata
    PollResourceInfo *pollevent.ResourceStatus
    Resource *unstructured.Unstructured
    Error error
}

//go:generate stringer -type=PruneEventType
type PruneEventType int

const (
    PruneEventResourceUpdate PruneEventType = iota
    PruneEventCompleted
    PruneEventFailed
)

//go:generate stringer -type=PruneEventOperation
type PruneEventOperation int

const (
    Pruned PruneEventOperation = iota
    PruneUnspecified PruneEventOperation = iota
    Pruned
    PruneSkipped
)

type PruneEvent struct {
    Type PruneEventType
    Identifier object.ObjMetadata
    Operation PruneEventOperation
    Object *unstructured.Unstructured
    Identifier object.ObjMetadata
    Error error
}

//go:generate stringer -type=DeleteEventType
type DeleteEventType int

const (
    DeleteEventResourceUpdate DeleteEventType = iota
    DeleteEventCompleted
    DeleteEventFailed
)

//go:generate stringer -type=DeleteEventOperation
type DeleteEventOperation int

const (
    Deleted DeleteEventOperation = iota
    DeleteUnspecified DeleteEventOperation = iota
    Deleted
    DeleteSkipped
)

type DeleteEvent struct {
    Type DeleteEventType
    Identifier object.ObjMetadata
    Operation DeleteEventOperation
    Object *unstructured.Unstructured
    Identifier object.ObjMetadata
    Error error
}
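With the per-action *EventType values gone, task progress is signalled through ActionGroupEvent while the Apply/Status/Prune/Delete events carry only per-resource details. A small, self-contained sketch of producing and consuming events in the new shape (illustrative only):

package main

import (
    "fmt"

    "sigs.k8s.io/cli-utils/pkg/apply/event"
)

func main() {
    ch := make(chan event.Event, 2)
    // A task group reports Started and Finished around its per-resource events.
    for _, t := range []event.ActionGroupEventType{event.Started, event.Finished} {
        ch <- event.Event{
            Type: event.ActionGroupType,
            ActionGroupEvent: event.ActionGroupEvent{
                GroupName: "apply-0",
                Action:    event.ApplyAction,
                Type:      t,
            },
        }
    }
    close(ch)

    for e := range ch {
        age := e.ActionGroupEvent
        fmt.Printf("%s %s %s\n", age.GroupName, age.Action, age.Type)
    }
}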
|
|
|||
|
|
@ -1,6 +1,3 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=PruneEventOperation"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
|
@ -11,13 +8,14 @@ func _() {
|
|||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[Pruned-0]
|
||||
_ = x[PruneSkipped-1]
|
||||
_ = x[PruneUnspecified-0]
|
||||
_ = x[Pruned-1]
|
||||
_ = x[PruneSkipped-2]
|
||||
}
|
||||
|
||||
const _PruneEventOperation_name = "PrunedPruneSkipped"
|
||||
const _PruneEventOperation_name = "PruneUnspecifiedPrunedPruneSkipped"
|
||||
|
||||
var _PruneEventOperation_index = [...]uint8{0, 6, 18}
|
||||
var _PruneEventOperation_index = [...]uint8{0, 16, 22, 34}
|
||||
|
||||
func (i PruneEventOperation) String() string {
|
||||
if i < 0 || i >= PruneEventOperation(len(_PruneEventOperation_index)-1) {
|
||||
|
|
|
|||
|
|
@ -1,28 +0,0 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=PruneEventType"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[PruneEventResourceUpdate-0]
|
||||
_ = x[PruneEventCompleted-1]
|
||||
_ = x[PruneEventFailed-2]
|
||||
}
|
||||
|
||||
const _PruneEventType_name = "PruneEventResourceUpdatePruneEventCompletedPruneEventFailed"
|
||||
|
||||
var _PruneEventType_index = [...]uint8{0, 24, 43, 59}
|
||||
|
||||
func (i PruneEventType) String() string {
|
||||
if i < 0 || i >= PruneEventType(len(_PruneEventType_index)-1) {
|
||||
return "PruneEventType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _PruneEventType_name[_PruneEventType_index[i]:_PruneEventType_index[i+1]]
|
||||
}
|
||||
|
|
@ -1,6 +1,3 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=ResourceAction"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
|
@ -13,11 +10,13 @@ func _() {
|
|||
var x [1]struct{}
|
||||
_ = x[ApplyAction-0]
|
||||
_ = x[PruneAction-1]
|
||||
_ = x[DeleteAction-2]
|
||||
_ = x[WaitAction-3]
|
||||
}
|
||||
|
||||
const _ResourceAction_name = "ApplyActionPruneAction"
|
||||
const _ResourceAction_name = "ApplyActionPruneActionDeleteActionWaitAction"
|
||||
|
||||
var _ResourceAction_index = [...]uint8{0, 11, 22}
|
||||
var _ResourceAction_index = [...]uint8{0, 11, 22, 34, 44}
|
||||
|
||||
func (i ResourceAction) String() string {
|
||||
if i < 0 || i >= ResourceAction(len(_ResourceAction_index)-1) {
|
||||
|
|
|
|||
|
|
@ -1,27 +0,0 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=StatusEventType"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[StatusEventResourceUpdate-0]
|
||||
_ = x[StatusEventCompleted-1]
|
||||
}
|
||||
|
||||
const _StatusEventType_name = "StatusEventResourceUpdateStatusEventCompleted"
|
||||
|
||||
var _StatusEventType_index = [...]uint8{0, 25, 45}
|
||||
|
||||
func (i StatusEventType) String() string {
|
||||
if i < 0 || i >= StatusEventType(len(_StatusEventType_index)-1) {
|
||||
return "StatusEventType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _StatusEventType_name[_StatusEventType_index[i]:_StatusEventType_index[i+1]]
|
||||
}
|
||||
|
|
@ -1,6 +1,3 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=Type"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
|
@ -13,15 +10,16 @@ func _() {
|
|||
var x [1]struct{}
|
||||
_ = x[InitType-0]
|
||||
_ = x[ErrorType-1]
|
||||
_ = x[ApplyType-2]
|
||||
_ = x[StatusType-3]
|
||||
_ = x[PruneType-4]
|
||||
_ = x[DeleteType-5]
|
||||
_ = x[ActionGroupType-2]
|
||||
_ = x[ApplyType-3]
|
||||
_ = x[StatusType-4]
|
||||
_ = x[PruneType-5]
|
||||
_ = x[DeleteType-6]
|
||||
}
|
||||
|
||||
const _Type_name = "InitTypeErrorTypeApplyTypeStatusTypePruneTypeDeleteType"
|
||||
const _Type_name = "InitTypeErrorTypeActionGroupTypeApplyTypeStatusTypePruneTypeDeleteType"
|
||||
|
||||
var _Type_index = [...]uint8{0, 8, 17, 26, 36, 45, 55}
|
||||
var _Type_index = [...]uint8{0, 8, 17, 32, 41, 51, 60, 70}
|
||||
|
||||
func (i Type) String() string {
|
||||
if i < 0 || i >= Type(len(_Type_index)-1) {
|
||||
|
|
|
|||
|
|
@@ -250,7 +250,6 @@ func createPruneEvent(id object.ObjMetadata, obj *unstructured.Unstructured, op
    return event.Event{
        Type: event.PruneType,
        PruneEvent: event.PruneEvent{
            Type: event.PruneEventResourceUpdate,
            Operation: op,
            Object: obj,
            Identifier: id,

@@ -263,7 +262,6 @@ func createPruneFailedEvent(objMeta object.ObjMetadata, err error) event.Event {
    return event.Event{
        Type: event.PruneType,
        PruneEvent: event.PruneEvent{
            Type: event.PruneEventFailed,
            Identifier: objMeta,
            Error: err,
        },
@@ -15,6 +15,7 @@
package solver

import (
    "fmt"
    "time"

    v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

@@ -41,6 +42,31 @@ type TaskQueueSolver struct {
    Mapper meta.RESTMapper
}

type TaskQueue struct {
    tasks []taskrunner.Task
}

func (tq *TaskQueue) ToChannel() chan taskrunner.Task {
    taskQueue := make(chan taskrunner.Task, len(tq.tasks))
    for _, t := range tq.tasks {
        taskQueue <- t
    }
    return taskQueue
}

func (tq *TaskQueue) ToActionGroups() []event.ActionGroup {
    var ags []event.ActionGroup

    for _, t := range tq.tasks {
        ags = append(ags, event.ActionGroup{
            Name: t.Name(),
            Action: t.Action(),
            Identifiers: t.Identifiers(),
        })
    }
    return ags
}

type Options struct {
    ServerSideOptions common.ServerSideOptions
    ReconcileTimeout time.Duration
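BuildTaskQueue now returns this TaskQueue instead of a bare channel, so a caller can derive both the runnable queue and the ActionGroup metadata for the InitEvent from the same object. A hedged sketch of that wiring (assuming the solver package path pkg/apply/solver; this is not the applier's literal code):

package example

import (
    "sigs.k8s.io/cli-utils/pkg/apply/event"
    "sigs.k8s.io/cli-utils/pkg/apply/solver"
    "sigs.k8s.io/cli-utils/pkg/apply/taskrunner"
)

// initEventFor publishes the task plan before the runner starts consuming
// the queue returned by ToChannel.
func initEventFor(tq *solver.TaskQueue) (event.Event, chan taskrunner.Task) {
    initEvent := event.Event{
        Type: event.InitType,
        InitEvent: event.InitEvent{
            ActionGroups: tq.ToActionGroups(),
        },
    }
    return initEvent, tq.ToChannel()
}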
@ -63,7 +89,11 @@ type resourceObjects interface {
|
|||
// and constructs the task queue. The options parameter allows
|
||||
// customization of how the task queue are built.
|
||||
func (t *TaskQueueSolver) BuildTaskQueue(ro resourceObjects,
|
||||
o Options) chan taskrunner.Task {
|
||||
o Options) *TaskQueue {
|
||||
var applyCounter int
|
||||
var pruneCounter int
|
||||
var waitCounter int
|
||||
|
||||
var tasks []taskrunner.Task
|
||||
remainingInfos := ro.ObjsForApply()
|
||||
// Convert slice of previous inventory objects into a map.
|
||||
|
|
@ -76,6 +106,7 @@ func (t *TaskQueueSolver) BuildTaskQueue(ro resourceObjects,
|
|||
crdSplitRes, hasCRDs := splitAfterCRDs(remainingInfos)
|
||||
if hasCRDs {
|
||||
tasks = append(tasks, &task.ApplyTask{
|
||||
TaskName: fmt.Sprintf("apply-%d", applyCounter),
|
||||
Objects: append(crdSplitRes.before, crdSplitRes.crds...),
|
||||
CRDs: crdSplitRes.crds,
|
||||
PrevInventory: prevInventory,
|
||||
|
|
@ -87,21 +118,24 @@ func (t *TaskQueueSolver) BuildTaskQueue(ro resourceObjects,
|
|||
InventoryPolicy: o.InventoryPolicy,
|
||||
InvInfo: ro.Inventory(),
|
||||
})
|
||||
applyCounter += 1
|
||||
if !o.DryRunStrategy.ClientOrServerDryRun() {
|
||||
objs := object.UnstructuredsToObjMetas(crdSplitRes.crds)
|
||||
tasks = append(tasks, taskrunner.NewWaitTask(
|
||||
fmt.Sprintf("wait-%d", waitCounter),
|
||||
objs,
|
||||
taskrunner.AllCurrent,
|
||||
1*time.Minute),
|
||||
&task.ResetRESTMapperTask{
|
||||
Mapper: t.Mapper,
|
||||
})
|
||||
1*time.Minute,
|
||||
t.Mapper),
|
||||
)
|
||||
waitCounter += 1
|
||||
}
|
||||
remainingInfos = crdSplitRes.after
|
||||
}
|
||||
|
||||
tasks = append(tasks,
|
||||
&task.ApplyTask{
|
||||
TaskName: fmt.Sprintf("apply-%d", applyCounter),
|
||||
Objects: remainingInfos,
|
||||
CRDs: crdSplitRes.crds,
|
||||
PrevInventory: prevInventory,
|
||||
|
|
@ -113,36 +147,24 @@ func (t *TaskQueueSolver) BuildTaskQueue(ro resourceObjects,
|
|||
InventoryPolicy: o.InventoryPolicy,
|
||||
InvInfo: ro.Inventory(),
|
||||
},
|
||||
&task.SendEventTask{
|
||||
Event: event.Event{
|
||||
Type: event.ApplyType,
|
||||
ApplyEvent: event.ApplyEvent{
|
||||
Type: event.ApplyEventCompleted,
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
if !o.DryRunStrategy.ClientOrServerDryRun() && o.ReconcileTimeout != time.Duration(0) {
|
||||
tasks = append(tasks,
|
||||
taskrunner.NewWaitTask(
|
||||
ro.IdsForApply(),
|
||||
fmt.Sprintf("wait-%d", waitCounter),
|
||||
object.UnstructuredsToObjMetas(remainingInfos),
|
||||
taskrunner.AllCurrent,
|
||||
o.ReconcileTimeout),
|
||||
&task.SendEventTask{
|
||||
Event: event.Event{
|
||||
Type: event.StatusType,
|
||||
StatusEvent: event.StatusEvent{
|
||||
Type: event.StatusEventCompleted,
|
||||
},
|
||||
},
|
||||
},
|
||||
o.ReconcileTimeout,
|
||||
t.Mapper),
|
||||
)
|
||||
waitCounter += 1
|
||||
}
|
||||
|
||||
if o.Prune {
|
||||
tasks = append(tasks,
|
||||
&task.PruneTask{
|
||||
TaskName: fmt.Sprintf("prune-%d", pruneCounter),
|
||||
Objects: ro.ObjsForApply(),
|
||||
InventoryObject: ro.Inventory(),
|
||||
PruneOptions: t.PruneOptions,
|
||||
|
|
@ -150,43 +172,23 @@ func (t *TaskQueueSolver) BuildTaskQueue(ro resourceObjects,
|
|||
DryRunStrategy: o.DryRunStrategy,
|
||||
InventoryPolicy: o.InventoryPolicy,
|
||||
},
|
||||
&task.SendEventTask{
|
||||
Event: event.Event{
|
||||
Type: event.PruneType,
|
||||
PruneEvent: event.PruneEvent{
|
||||
Type: event.PruneEventCompleted,
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
if !o.DryRunStrategy.ClientOrServerDryRun() && o.PruneTimeout != time.Duration(0) {
|
||||
tasks = append(tasks,
|
||||
taskrunner.NewWaitTask(
|
||||
fmt.Sprintf("wait-%d", waitCounter),
|
||||
ro.IdsForPrune(),
|
||||
taskrunner.AllNotFound,
|
||||
o.PruneTimeout),
|
||||
&task.SendEventTask{
|
||||
Event: event.Event{
|
||||
Type: event.StatusType,
|
||||
StatusEvent: event.StatusEvent{
|
||||
Type: event.StatusEventCompleted,
|
||||
},
|
||||
},
|
||||
},
|
||||
o.PruneTimeout,
|
||||
t.Mapper),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return tasksToQueue(tasks)
|
||||
}
|
||||
|
||||
func tasksToQueue(tasks []taskrunner.Task) chan taskrunner.Task {
|
||||
taskQueue := make(chan taskrunner.Task, len(tasks))
|
||||
for _, t := range tasks {
|
||||
taskQueue <- t
|
||||
return &TaskQueue{
|
||||
tasks: tasks,
|
||||
}
|
||||
return taskQueue
|
||||
}
|
||||
|
||||
type crdSplitResult struct {
|
||||
|
|
|
|||
|
|
@ -38,8 +38,10 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
objs: []*unstructured.Unstructured{},
|
||||
options: Options{},
|
||||
expectedTasks: []taskrunner.Task{
|
||||
&task.ApplyTask{},
|
||||
&task.SendEventTask{},
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-0",
|
||||
Objects: []*unstructured.Unstructured{},
|
||||
},
|
||||
},
|
||||
},
|
||||
"single resource": {
|
||||
|
|
@ -49,11 +51,11 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
options: Options{},
|
||||
expectedTasks: []taskrunner.Task{
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-0",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
depInfo,
|
||||
},
|
||||
},
|
||||
&task.SendEventTask{},
|
||||
},
|
||||
},
|
||||
"multiple resources with wait": {
|
||||
|
|
@ -66,19 +68,20 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
},
|
||||
expectedTasks: []taskrunner.Task{
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-0",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
depInfo,
|
||||
customInfo,
|
||||
},
|
||||
},
|
||||
&task.SendEventTask{},
|
||||
taskrunner.NewWaitTask(
|
||||
"wait-0",
|
||||
[]object.ObjMetadata{
|
||||
ignoreErrInfoToObjMeta(depInfo),
|
||||
ignoreErrInfoToObjMeta(customInfo),
|
||||
},
|
||||
taskrunner.AllCurrent, 1*time.Second),
|
||||
&task.SendEventTask{},
|
||||
taskrunner.AllCurrent, 1*time.Second,
|
||||
testutil.NewFakeRESTMapper()),
|
||||
},
|
||||
},
|
||||
"multiple resources with wait and prune": {
|
||||
|
|
@ -92,21 +95,23 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
},
|
||||
expectedTasks: []taskrunner.Task{
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-0",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
depInfo,
|
||||
customInfo,
|
||||
},
|
||||
},
|
||||
&task.SendEventTask{},
|
||||
taskrunner.NewWaitTask(
|
||||
"wait-0",
|
||||
[]object.ObjMetadata{
|
||||
ignoreErrInfoToObjMeta(depInfo),
|
||||
ignoreErrInfoToObjMeta(customInfo),
|
||||
},
|
||||
taskrunner.AllCurrent, 1*time.Second),
|
||||
&task.SendEventTask{},
|
||||
&task.PruneTask{},
|
||||
&task.SendEventTask{},
|
||||
taskrunner.AllCurrent, 1*time.Second,
|
||||
testutil.NewFakeRESTMapper()),
|
||||
&task.PruneTask{
|
||||
TaskName: "prune-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
"multiple resources with wait, prune and dryrun": {
|
||||
|
|
@ -121,14 +126,15 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
},
|
||||
expectedTasks: []taskrunner.Task{
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-0",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
depInfo,
|
||||
customInfo,
|
||||
},
|
||||
},
|
||||
&task.SendEventTask{},
|
||||
&task.PruneTask{},
|
||||
&task.SendEventTask{},
|
||||
&task.PruneTask{
|
||||
TaskName: "prune-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
"multiple resources with wait, prune and server-dryrun": {
|
||||
|
|
@ -143,14 +149,15 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
},
|
||||
expectedTasks: []taskrunner.Task{
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-0",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
depInfo,
|
||||
customInfo,
|
||||
},
|
||||
},
|
||||
&task.SendEventTask{},
|
||||
&task.PruneTask{},
|
||||
&task.SendEventTask{},
|
||||
&task.PruneTask{
|
||||
TaskName: "prune-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
"multiple resources including CRD": {
|
||||
|
|
@ -163,29 +170,31 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
},
|
||||
expectedTasks: []taskrunner.Task{
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-0",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
crdInfo,
|
||||
},
|
||||
},
|
||||
taskrunner.NewWaitTask(
|
||||
"wait-0",
|
||||
[]object.ObjMetadata{
|
||||
ignoreErrInfoToObjMeta(crdInfo),
|
||||
},
|
||||
taskrunner.AllCurrent, 1*time.Second),
|
||||
&task.ResetRESTMapperTask{},
|
||||
taskrunner.AllCurrent, 1*time.Second,
|
||||
testutil.NewFakeRESTMapper()),
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-1",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
depInfo,
|
||||
},
|
||||
},
|
||||
&task.SendEventTask{},
|
||||
taskrunner.NewWaitTask(
|
||||
"wait-1",
|
||||
[]object.ObjMetadata{
|
||||
ignoreErrInfoToObjMeta(crdInfo),
|
||||
ignoreErrInfoToObjMeta(depInfo),
|
||||
},
|
||||
taskrunner.AllCurrent, 1*time.Second),
|
||||
&task.SendEventTask{},
|
||||
taskrunner.AllCurrent, 1*time.Second,
|
||||
testutil.NewFakeRESTMapper()),
|
||||
},
|
||||
},
|
||||
"no wait with CRDs if it is a dryrun": {
|
||||
|
|
@ -199,16 +208,17 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
},
|
||||
expectedTasks: []taskrunner.Task{
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-0",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
crdInfo,
|
||||
},
|
||||
},
|
||||
&task.ApplyTask{
|
||||
TaskName: "apply-1",
|
||||
Objects: []*unstructured.Unstructured{
|
||||
depInfo,
|
||||
},
|
||||
},
|
||||
&task.SendEventTask{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -227,12 +237,11 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
idsForPrune: nil,
|
||||
}, tc.options)
|
||||
|
||||
tasks := queueToSlice(tq)
|
||||
|
||||
assert.Equal(t, len(tc.expectedTasks), len(tasks))
|
||||
assert.Equal(t, len(tc.expectedTasks), len(tq.tasks))
|
||||
for i, expTask := range tc.expectedTasks {
|
||||
actualTask := tasks[i]
|
||||
actualTask := tq.tasks[i]
|
||||
assert.Equal(t, getType(expTask), getType(actualTask))
|
||||
assert.Equal(t, expTask.Name(), actualTask.Name())
|
||||
|
||||
switch expTsk := expTask.(type) {
|
||||
case *task.ApplyTask:
|
||||
|
|
@ -244,9 +253,9 @@ func TestTaskQueueSolver_BuildTaskQueue(t *testing.T) {
|
|||
}
|
||||
case *taskrunner.WaitTask:
|
||||
actWaitTask := toWaitTask(t, actualTask)
|
||||
assert.Equal(t, len(expTsk.Identifiers), len(actWaitTask.Identifiers))
|
||||
for j, id := range expTsk.Identifiers {
|
||||
actID := actWaitTask.Identifiers[j]
|
||||
assert.Equal(t, len(expTsk.Ids), len(actWaitTask.Ids))
|
||||
for j, id := range expTsk.Ids {
|
||||
actID := actWaitTask.Ids[j]
|
||||
assert.Equal(t, id, actID)
|
||||
}
|
||||
}
|
||||
|
|
@ -292,18 +301,6 @@ func createInfo(apiVersion, kind, name, namespace string) *resource.Info {
|
|||
}
|
||||
}
|
||||
|
||||
func queueToSlice(tq chan taskrunner.Task) []taskrunner.Task {
|
||||
var tasks []taskrunner.Task
|
||||
for {
|
||||
select {
|
||||
case t := <-tq:
|
||||
tasks = append(tasks, t)
|
||||
default:
|
||||
return tasks
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getType(task taskrunner.Task) reflect.Type {
|
||||
return reflect.TypeOf(task)
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -46,6 +46,8 @@ type applyOptions interface {
// ApplyTask applies the given Objects to the cluster
// by using the ApplyOptions.
type ApplyTask struct {
    TaskName string

    Factory util.Factory
    InfoHelper info.InfoHelper
    Mapper meta.RESTMapper

@@ -66,6 +68,18 @@ var applyOptionsFactoryFunc = newApplyOptions
// getClusterObj gets the cluster object. Used for allow unit testing.
var getClusterObj = getClusterObject

func (a *ApplyTask) Name() string {
    return a.TaskName
}

func (a *ApplyTask) Action() event.ResourceAction {
    return event.ApplyAction
}

func (a *ApplyTask) Identifiers() []object.ObjMetadata {
    return object.UnstructuredsToObjMetas(a.Objects)
}

// Start creates a new goroutine that will invoke
// the Run function on the ApplyOptions to update
// the cluster. It will push a TaskResult on the taskChannel
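Every task now reports its own name, action, and identifiers; this is what the solver's ToActionGroups and the runner's ActionGroupEvents are built from. A small helper sketch showing that derivation (illustrative, not code from the commit):

package example

import (
    "sigs.k8s.io/cli-utils/pkg/apply/event"
    "sigs.k8s.io/cli-utils/pkg/apply/taskrunner"
)

// actionGroupFor mirrors what TaskQueue.ToActionGroups does for each task.
func actionGroupFor(t taskrunner.Task) event.ActionGroup {
    return event.ActionGroup{
        Name:        t.Name(),
        Action:      t.Action(),
        Identifiers: t.Identifiers(),
    }
}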
@ -140,8 +154,8 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
|
|||
klog.Errorf("unable to convert obj to info for %s/%s (%s)--continue",
|
||||
obj.GetNamespace(), obj.GetName(), err)
|
||||
}
|
||||
taskContext.EventChannel() <- createApplyEvent(
|
||||
id, event.Failed, applyerror.NewUnknownTypeError(err))
|
||||
taskContext.EventChannel() <- createApplyFailedEvent(id,
|
||||
applyerror.NewUnknownTypeError(err))
|
||||
taskContext.CaptureResourceFailure(id)
|
||||
continue
|
||||
}
|
||||
|
|
@ -153,15 +167,13 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
|
|||
klog.Errorf("error (%s) retrieving %s/%s from cluster--continue",
|
||||
err, info.Namespace, info.Name)
|
||||
}
|
||||
op := event.Failed
|
||||
if a.objInCluster(id) {
|
||||
// Object in cluster stays in the inventory.
|
||||
klog.V(4).Infof("%s/%s apply retrieval failure, but in cluster--keep in inventory",
|
||||
info.Namespace, info.Name)
|
||||
invInfos[id] = info
|
||||
op = event.Unchanged
|
||||
}
|
||||
taskContext.EventChannel() <- createApplyEvent(id, op, err)
|
||||
taskContext.EventChannel() <- createApplyFailedEvent(id, err)
|
||||
taskContext.CaptureResourceFailure(id)
|
||||
continue
|
||||
}
|
||||
|
|
@ -173,10 +185,12 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
|
|||
if !canApply {
|
||||
klog.V(5).Infof("can not apply %s/%s--continue",
|
||||
clusterObj.GetNamespace(), clusterObj.GetName())
|
||||
taskContext.EventChannel() <- createApplyEvent(
|
||||
id,
|
||||
event.Unchanged,
|
||||
err)
|
||||
if err != nil {
|
||||
taskContext.EventChannel() <- createApplyFailedEvent(id, err)
|
||||
} else {
|
||||
taskContext.EventChannel() <- createApplyEvent(id,
|
||||
event.Unchanged, clusterObj)
|
||||
}
|
||||
taskContext.CaptureResourceFailure(id)
|
||||
continue
|
||||
}
|
||||
|
|
@ -202,8 +216,8 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
|
|||
info.Namespace, info.Name)
|
||||
delete(invInfos, id)
|
||||
}
|
||||
taskContext.EventChannel() <- createApplyEvent(
|
||||
id, event.Failed, applyerror.NewApplyRunError(err))
|
||||
taskContext.EventChannel() <- createApplyFailedEvent(id,
|
||||
applyerror.NewApplyRunError(err))
|
||||
taskContext.CaptureResourceFailure(id)
|
||||
}
|
||||
}
|
||||
|
|
@@ -373,12 +387,21 @@ func buildCRDsInfo(crds []*unstructured.Unstructured) *crdsInfo {
func (a *ApplyTask) ClearTimeout() {}

// createApplyEvent is a helper function to package an apply event for a single resource.
func createApplyEvent(id object.ObjMetadata, operation event.ApplyEventOperation, err error) event.Event {
func createApplyEvent(id object.ObjMetadata, operation event.ApplyEventOperation, resource *unstructured.Unstructured) event.Event {
    return event.Event{
        Type: event.ApplyType,
        ApplyEvent: event.ApplyEvent{
            Type: event.ApplyEventResourceUpdate,
            Identifier: id,
            Operation: operation,
            Resource: resource,
        },
    }
}

func createApplyFailedEvent(id object.ObjMetadata, err error) event.Event {
    return event.Event{
        Type: event.ApplyType,
        ApplyEvent: event.ApplyEvent{
            Identifier: id,
            Error: err,
        },
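Failures are now reported through ApplyEvent.Error rather than a Failed operation value. A hypothetical in-package test sketch (assuming this file's package and its existing imports plus testing and errors):

func TestCreateApplyFailedEvent(t *testing.T) {
    id := object.ObjMetadata{Namespace: "default", Name: "dep"}
    wantErr := errors.New("apply failed")

    e := createApplyFailedEvent(id, wantErr)
    if e.Type != event.ApplyType {
        t.Fatalf("expected ApplyType, got %v", e.Type)
    }
    if e.ApplyEvent.Identifier != id || e.ApplyEvent.Error == nil {
        t.Fatalf("expected identifier %v and a non-nil error", id)
    }
}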
|
@ -390,8 +413,8 @@ func createApplyEvent(id object.ObjMetadata, operation event.ApplyEventOperation
|
|||
func sendBatchApplyEvents(taskContext *taskrunner.TaskContext, objects []*unstructured.Unstructured, err error) {
|
||||
for _, obj := range objects {
|
||||
id := object.UnstructuredToObjMeta(obj)
|
||||
taskContext.EventChannel() <- createApplyEvent(
|
||||
id, event.Failed, applyerror.NewInitializeApplyOptionError(err))
|
||||
taskContext.EventChannel() <- createApplyFailedEvent(id,
|
||||
applyerror.NewInitializeApplyOptionError(err))
|
||||
taskContext.CaptureResourceFailure(id)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -36,10 +36,9 @@ func (r *resourcePrinterImpl) PrintObj(obj runtime.Object, _ io.Writer) error {
|
|||
r.ch <- event.Event{
|
||||
Type: event.ApplyType,
|
||||
ApplyEvent: event.ApplyEvent{
|
||||
Type: event.ApplyEventResourceUpdate,
|
||||
Operation: r.applyOperation,
|
||||
Object: obj.(*unstructured.Unstructured),
|
||||
Identifier: object.RuntimeToObjMeta(obj),
|
||||
Operation: r.applyOperation,
|
||||
Resource: obj.(*unstructured.Unstructured),
|
||||
},
|
||||
}
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -50,5 +50,5 @@ func TestKubectlPrinterAdapter(t *testing.T) {
|
|||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, event.ServersideApplied, msg.ApplyEvent.Operation)
|
||||
assert.Equal(t, deployment, msg.ApplyEvent.Object)
|
||||
assert.Equal(t, deployment, msg.ApplyEvent.Resource)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,16 +6,20 @@ package task
|
|||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"sigs.k8s.io/cli-utils/pkg/apply/event"
|
||||
"sigs.k8s.io/cli-utils/pkg/apply/prune"
|
||||
"sigs.k8s.io/cli-utils/pkg/apply/taskrunner"
|
||||
"sigs.k8s.io/cli-utils/pkg/common"
|
||||
"sigs.k8s.io/cli-utils/pkg/inventory"
|
||||
"sigs.k8s.io/cli-utils/pkg/object"
|
||||
)
|
||||
|
||||
// PruneTask prunes objects from the cluster
|
||||
// by using the PruneOptions. The provided Objects is the
|
||||
// set of resources that have just been applied.
|
||||
type PruneTask struct {
|
||||
TaskName string
|
||||
|
||||
PruneOptions *prune.PruneOptions
|
||||
InventoryObject inventory.InventoryInfo
|
||||
Objects []*unstructured.Unstructured
|
||||
|
|
@@ -24,6 +28,18 @@ type PruneTask struct {
    InventoryPolicy inventory.InventoryPolicy
}

func (p *PruneTask) Name() string {
    return p.TaskName
}

func (p *PruneTask) Action() event.ResourceAction {
    return event.PruneAction
}

func (p *PruneTask) Identifiers() []object.ObjMetadata {
    return object.UnstructuredsToObjMetas(p.Objects)
}

// Start creates a new goroutine that will invoke
// the Run function on the PruneOptions to update
// the cluster. It will push a TaskResult on the taskChannel
|
|
|||
|
|
@ -62,7 +62,7 @@ func getGeneration(r *event.ResourceStatus) int64 {
|
|||
}
|
||||
|
||||
// conditionMet tests whether the provided Condition holds true for
|
||||
// all resources given by the list of Identifiers.
|
||||
// all resources given by the list of Ids.
|
||||
func (a *resourceStatusCollector) conditionMet(rwd []resourceWaitData, c Condition) bool {
|
||||
switch c {
|
||||
case AllCurrent:
|
||||
|
|
@ -75,7 +75,7 @@ func (a *resourceStatusCollector) conditionMet(rwd []resourceWaitData, c Conditi
|
|||
}
|
||||
|
||||
// allMatchStatus checks whether all resources given by the
|
||||
// Identifiers parameter has the provided status.
|
||||
// Ids parameter has the provided status.
|
||||
func (a *resourceStatusCollector) allMatchStatus(rwd []resourceWaitData, s status.Status) bool {
|
||||
for _, wd := range rwd {
|
||||
ri, found := a.resourceMap[wd.identifier]
|
||||
|
|
@ -90,7 +90,7 @@ func (a *resourceStatusCollector) allMatchStatus(rwd []resourceWaitData, s statu
|
|||
}
|
||||
|
||||
// noneMatchStatus checks whether none of the resources given
|
||||
// by the Identifiers parameters has the provided status.
|
||||
// by the Ids parameters has the provided status.
|
||||
func (a *resourceStatusCollector) noneMatchStatus(rwd []resourceWaitData, s status.Status) bool {
|
||||
for _, wd := range rwd {
|
||||
ri, found := a.resourceMap[wd.identifier]
|
||||
|
|
|
|||
|
|
@@ -159,7 +159,7 @@ func (b *baseRunner) run(ctx context.Context, taskQueue chan Task,
            // If the statusChannel has closed or we are preparing
            // to abort the task processing, we just ignore all
            // statusEvents.
            // TODO(mortent): Check if a losed statusChannel might
            // TODO(mortent): Check if a closed statusChannel might
            // create a busy loop here.
            if !ok || abort {
                continue

@@ -183,8 +183,10 @@ func (b *baseRunner) run(ctx context.Context, taskQueue chan Task,
            eventChannel <- event.Event{
                Type: event.StatusType,
                StatusEvent: event.StatusEvent{
                    Type: event.StatusEventResourceUpdate,
                    Resource: statusEvent.Resource,
                    Identifier: statusEvent.Resource.Identifier,
                    PollResourceInfo: statusEvent.Resource,
                    Resource: statusEvent.Resource.Resource,
                    Error: statusEvent.Error,
                },
            }
        }
@@ -208,6 +210,14 @@ func (b *baseRunner) run(ctx context.Context, taskQueue chan Task,
        // If everything is ok, we fetch and start the next task.
        case msg := <-taskContext.TaskChannel():
            currentTask.ClearTimeout()
            taskContext.EventChannel() <- event.Event{
                Type: event.ActionGroupType,
                ActionGroupEvent: event.ActionGroupEvent{
                    GroupName: currentTask.Name(),
                    Action: currentTask.Action(),
                    Type: event.Finished,
                },
            }
            if msg.Err != nil {
                b.amendTimeoutError(msg.Err)
                return msg.Err

@@ -277,6 +287,15 @@ func (b *baseRunner) nextTask(taskQueue chan Task,
        return nil, true
    }

    taskContext.EventChannel() <- event.Event{
        Type: event.ActionGroupType,
        ActionGroupEvent: event.ActionGroupEvent{
            GroupName: tsk.Name(),
            Action: tsk.Action(),
            Type: event.Started,
        },
    }

    switch st := tsk.(type) {
    case *WaitTask:
        // The wait tasks need to be handled specifically here. Before
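Every task is therefore bracketed by a Started and a Finished ActionGroupEvent. A small sketch of a consumer that tracks which task groups are still in flight (hypothetical helper, not code from the commit):

package example

import "sigs.k8s.io/cli-utils/pkg/apply/event"

// inFlight returns the names of task groups that have started but not yet
// finished after replaying the given events.
func inFlight(events []event.Event) []string {
    open := map[string]bool{}
    for _, e := range events {
        if e.Type != event.ActionGroupType {
            continue
        }
        switch e.ActionGroupEvent.Type {
        case event.Started:
            open[e.ActionGroupEvent.GroupName] = true
        case event.Finished:
            delete(open, e.ActionGroupEvent.GroupName)
        }
    }
    var names []string
    for name := range open {
        names = append(names, name)
    }
    return names
}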
|
|||
|
|
@ -16,6 +16,7 @@ import (
|
|||
pollevent "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
|
||||
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
|
||||
"sigs.k8s.io/cli-utils/pkg/object"
|
||||
"sigs.k8s.io/cli-utils/pkg/testutil"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -50,15 +51,15 @@ func TestBaseRunner(t *testing.T) {
|
|||
"wait task runs until condition is met": {
|
||||
identifiers: []object.ObjMetadata{depID, cmID},
|
||||
tasks: []Task{
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.ApplyType,
|
||||
},
|
||||
duration: 3 * time.Second,
|
||||
},
|
||||
NewWaitTask([]object.ObjMetadata{depID, cmID}, AllCurrent,
|
||||
1*time.Minute),
|
||||
&busyTask{
|
||||
NewWaitTask("wait", []object.ObjMetadata{depID, cmID}, AllCurrent,
|
||||
1*time.Minute, testutil.NewFakeRESTMapper()),
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.PruneType,
|
||||
},
|
||||
|
|
@ -83,17 +84,23 @@ func TestBaseRunner(t *testing.T) {
|
|||
},
|
||||
},
|
||||
expectedEventTypes: []event.Type{
|
||||
event.ActionGroupType,
|
||||
event.ApplyType,
|
||||
event.ActionGroupType,
|
||||
event.ActionGroupType,
|
||||
event.StatusType,
|
||||
event.StatusType,
|
||||
event.ActionGroupType,
|
||||
event.ActionGroupType,
|
||||
event.PruneType,
|
||||
event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
"wait task times out eventually": {
|
||||
identifiers: []object.ObjMetadata{depID, cmID},
|
||||
tasks: []Task{
|
||||
NewWaitTask([]object.ObjMetadata{depID, cmID}, AllCurrent,
|
||||
2*time.Second),
|
||||
NewWaitTask("wait", []object.ObjMetadata{depID, cmID}, AllCurrent,
|
||||
2*time.Second, testutil.NewFakeRESTMapper()),
|
||||
},
|
||||
statusEventsDelay: time.Second,
|
||||
statusEvents: []pollevent.Event{
|
||||
|
|
@ -119,25 +126,25 @@ func TestBaseRunner(t *testing.T) {
|
|||
"tasks run in order": {
|
||||
identifiers: []object.ObjMetadata{},
|
||||
tasks: []Task{
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.ApplyType,
|
||||
},
|
||||
duration: 1 * time.Second,
|
||||
},
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.PruneType,
|
||||
},
|
||||
duration: 1 * time.Second,
|
||||
},
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.ApplyType,
|
||||
},
|
||||
duration: 1 * time.Second,
|
||||
},
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.PruneType,
|
||||
},
|
||||
|
|
@ -147,10 +154,18 @@ func TestBaseRunner(t *testing.T) {
|
|||
statusEventsDelay: 1 * time.Second,
|
||||
statusEvents: []pollevent.Event{},
|
||||
expectedEventTypes: []event.Type{
|
||||
event.ActionGroupType,
|
||||
event.ApplyType,
|
||||
event.ActionGroupType,
|
||||
event.ActionGroupType,
|
||||
event.PruneType,
|
||||
event.ActionGroupType,
|
||||
event.ActionGroupType,
|
||||
event.ApplyType,
|
||||
event.ActionGroupType,
|
||||
event.ActionGroupType,
|
||||
event.PruneType,
|
||||
event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -235,13 +250,13 @@ func TestBaseRunnerCancellation(t *testing.T) {
|
|||
"cancellation while custom task is running": {
|
||||
identifiers: []object.ObjMetadata{depID},
|
||||
tasks: []Task{
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.ApplyType,
|
||||
},
|
||||
duration: 4 * time.Second,
|
||||
},
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.PruneType,
|
||||
},
|
||||
|
|
@ -250,34 +265,40 @@ func TestBaseRunnerCancellation(t *testing.T) {
|
|||
},
|
||||
contextTimeout: 2 * time.Second,
|
||||
expectedEventTypes: []event.Type{
|
||||
event.ActionGroupType,
|
||||
event.ApplyType,
|
||||
event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
"cancellation while wait task is running": {
|
||||
identifiers: []object.ObjMetadata{depID},
|
||||
tasks: []Task{
|
||||
NewWaitTask([]object.ObjMetadata{depID}, AllCurrent, 20*time.Second),
|
||||
&busyTask{
|
||||
NewWaitTask("wait", []object.ObjMetadata{depID}, AllCurrent,
|
||||
20*time.Second, testutil.NewFakeRESTMapper()),
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.PruneType,
|
||||
},
|
||||
duration: 2 * time.Second,
|
||||
},
|
||||
},
|
||||
contextTimeout: 2 * time.Second,
|
||||
expectedEventTypes: []event.Type{},
|
||||
contextTimeout: 2 * time.Second,
|
||||
expectedEventTypes: []event.Type{
|
||||
event.ActionGroupType,
|
||||
event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
"error while custom task is running": {
|
||||
identifiers: []object.ObjMetadata{depID},
|
||||
tasks: []Task{
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.ApplyType,
|
||||
},
|
||||
duration: 2 * time.Second,
|
||||
err: testError,
|
||||
},
|
||||
&busyTask{
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.PruneType,
|
||||
},
|
||||
|
|
@ -287,14 +308,17 @@ func TestBaseRunnerCancellation(t *testing.T) {
|
|||
contextTimeout: 30 * time.Second,
|
||||
expectedError: testError,
|
||||
expectedEventTypes: []event.Type{
|
||||
event.ActionGroupType,
|
||||
event.ApplyType,
|
||||
event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
"error from status poller while wait task is running": {
|
||||
identifiers: []object.ObjMetadata{depID},
|
||||
tasks: []Task{
|
||||
NewWaitTask([]object.ObjMetadata{depID}, AllCurrent, 20*time.Second),
|
||||
&busyTask{
|
||||
NewWaitTask("wait", []object.ObjMetadata{depID}, AllCurrent,
|
||||
20*time.Second, testutil.NewFakeRESTMapper()),
|
||||
&fakeApplyTask{
|
||||
resultEvent: event.Event{
|
||||
Type: event.PruneType,
|
||||
},
|
||||
|
|
@ -308,9 +332,12 @@ func TestBaseRunnerCancellation(t *testing.T) {
|
|||
Error: testError,
|
||||
},
|
||||
},
|
||||
contextTimeout: 30 * time.Second,
|
||||
expectedError: testError,
|
||||
expectedEventTypes: []event.Type{},
|
||||
contextTimeout: 30 * time.Second,
|
||||
expectedError: testError,
|
||||
expectedEventTypes: []event.Type{
|
||||
event.ActionGroupType,
|
||||
event.ActionGroupType,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -379,20 +406,33 @@ func TestBaseRunnerCancellation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
type busyTask struct {
|
||||
type fakeApplyTask struct {
|
||||
name string
|
||||
resultEvent event.Event
|
||||
duration time.Duration
|
||||
err error
|
||||
}
|
||||
|
||||
func (b *busyTask) Start(taskContext *TaskContext) {
|
||||
func (f *fakeApplyTask) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *fakeApplyTask) Action() event.ResourceAction {
|
||||
return event.ApplyAction
|
||||
}
|
||||
|
||||
func (f *fakeApplyTask) Identifiers() []object.ObjMetadata {
|
||||
return []object.ObjMetadata{}
|
||||
}
|
||||
|
||||
func (f *fakeApplyTask) Start(taskContext *TaskContext) {
|
||||
go func() {
|
||||
<-time.NewTimer(b.duration).C
|
||||
taskContext.EventChannel() <- b.resultEvent
|
||||
<-time.NewTimer(f.duration).C
|
||||
taskContext.EventChannel() <- f.resultEvent
|
||||
taskContext.TaskChannel() <- TaskResult{
|
||||
Err: b.err,
|
||||
Err: f.err,
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (b *busyTask) ClearTimeout() {}
|
||||
func (f *fakeApplyTask) ClearTimeout() {}
|
||||
|
|
|
|||
|
|
@ -4,8 +4,15 @@
|
|||
package taskrunner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/client-go/restmapper"
|
||||
"sigs.k8s.io/cli-utils/pkg/apply/event"
|
||||
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
|
||||
"sigs.k8s.io/cli-utils/pkg/object"
|
||||
)
|
||||
|
|
@@ -13,23 +20,28 @@ import (
// Task is the interface that must be implemented by
// all tasks that will be executed by the taskrunner.
type Task interface {
    Name() string
    Action() event.ResourceAction
    Identifiers() []object.ObjMetadata
    Start(taskContext *TaskContext)
    ClearTimeout()
}

// NewWaitTask creates a new wait task where we will wait until
// the resources specifies by ids all meet the specified condition.
func NewWaitTask(ids []object.ObjMetadata, cond Condition, timeout time.Duration) *WaitTask {
func NewWaitTask(name string, ids []object.ObjMetadata, cond Condition, timeout time.Duration, mapper meta.RESTMapper) *WaitTask {
    // Create the token channel and only add one item.
    tokenChannel := make(chan struct{}, 1)
    tokenChannel <- struct{}{}

    return &WaitTask{
        Identifiers: ids,
        Condition: cond,
        Timeout: timeout,
        name: name,
        Ids: ids,
        Condition: cond,
        Timeout: timeout,

        token: tokenChannel,
        mapper: mapper,
        token: tokenChannel,
    }
}
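Callers must now pass a task name and a RESTMapper when building a wait task. A usage sketch with the fake mapper the tests use (illustrative only):

package example

import (
    "time"

    "sigs.k8s.io/cli-utils/pkg/apply/taskrunner"
    "sigs.k8s.io/cli-utils/pkg/object"
    "sigs.k8s.io/cli-utils/pkg/testutil"
)

func newWait(ids []object.ObjMetadata) *taskrunner.WaitTask {
    return taskrunner.NewWaitTask(
        "wait-0",                     // surfaced as the ActionGroupEvent group name
        ids,                          // resources to wait for
        taskrunner.AllCurrent,        // condition to reach
        2*time.Minute,                // timeout
        testutil.NewFakeRESTMapper(), // mapper, reset after CRDs become established
    )
}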
|
|
@ -41,14 +53,18 @@ func NewWaitTask(ids []object.ObjMetadata, cond Condition, timeout time.Duration
|
|||
// is handled in a special way to the taskrunner and is a part of the core
|
||||
// package.
|
||||
type WaitTask struct {
|
||||
// Identifiers is the list of resources that we are waiting for.
|
||||
Identifiers []object.ObjMetadata
|
||||
// name allows providing a name for the task.
|
||||
name string
|
||||
// Ids is the list of resources that we are waiting for.
|
||||
Ids []object.ObjMetadata
|
||||
// Condition defines the status we want all resources to reach
|
||||
Condition Condition
|
||||
// Timeout defines how long we are willing to wait for the condition
|
||||
// to be met.
|
||||
Timeout time.Duration
|
||||
|
||||
mapper meta.RESTMapper
|
||||
|
||||
// cancelFunc is a function that will cancel the timeout timer
|
||||
// on the task.
|
||||
cancelFunc func()
|
||||
|
|
@ -62,6 +78,18 @@ type WaitTask struct {
|
|||
token chan struct{}
|
||||
}
|
||||
|
||||
func (w *WaitTask) Name() string {
|
||||
return w.name
|
||||
}
|
||||
|
||||
func (w *WaitTask) Action() event.ResourceAction {
|
||||
return event.WaitAction
|
||||
}
|
||||
|
||||
func (w *WaitTask) Identifiers() []object.ObjMetadata {
|
||||
return w.Ids
|
||||
}
|
||||
|
||||
// Start kicks off the task. For the wait task, this just means
|
||||
// setting up the timeout timer.
|
||||
func (w *WaitTask) Start(taskContext *TaskContext) {
|
||||
|
|
@ -84,7 +112,7 @@ func (w *WaitTask) setTimer(taskContext *TaskContext) {
|
|||
case <-w.token:
|
||||
taskContext.TaskChannel() <- TaskResult{
|
||||
Err: &TimeoutError{
|
||||
Identifiers: w.Identifiers,
|
||||
Identifiers: w.Ids,
|
||||
Timeout: w.Timeout,
|
||||
Condition: w.Condition,
|
||||
},
|
||||
|
|
@ -111,7 +139,7 @@ func (w *WaitTask) checkCondition(taskContext *TaskContext, coll *resourceStatus
|
|||
// was applied.
|
||||
func (w *WaitTask) computeResourceWaitData(taskContext *TaskContext) []resourceWaitData {
|
||||
var rwd []resourceWaitData
|
||||
for _, id := range w.Identifiers {
|
||||
for _, id := range w.Ids {
|
||||
if taskContext.ResourceFailed(id) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -137,11 +165,27 @@ func (w *WaitTask) startAndComplete(taskContext *TaskContext) {
|
|||
// for the task has been met, or something has failed so the task
|
||||
// need to be stopped.
|
||||
func (w *WaitTask) complete(taskContext *TaskContext) {
|
||||
var err error
|
||||
for _, obj := range w.Ids {
|
||||
if (obj.GroupKind.Group == v1.SchemeGroupVersion.Group ||
|
||||
obj.GroupKind.Group == v1beta1.SchemeGroupVersion.Group) &&
|
||||
obj.GroupKind.Kind == "CustomResourceDefinition" {
|
||||
ddRESTMapper, err := extractDeferredDiscoveryRESTMapper(w.mapper)
|
||||
if err == nil {
|
||||
ddRESTMapper.Reset()
|
||||
// We only need to reset once.
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
select {
|
||||
// Only do something if we can get the token.
|
||||
case <-w.token:
|
||||
go func() {
|
||||
taskContext.TaskChannel() <- TaskResult{}
|
||||
taskContext.TaskChannel() <- TaskResult{
|
||||
Err: err,
|
||||
}
|
||||
}()
|
||||
default:
|
||||
return
|
||||
|
|
@@ -185,3 +229,20 @@ func (c Condition) Meets(s status.Status) bool {
        return false
    }
}

// extractDeferredDiscoveryRESTMapper unwraps the provided RESTMapper
// interface to get access to the underlying DeferredDiscoveryRESTMapper
// that can be reset.
func extractDeferredDiscoveryRESTMapper(mapper meta.RESTMapper) (*restmapper.DeferredDiscoveryRESTMapper,
    error) {
    val := reflect.ValueOf(mapper)
    if val.Type().Kind() != reflect.Struct {
        return nil, fmt.Errorf("unexpected RESTMapper type: %s", val.Type().String())
    }
    fv := val.FieldByName("RESTMapper")
    ddRESTMapper, ok := fv.Interface().(*restmapper.DeferredDiscoveryRESTMapper)
    if !ok {
        return nil, fmt.Errorf("unexpected RESTMapper type")
    }
    return ddRESTMapper, nil
}
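The unwrap relies on the mapper being a struct value carrying a RESTMapper field that holds the resettable DeferredDiscoveryRESTMapper, which is the shape cli-runtime factories and the testutil fake provide. A hedged sketch of a wrapper that would satisfy it (type and function names here are illustrative assumptions):

package example

import (
    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/discovery/cached/memory"
    "k8s.io/client-go/restmapper"
)

// resettableMapper embeds meta.RESTMapper, so the struct has a field named
// "RESTMapper" that the reflection above can find, while still satisfying
// the meta.RESTMapper interface itself.
type resettableMapper struct {
    meta.RESTMapper
}

func newResettableMapper(dc discovery.DiscoveryInterface) resettableMapper {
    dd := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc))
    return resettableMapper{RESTMapper: dd}
}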
|
|
|||
|
|
@ -10,10 +10,12 @@ import (
|
|||
|
||||
"sigs.k8s.io/cli-utils/pkg/apply/event"
|
||||
"sigs.k8s.io/cli-utils/pkg/object"
|
||||
"sigs.k8s.io/cli-utils/pkg/testutil"
|
||||
)
|
||||
|
||||
func TestWaitTask_TimeoutTriggered(t *testing.T) {
|
||||
task := NewWaitTask([]object.ObjMetadata{}, AllCurrent, 2*time.Second)
|
||||
task := NewWaitTask("wait", []object.ObjMetadata{}, AllCurrent,
|
||||
2*time.Second, testutil.NewFakeRESTMapper())
|
||||
|
||||
eventChannel := make(chan event.Event)
|
||||
taskContext := NewTaskContext(eventChannel)
|
||||
|
|
@ -35,7 +37,8 @@ func TestWaitTask_TimeoutTriggered(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWaitTask_TimeoutCancelled(t *testing.T) {
|
||||
task := NewWaitTask([]object.ObjMetadata{}, AllCurrent, 2*time.Second)
|
||||
task := NewWaitTask("wait", []object.ObjMetadata{}, AllCurrent,
|
||||
2*time.Second, testutil.NewFakeRESTMapper())
|
||||
|
||||
eventChannel := make(chan event.Event)
|
||||
taskContext := NewTaskContext(eventChannel)
|
||||
|
|
@ -54,7 +57,8 @@ func TestWaitTask_TimeoutCancelled(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWaitTask_SingleTaskResult(t *testing.T) {
|
||||
task := NewWaitTask([]object.ObjMetadata{}, AllCurrent, 2*time.Second)
|
||||
task := NewWaitTask("wait", []object.ObjMetadata{}, AllCurrent,
|
||||
2*time.Second, testutil.NewFakeRESTMapper())
|
||||
|
||||
eventChannel := make(chan event.Event)
|
||||
taskContext := NewTaskContext(eventChannel)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,3 @@
|
|||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Code generated by "stringer -type=EventType"; DO NOT EDIT.
|
||||
|
||||
package event
|
||||
|
|
|
|||
|
|
@@ -62,6 +62,19 @@ type ObjMetadata struct {
    GroupKind schema.GroupKind
}

// ObjMetas is a slice of ObjMetadata.
type ObjMetas []ObjMetadata

// Contains checks if the provided ObjMetadata exists in the ObjMetas slice.
func (oms ObjMetas) Contains(id ObjMetadata) bool {
    for _, om := range oms {
        if om == id {
            return true
        }
    }
    return false
}

// CreateObjMetadata returns a pointer to an ObjMetadata struct filled
// with the passed values. This function normalizes and validates the
// passed fields and returns an error for bad parameters.
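A minimal usage sketch for the new ObjMetas.Contains helper; the identifiers are constructed inline purely for illustration.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
    "sigs.k8s.io/cli-utils/pkg/object"
)

func main() {
    dep := object.ObjMetadata{
        Namespace: "default",
        Name:      "nginx",
        GroupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"},
    }
    cm := object.ObjMetadata{
        Namespace: "default",
        Name:      "settings",
        GroupKind: schema.GroupKind{Kind: "ConfigMap"},
    }

    applied := object.ObjMetas{dep}
    fmt.Println(applied.Contains(dep)) // true
    fmt.Println(applied.Contains(cm))  // false
}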
@ -13,11 +13,12 @@ import (
)

type Formatter interface {
	FormatApplyEvent(ae event.ApplyEvent, as *ApplyStats, c Collector) error
	FormatStatusEvent(se event.StatusEvent, sc Collector) error
	FormatPruneEvent(pe event.PruneEvent, ps *PruneStats) error
	FormatDeleteEvent(de event.DeleteEvent, ds *DeleteStats) error
	FormatApplyEvent(ae event.ApplyEvent) error
	FormatStatusEvent(se event.StatusEvent) error
	FormatPruneEvent(pe event.PruneEvent) error
	FormatDeleteEvent(de event.DeleteEvent) error
	FormatErrorEvent(ee event.ErrorEvent) error
	FormatActionGroupEvent(age event.ActionGroupEvent, ags []event.ActionGroup, as *ApplyStats, ps *PruneStats, ds *DeleteStats, c Collector) error
}

type FormatterFactory func(ioStreams genericclioptions.IOStreams,
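To make the narrower contract concrete, a no-op implementation of the new interface could look roughly like the sketch below. This is not code from the commit; the nopFormatter name is invented and the sketch assumes it lives in the same list package so ApplyStats, PruneStats, DeleteStats and Collector are in scope.

	type nopFormatter struct{}

	func (nopFormatter) FormatApplyEvent(event.ApplyEvent) error   { return nil }
	func (nopFormatter) FormatStatusEvent(event.StatusEvent) error { return nil }
	func (nopFormatter) FormatPruneEvent(event.PruneEvent) error   { return nil }
	func (nopFormatter) FormatDeleteEvent(event.DeleteEvent) error { return nil }
	func (nopFormatter) FormatErrorEvent(event.ErrorEvent) error   { return nil }
	func (nopFormatter) FormatActionGroupEvent(event.ActionGroupEvent, []event.ActionGroup,
		*ApplyStats, *PruneStats, *DeleteStats, Collector) error {
		return nil
	}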
@ -38,6 +39,7 @@ type ApplyStats struct {

func (a *ApplyStats) inc(op event.ApplyEventOperation) {
	switch op {
	case event.ApplyUnspecified:
	case event.ServersideApplied:
		a.ServersideApplied++
	case event.Created:

@ -46,13 +48,15 @@ func (a *ApplyStats) inc(op event.ApplyEventOperation) {
		a.Unchanged++
	case event.Configured:
		a.Configured++
	case event.Failed:
		a.Failed++
	default:
		panic(fmt.Errorf("unknown apply operation %s", op.String()))
	}
}

func (a *ApplyStats) incFailed() {
	a.Failed++
}

func (a *ApplyStats) Sum() int {
	return a.ServersideApplied + a.Configured + a.Unchanged + a.Created + a.Failed
}
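For orientation, a tiny, hypothetical example of how these counters accumulate (the sequence of operations is made up):

	stats := &ApplyStats{}
	stats.inc(event.Created)    // Created == 1
	stats.inc(event.Configured) // Configured == 1
	stats.incFailed()           // Failed == 1
	fmt.Println(stats.Sum())    // 3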
@ -113,69 +117,89 @@ func (sc *StatusCollector) LatestStatus() map[object.ObjMetadata]event.StatusEve
// format on StdOut. As we support other printer implementations
// this should probably be an interface.
// This function will block until the channel is closed.
//nolint:gocyclo
func (b *BaseListPrinter) Print(ch <-chan event.Event, previewStrategy common.DryRunStrategy) error {
	var actionGroups []event.ActionGroup
	applyStats := &ApplyStats{}
	pruneStats := &PruneStats{}
	deleteStats := &DeleteStats{}
	statusCollector := &StatusCollector{
		latestStatus: make(map[object.ObjMetadata]event.StatusEvent),
	}
	printStatus := false
	pruneStats := &PruneStats{}
	deleteStats := &DeleteStats{}
	formatter := b.FormatterFactory(b.IOStreams, previewStrategy)
	for e := range ch {
		switch e.Type {
		case event.InitType:
			actionGroups = e.InitEvent.ActionGroups
		case event.ErrorType:
			_ = formatter.FormatErrorEvent(e.ErrorEvent)
			return e.ErrorEvent.Err
		case event.ApplyType:
			if e.ApplyEvent.Type == event.ApplyEventResourceUpdate {
				applyStats.inc(e.ApplyEvent.Operation)
			applyStats.inc(e.ApplyEvent.Operation)
			if e.ApplyEvent.Error != nil {
				applyStats.incFailed()
			}
			if e.ApplyEvent.Type == event.ApplyEventCompleted {
				printStatus = true
			}
			if err := formatter.FormatApplyEvent(e.ApplyEvent, applyStats, statusCollector); err != nil {
			if err := formatter.FormatApplyEvent(e.ApplyEvent); err != nil {
				return err
			}
		case event.StatusType:
			if se := e.StatusEvent; se.Type == event.StatusEventResourceUpdate {
				statusCollector.updateStatus(e.StatusEvent.Resource.Identifier, e.StatusEvent)
				if printStatus {
					if err := formatter.FormatStatusEvent(e.StatusEvent, statusCollector); err != nil {
						return err
					}
			statusCollector.updateStatus(e.StatusEvent.Identifier, e.StatusEvent)
			if printStatus {
				if err := formatter.FormatStatusEvent(e.StatusEvent); err != nil {
					return err
				}
			}
		case event.PruneType:
			if e.PruneEvent.Type == event.PruneEventResourceUpdate {
				switch e.PruneEvent.Operation {
				case event.Pruned:
					pruneStats.incPruned()
				case event.PruneSkipped:
					pruneStats.incSkipped()
				}
			switch e.PruneEvent.Operation {
			case event.Pruned:
				pruneStats.incPruned()
			case event.PruneSkipped:
				pruneStats.incSkipped()
			}
			if e.PruneEvent.Type == event.PruneEventFailed {
			if e.PruneEvent.Error != nil {
				pruneStats.incFailed()
			}
			if err := formatter.FormatPruneEvent(e.PruneEvent, pruneStats); err != nil {
			if err := formatter.FormatPruneEvent(e.PruneEvent); err != nil {
				return err
			}
		case event.DeleteType:
			if e.DeleteEvent.Type == event.DeleteEventResourceUpdate {
				switch e.DeleteEvent.Operation {
				case event.Deleted:
					deleteStats.incDeleted()
				case event.DeleteSkipped:
					deleteStats.incSkipped()
				}
			switch e.DeleteEvent.Operation {
			case event.Deleted:
				deleteStats.incDeleted()
			case event.DeleteSkipped:
				deleteStats.incSkipped()
			}
			if e.DeleteEvent.Type == event.DeleteEventFailed {
			if e.DeleteEvent.Error != nil {
				deleteStats.incFailed()
			}
			if err := formatter.FormatDeleteEvent(e.DeleteEvent, deleteStats); err != nil {
			if err := formatter.FormatDeleteEvent(e.DeleteEvent); err != nil {
				return err
			}
		case event.ActionGroupType:
			if err := formatter.FormatActionGroupEvent(e.ActionGroupEvent, actionGroups, applyStats,
				pruneStats, deleteStats, statusCollector); err != nil {
				return err
			}

			switch e.ActionGroupEvent.Action {
			case event.ApplyAction:
				if e.ActionGroupEvent.Type == event.Started {
					applyStats = &ApplyStats{}
				}
			case event.PruneAction:
				if e.ActionGroupEvent.Type == event.Started {
					pruneStats = &PruneStats{}
				}
			case event.DeleteAction:
				if e.ActionGroupEvent.Type == event.Started {
					deleteStats = &DeleteStats{}
				}
			case event.WaitAction:
				if e.ActionGroupEvent.Type == event.Started {
					printStatus = true
				}
			}
		}
	}
	failedSum := applyStats.Failed + pruneStats.Failed + deleteStats.Failed
@ -184,3 +208,12 @@ func (b *BaseListPrinter) Print(ch <-chan event.Event, previewStrategy common.Dr
	}
	return nil
}

func ActionGroupByName(name string, ags []event.ActionGroup) (event.ActionGroup, bool) {
	for _, ag := range ags {
		if ag.Name == name {
			return ag, true
		}
	}
	return event.ActionGroup{}, false
}
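A brief, hypothetical usage of the new lookup helper; the group name "prune-0" and the actionGroups slice are assumptions:

	if ag, found := ActionGroupByName("prune-0", actionGroups); found {
		fmt.Println("found action group:", ag.Name)
	}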
@ -0,0 +1,185 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0

package testutil

import (
	"fmt"

	"sigs.k8s.io/cli-utils/pkg/apply/event"
	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
	"sigs.k8s.io/cli-utils/pkg/object"
)

type ExpEvent struct {
	EventType event.Type

	ActionGroupEvent *ExpActionGroupEvent
	ApplyEvent       *ExpApplyEvent
	StatusEvent      *ExpStatusEvent
	PruneEvent       *ExpPruneEvent
	DeleteEvent      *ExpDeleteEvent
}

type ExpActionGroupEvent struct {
	Name   string
	Action event.ResourceAction
}

type ExpApplyEvent struct {
	Operation  event.ApplyEventOperation
	Identifier object.ObjMetadata
	Error      error
}

type ExpStatusEvent struct {
	Identifier object.ObjMetadata
	Status     status.Status
	Error      error
}

type ExpPruneEvent struct {
	Operation  event.PruneEventOperation
	Identifier object.ObjMetadata
	Error      error
}

type ExpDeleteEvent struct {
	Operation  event.DeleteEventOperation
	Identifier object.ObjMetadata
	Error      error
}

func VerifyEvents(expEvents []ExpEvent, events []event.Event) error {
	expEventIndex := 0
	for i := range events {
		e := events[i]
		ee := expEvents[expEventIndex]
		if isMatch(ee, e) {
			expEventIndex += 1
			if expEventIndex >= len(expEvents) {
				return nil
			}
		}
	}
	return fmt.Errorf("event %s not found", expEvents[expEventIndex].EventType)
}

var nilIdentifier = object.ObjMetadata{}

// nolint:gocyclo
// TODO(mortent): This function is pretty complex and with quite a bit of
// duplication. We should see if there is a better way to provide a flexible
// way to verify that we got the expected events.
func isMatch(ee ExpEvent, e event.Event) bool {
	if ee.EventType != e.Type {
		return false
	}

	// nolint:gocritic
	switch e.Type {
	case event.ActionGroupType:
		agee := ee.ActionGroupEvent

		if agee == nil {
			return true
		}

		age := e.ActionGroupEvent

		if agee.Name != age.GroupName {
			return false
		}

		if agee.Action != age.Action {
			return false
		}
	case event.ApplyType:
		aee := ee.ApplyEvent
		// If no more information is specified, we consider it a match.
		if aee == nil {
			return true
		}
		ae := e.ApplyEvent

		if aee.Identifier != nilIdentifier {
			if aee.Identifier != ae.Identifier {
				return false
			}
		}

		if aee.Operation != ae.Operation {
			return false
		}

		if aee.Error != nil {
			return ae.Error != nil
		}
		return ae.Error == nil

	case event.StatusType:
		see := ee.StatusEvent
		if see == nil {
			return true
		}
		se := e.StatusEvent

		if see.Identifier != se.Identifier {
			return false
		}

		if see.Status != se.PollResourceInfo.Status {
			return false
		}

		if see.Error != nil {
			return se.Error != nil
		}
		return se.Error == nil

	case event.PruneType:
		pee := ee.PruneEvent
		if pee == nil {
			return true
		}
		pe := e.PruneEvent

		if pee.Identifier != nilIdentifier {
			if pee.Identifier != pe.Identifier {
				return false
			}
		}

		if pee.Operation != pe.Operation {
			return false
		}

		if pee.Error != nil {
			return pe.Error != nil
		}
		return pe.Error == nil

	case event.DeleteType:
		dee := ee.DeleteEvent
		if dee == nil {
			return true
		}
		de := e.DeleteEvent

		if dee.Identifier != nilIdentifier {
			if dee.Identifier != de.Identifier {
				return false
			}
		}

		if dee.Operation != de.Operation {
			return false
		}

		if dee.Error != nil {
			return de.Error != nil
		}
		return de.Error == nil
	}
	return true
}
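For orientation, the e2e tests below use this helper roughly like the condensed, hypothetical example here; collectedEvents stands in for whatever slice of events a test has gathered from the applier:

	err := testutil.VerifyEvents([]testutil.ExpEvent{
		{EventType: event.InitType},
		{
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Operation: event.Created,
			},
		},
	}, collectedEvents)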
@ -14,6 +14,7 @@ import (
	"sigs.k8s.io/cli-utils/pkg/apply"
	"sigs.k8s.io/cli-utils/pkg/apply/event"
	"sigs.k8s.io/cli-utils/pkg/inventory"
	"sigs.k8s.io/cli-utils/pkg/testutil"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

@ -38,18 +39,12 @@ func applyAndDestroyTest(c client.Client, invConfig InventoryConfig, inventoryNa
		Expect(e.Type).NotTo(Equal(event.ErrorType))
		applierEvents = append(applierEvents, e)
	}
	err := verifyEvents([]expEvent{
	err := testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.InitType,
			EventType: event.InitType,
		},
		{
			eventType: event.ApplyType,
		},
		{
			eventType: event.ApplyType,
		},
		{
			eventType: event.PruneType,
			EventType: event.ApplyType,
		},
	}, applierEvents)
	Expect(err).ToNot(HaveOccurred())

@ -63,12 +58,9 @@ func applyAndDestroyTest(c client.Client, invConfig InventoryConfig, inventoryNa
	destroyInv := createInventoryInfo(invConfig, inventoryName, namespaceName, inventoryID)
	option := &apply.DestroyerOption{InventoryPolicy: inventory.AdoptIfNoInventory}
	destroyerEvents := runCollectNoErr(destroyer.Run(destroyInv, option))
	err = verifyEvents([]expEvent{
	err = testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.DeleteType,
		},
		{
			eventType: event.DeleteType,
			EventType: event.DeleteType,
		},
	}, destroyerEvents)
	Expect(err).ToNot(HaveOccurred())
@ -16,8 +16,6 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/cli-utils/pkg/apply/event"
	"sigs.k8s.io/cli-utils/pkg/common"
	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
	"sigs.k8s.io/cli-utils/pkg/object"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

@ -180,184 +178,3 @@ func updateReplicas(u *unstructured.Unstructured, replicas int) *unstructured.Un
	}
	return u
}

type expEvent struct {
	eventType event.Type

	applyEvent  *expApplyEvent
	statusEvent *expStatusEvent
	pruneEvent  *expPruneEvent
	deleteEvent *expDeleteEvent
}

type expApplyEvent struct {
	applyEventType event.ApplyEventType
	operation      event.ApplyEventOperation
	identifier     object.ObjMetadata
	error          error
}

type expStatusEvent struct {
	statusEventType event.StatusEventType
	identifier      object.ObjMetadata
	status          status.Status
	error           error
}

type expPruneEvent struct {
	pruneEventType event.PruneEventType
	operation      event.PruneEventOperation
	identifier     object.ObjMetadata
	error          error
}

type expDeleteEvent struct {
	deleteEventType event.DeleteEventType
	operation       event.DeleteEventOperation
	identifier      object.ObjMetadata
	error           error
}

func verifyEvents(expEvents []expEvent, events []event.Event) error {
	expEventIndex := 0
	for i := range events {
		e := events[i]
		ee := expEvents[expEventIndex]
		if isMatch(ee, e) {
			expEventIndex += 1
			if expEventIndex >= len(expEvents) {
				return nil
			}
		}
	}
	return fmt.Errorf("event %s not found", expEvents[expEventIndex].eventType)
}

var nilIdentifier = object.ObjMetadata{}

// nolint:gocyclo
// TODO(mortent): This function is pretty complex and with quite a bit of
// duplication. We should see if there is a better way to provide a flexible
// way to verify that we go the expected events.
func isMatch(ee expEvent, e event.Event) bool {
	if ee.eventType != e.Type {
		return false
	}

	// nolint:gocritic
	switch e.Type {
	case event.ApplyType:
		aee := ee.applyEvent
		// If no more information is specified, we consider it a match.
		if aee == nil {
			return true
		}
		ae := e.ApplyEvent

		if aee.applyEventType != ae.Type {
			return false
		}

		if aee.applyEventType == event.ApplyEventResourceUpdate {
			if aee.identifier != nilIdentifier {
				if aee.identifier != ae.Identifier {
					return false
				}
			}

			if aee.operation != ae.Operation {
				return false
			}
		}

		if aee.error != nil {
			return ae.Error != nil
		}
		return ae.Error == nil

	case event.StatusType:
		see := ee.statusEvent
		if see == nil {
			return true
		}
		se := e.StatusEvent

		if see.statusEventType != se.Type {
			return false
		}

		if see.statusEventType == event.StatusEventResourceUpdate {
			if see.identifier != nilIdentifier {
				if see.identifier != se.Resource.Identifier {
					return false
				}
			}

			if see.status != se.Resource.Status {
				return false
			}

			if see.error != nil {
				return se.Resource.Error != nil
			}
			return se.Resource.Error == nil
		}

	case event.PruneType:
		pee := ee.pruneEvent
		if pee == nil {
			return true
		}
		pe := e.PruneEvent

		if pee.pruneEventType != pe.Type {
			return false
		}

		if pee.pruneEventType == event.PruneEventResourceUpdate {
			if pee.identifier != nilIdentifier {
				if pee.identifier != pe.Identifier {
					return false
				}
			}

			if pee.operation != pe.Operation {
				return false
			}
		}

		if pee.error != nil {
			return pe.Error != nil
		}
		return pe.Error == nil

	case event.DeleteType:
		dee := ee.deleteEvent
		if dee == nil {
			return true
		}
		de := e.DeleteEvent

		if dee.deleteEventType != de.Type {
			return false
		}

		if dee.deleteEventType == event.DeleteEventResourceUpdate {
			if dee.identifier != nilIdentifier {
				if dee.identifier != de.Identifier {
					return false
				}
			}

			if dee.operation != de.Operation {
				return false
			}
		}

		if dee.error != nil {
			return de.Error != nil
		}
		return de.Error == nil
	}
	return true
}
@ -15,6 +15,7 @@ import (
	"sigs.k8s.io/cli-utils/pkg/apply/event"
	"sigs.k8s.io/cli-utils/pkg/inventory"
	"sigs.k8s.io/cli-utils/pkg/object"
	"sigs.k8s.io/cli-utils/pkg/testutil"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

@ -36,34 +37,23 @@ func continueOnErrorTest(_ client.Client, invConfig InventoryConfig, inventoryNa
		Expect(e.Type).NotTo(Equal(event.ErrorType))
		applierEvents = append(applierEvents, e)
	}
	err := verifyEvents([]expEvent{
	err := testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.InitType,
			EventType: event.InitType,
		},
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventResourceUpdate,
				operation:      event.Failed,
				identifier:     object.UnstructuredToObjMeta(manifestToUnstructured(invalidCrd)),
				error:          fmt.Errorf("failed to apply"),
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Identifier: object.UnstructuredToObjMeta(manifestToUnstructured(invalidCrd)),
				Error:      fmt.Errorf("failed to apply"),
			},
		},
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventResourceUpdate,
				operation:      event.Created,
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Operation: event.Created,
			},
		},
		{
			// complete
			eventType: event.ApplyType,
		},
		{
			// complete
			eventType: event.PruneType,
		},
	}, applierEvents)
	Expect(err).ToNot(HaveOccurred())

@ -71,13 +61,12 @@ func continueOnErrorTest(_ client.Client, invConfig InventoryConfig, inventoryNa
	destroyer := invConfig.DestroyerFactoryFunc()
	option := &apply.DestroyerOption{InventoryPolicy: inventory.AdoptIfNoInventory}
	destroyerEvents := runCollectNoErr(destroyer.Run(inv, option))
	err = verifyEvents([]expEvent{
	err = testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.DeleteType,
			deleteEvent: &expDeleteEvent{
				deleteEventType: event.DeleteEventResourceUpdate,
				operation:       event.Deleted,
				error:           nil,
			EventType: event.DeleteType,
			DeleteEvent: &testutil.ExpDeleteEvent{
				Operation: event.Deleted,
				Error:     nil,
			},
		},
	}, destroyerEvents)
@ -16,6 +16,7 @@ import (
	"sigs.k8s.io/cli-utils/pkg/inventory"
	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
	"sigs.k8s.io/cli-utils/pkg/object"
	"sigs.k8s.io/cli-utils/pkg/testutil"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

@ -41,37 +42,33 @@ func crdTest(_ client.Client, invConfig InventoryConfig, inventoryName, namespac
		Expect(e.Type).NotTo(Equal(event.ErrorType))
		applierEvents = append(applierEvents, e)
	}
	err := verifyEvents([]expEvent{
	err := testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventResourceUpdate,
				operation:      event.Created,
				identifier:     object.UnstructuredToObjMeta(manifestToUnstructured(crd)),
				error:          nil,
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Operation:  event.Created,
				Identifier: object.UnstructuredToObjMeta(manifestToUnstructured(crd)),
				Error:      nil,
			},
		},
		{
			eventType: event.StatusType,
			statusEvent: &expStatusEvent{
				statusEventType: event.StatusEventResourceUpdate,
				identifier:      object.UnstructuredToObjMeta(manifestToUnstructured(crd)),
				status:          status.CurrentStatus,
				error:           nil,
			EventType: event.StatusType,
			StatusEvent: &testutil.ExpStatusEvent{
				Identifier: object.UnstructuredToObjMeta(manifestToUnstructured(crd)),
				Status:     status.CurrentStatus,
				Error:      nil,
			},
		},
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventResourceUpdate,
				operation:      event.Created,
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Operation: event.Created,
			},
		},
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventResourceUpdate,
				operation:      event.Created,
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Operation: event.Created,
			},
		},
	}, applierEvents)

@ -81,30 +78,27 @@ func crdTest(_ client.Client, invConfig InventoryConfig, inventoryName, namespac
	destroyer := invConfig.DestroyerFactoryFunc()
	option := &apply.DestroyerOption{InventoryPolicy: inventory.AdoptIfNoInventory}
	destroyerEvents := runCollectNoErr(destroyer.Run(inv, option))
	err = verifyEvents([]expEvent{
	err = testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.DeleteType,
			deleteEvent: &expDeleteEvent{
				deleteEventType: event.DeleteEventResourceUpdate,
				operation:       event.Deleted,
				error:           nil,
			EventType: event.DeleteType,
			DeleteEvent: &testutil.ExpDeleteEvent{
				Operation: event.Deleted,
				Error:     nil,
			},
		},
		{
			eventType: event.DeleteType,
			deleteEvent: &expDeleteEvent{
				deleteEventType: event.DeleteEventResourceUpdate,
				operation:       event.Deleted,
				error:           nil,
			EventType: event.DeleteType,
			DeleteEvent: &testutil.ExpDeleteEvent{
				Operation: event.Deleted,
				Error:     nil,
			},
		},
		{
			eventType: event.DeleteType,
			deleteEvent: &expDeleteEvent{
				deleteEventType: event.DeleteEventResourceUpdate,
				operation:       event.Deleted,
				identifier:      object.UnstructuredToObjMeta(manifestToUnstructured(crd)),
				error:           nil,
			EventType: event.DeleteType,
			DeleteEvent: &testutil.ExpDeleteEvent{
				Operation:  event.Deleted,
				Identifier: object.UnstructuredToObjMeta(manifestToUnstructured(crd)),
				Error:      nil,
			},
		},
	}, destroyerEvents)
@ -17,6 +17,7 @@ import (
	"sigs.k8s.io/cli-utils/pkg/apply/event"
	"sigs.k8s.io/cli-utils/pkg/inventory"
	"sigs.k8s.io/cli-utils/pkg/object"
	"sigs.k8s.io/cli-utils/pkg/testutil"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

@ -54,20 +55,12 @@ func inventoryPolicyMustMatchTest(c client.Client, invConfig InventoryConfig, na
	}

	By("Verify the events")
	err := verifyEvents([]expEvent{
	err := testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventResourceUpdate,
				operation:      event.Unchanged,
				identifier:     object.UnstructuredToObjMeta(deploymentManifest(namespaceName)),
				error:          inventory.NewInventoryOverlapError(fmt.Errorf("test")),
			},
		},
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventCompleted,
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Identifier: object.UnstructuredToObjMeta(deploymentManifest(namespaceName)),
				Error:      inventory.NewInventoryOverlapError(fmt.Errorf("test")),
			},
		},
	}, events)

@ -111,20 +104,13 @@ func inventoryPolicyAdoptIfNoInventoryTest(c client.Client, invConfig InventoryC
	}

	By("Verify the events")
	err = verifyEvents([]expEvent{
	err = testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventResourceUpdate,
				operation:      event.Configured,
				identifier:     object.UnstructuredToObjMeta(deploymentManifest(namespaceName)),
				error:          nil,
			},
		},
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventCompleted,
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Operation:  event.Configured,
				Identifier: object.UnstructuredToObjMeta(deploymentManifest(namespaceName)),
				Error:      nil,
			},
		},
	}, events)

@ -178,20 +164,13 @@ func inventoryPolicyAdoptAllTest(c client.Client, invConfig InventoryConfig, nam
	}

	By("Verify the events")
	err := verifyEvents([]expEvent{
	err := testutil.VerifyEvents([]testutil.ExpEvent{
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventResourceUpdate,
				operation:      event.Configured,
				identifier:     object.UnstructuredToObjMeta(deploymentManifest(namespaceName)),
				error:          nil,
			},
		},
		{
			eventType: event.ApplyType,
			applyEvent: &expApplyEvent{
				applyEventType: event.ApplyEventCompleted,
			EventType: event.ApplyType,
			ApplyEvent: &testutil.ExpApplyEvent{
				Operation:  event.Configured,
				Identifier: object.UnstructuredToObjMeta(deploymentManifest(namespaceName)),
				Error:      nil,
			},
		},
	}, events)