diff --git a/.golangci.yml b/.golangci.yml index 13f4aa7..16fbba0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -15,13 +15,13 @@ linters: - dogsled - dupl - errcheck + - exportloopref - gochecknoinits - goconst - gocritic - gocyclo - gofmt - goimports - - golint - gosec - gosimple - govet @@ -30,10 +30,10 @@ linters: - lll - misspell - nakedret - - scopelint - staticcheck - structcheck - stylecheck + - revive - typecheck - unconvert - unparam @@ -49,8 +49,6 @@ linters-settings: line-length: 170 gocyclo: min-complexity: 30 - golint: - min-confidence: 0.85 issues: # List of regexps of issue texts to exclude, empty list by default. diff --git a/Makefile b/Makefile index b992268..c1f9c28 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,7 @@ install-addlicense: (which $(GOPATH)/bin/addlicense || go install github.com/google/addlicense@v1.0.0) install-lint: - (which $(GOPATH)/bin/golangci-lint || go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1) + (which $(GOPATH)/bin/golangci-lint || go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.44.0) install-deepcopy-gen: (which $(GOPATH)/bin/deepcopy-gen || go install k8s.io/code-generator/cmd/deepcopy-gen@v0.23.3) diff --git a/cmd/apply/cmdapply.go b/cmd/apply/cmdapply.go index 512525e..89308bb 100644 --- a/cmd/apply/cmdapply.go +++ b/cmd/apply/cmdapply.go @@ -21,9 +21,9 @@ import ( "sigs.k8s.io/cli-utils/pkg/printers" ) -func GetApplyRunner(factory cmdutil.Factory, invFactory inventory.InventoryClientFactory, - loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *ApplyRunner { - r := &ApplyRunner{ +func GetRunner(factory cmdutil.Factory, invFactory inventory.ClientFactory, + loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *Runner { + r := &Runner{ ioStreams: ioStreams, factory: factory, invFactory: invFactory, @@ -67,16 +67,16 @@ func GetApplyRunner(factory cmdutil.Factory, invFactory inventory.InventoryClien return r } -func ApplyCommand(f cmdutil.Factory, invFactory inventory.InventoryClientFactory, loader manifestreader.ManifestLoader, +func Command(f cmdutil.Factory, invFactory inventory.ClientFactory, loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *cobra.Command { - return GetApplyRunner(f, invFactory, loader, ioStreams).Command + return GetRunner(f, invFactory, loader, ioStreams).Command } -type ApplyRunner struct { +type Runner struct { Command *cobra.Command ioStreams genericclioptions.IOStreams factory cmdutil.Factory - invFactory inventory.InventoryClientFactory + invFactory inventory.ClientFactory loader manifestreader.ManifestLoader serverSideOptions common.ServerSideOptions @@ -91,7 +91,7 @@ type ApplyRunner struct { printStatusEvents bool } -func (r *ApplyRunner) RunE(cmd *cobra.Command, args []string) error { +func (r *Runner) RunE(cmd *cobra.Command, args []string) error { ctx := cmd.Context() // If specified, cancel with timeout. 
if r.timeout != 0 { @@ -134,7 +134,7 @@ func (r *ApplyRunner) RunE(cmd *cobra.Command, args []string) error { } inv := inventory.WrapInventoryInfoObj(invObj) - invClient, err := r.invFactory.NewInventoryClient(r.factory) + invClient, err := r.invFactory.NewClient(r.factory) if err != nil { return err } diff --git a/cmd/destroy/cmddestroy.go b/cmd/destroy/cmddestroy.go index 88f02c9..68225ed 100644 --- a/cmd/destroy/cmddestroy.go +++ b/cmd/destroy/cmddestroy.go @@ -21,10 +21,10 @@ import ( "sigs.k8s.io/cli-utils/pkg/printers" ) -// GetDestroyRunner creates and returns the DestroyRunner which stores the cobra command. -func GetDestroyRunner(factory cmdutil.Factory, invFactory inventory.InventoryClientFactory, - loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *DestroyRunner { - r := &DestroyRunner{ +// GetRunner creates and returns the Runner which stores the cobra command. +func GetRunner(factory cmdutil.Factory, invFactory inventory.ClientFactory, + loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *Runner { + r := &Runner{ ioStreams: ioStreams, factory: factory, invFactory: invFactory, @@ -55,18 +55,18 @@ func GetDestroyRunner(factory cmdutil.Factory, invFactory inventory.InventoryCli return r } -// DestroyCommand creates the DestroyRunner, returning the cobra command associated with it. -func DestroyCommand(f cmdutil.Factory, invFactory inventory.InventoryClientFactory, loader manifestreader.ManifestLoader, +// Command creates the Runner, returning the cobra command associated with it. +func Command(f cmdutil.Factory, invFactory inventory.ClientFactory, loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *cobra.Command { - return GetDestroyRunner(f, invFactory, loader, ioStreams).Command + return GetRunner(f, invFactory, loader, ioStreams).Command } -// DestroyRunner encapsulates data necessary to run the destroy command. -type DestroyRunner struct { +// Runner encapsulates data necessary to run the destroy command. +type Runner struct { Command *cobra.Command ioStreams genericclioptions.IOStreams factory cmdutil.Factory - invFactory inventory.InventoryClientFactory + invFactory inventory.ClientFactory loader manifestreader.ManifestLoader output string @@ -77,7 +77,7 @@ type DestroyRunner struct { printStatusEvents bool } -func (r *DestroyRunner) RunE(cmd *cobra.Command, args []string) error { +func (r *Runner) RunE(cmd *cobra.Command, args []string) error { ctx := cmd.Context() // If specified, cancel with timeout. if r.timeout != 0 { @@ -114,7 +114,7 @@ func (r *DestroyRunner) RunE(cmd *cobra.Command, args []string) error { } inv := inventory.WrapInventoryInfoObj(invObj) - invClient, err := r.invFactory.NewInventoryClient(r.factory) + invClient, err := r.invFactory.NewClient(r.factory) if err != nil { return err } diff --git a/cmd/diff/cmddiff.go b/cmd/diff/cmddiff.go index 8a5c418..bd77bd4 100644 --- a/cmd/diff/cmddiff.go +++ b/cmd/diff/cmddiff.go @@ -19,10 +19,10 @@ import ( const tmpDirPrefix = "diff-cmd" -// NewCmdDiff returns cobra command to implement client-side diff of package +// NewCommand returns cobra command to implement client-side diff of package // directory. For each local config file, get the resource in the cluster // and diff the local config resource against the resource in the cluster. 
-func NewCmdDiff(f util.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { +func NewCommand(f util.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { options := diff.NewDiffOptions(ioStreams) cmd := &cobra.Command{ Use: "diff (DIRECTORY | STDIN)", diff --git a/cmd/flagutils/utils.go b/cmd/flagutils/utils.go index b1ce1e0..3d80cd9 100644 --- a/cmd/flagutils/utils.go +++ b/cmd/flagutils/utils.go @@ -33,16 +33,16 @@ func ConvertPropagationPolicy(propagationPolicy string) (metav1.DeletionPropagat } } -func ConvertInventoryPolicy(policy string) (inventory.InventoryPolicy, error) { +func ConvertInventoryPolicy(policy string) (inventory.Policy, error) { switch policy { case InventoryPolicyStrict: - return inventory.InventoryPolicyMustMatch, nil + return inventory.PolicyMustMatch, nil case InventoryPolicyAdopt: - return inventory.AdoptIfNoInventory, nil + return inventory.PolicyAdoptIfNoInventory, nil case InventoryPolicyForceAdopt: - return inventory.AdoptAll, nil + return inventory.PolicyAdoptAll, nil default: - return inventory.InventoryPolicyMustMatch, fmt.Errorf( + return inventory.PolicyMustMatch, fmt.Errorf( "inventory policy must be one of strict, adopt") } } diff --git a/cmd/flagutils/utils_test.go b/cmd/flagutils/utils_test.go index f67ca83..a4b52ea 100644 --- a/cmd/flagutils/utils_test.go +++ b/cmd/flagutils/utils_test.go @@ -13,20 +13,20 @@ import ( func TestConvertInventoryPolicy(t *testing.T) { testcases := []struct { value string - policy inventory.InventoryPolicy + policy inventory.Policy err error }{ { value: "strict", - policy: inventory.InventoryPolicyMustMatch, + policy: inventory.PolicyMustMatch, }, { value: "adopt", - policy: inventory.AdoptIfNoInventory, + policy: inventory.PolicyAdoptIfNoInventory, }, { value: "force-adopt", - policy: inventory.AdoptAll, + policy: inventory.PolicyAdoptAll, }, { value: "random", diff --git a/cmd/main.go b/cmd/main.go index ee1369f..36ffc94 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -56,16 +56,16 @@ func main() { initCmd := initcmd.NewCmdInit(f, ioStreams) updateHelp(names, initCmd) loader := manifestreader.NewManifestLoader(f) - invFactory := inventory.ClusterInventoryClientFactory{} - applyCmd := apply.ApplyCommand(f, invFactory, loader, ioStreams) + invFactory := inventory.ClusterClientFactory{} + applyCmd := apply.Command(f, invFactory, loader, ioStreams) updateHelp(names, applyCmd) - previewCmd := preview.PreviewCommand(f, invFactory, loader, ioStreams) + previewCmd := preview.Command(f, invFactory, loader, ioStreams) updateHelp(names, previewCmd) - diffCmd := diff.NewCmdDiff(f, ioStreams) + diffCmd := diff.NewCommand(f, ioStreams) updateHelp(names, diffCmd) - destroyCmd := destroy.DestroyCommand(f, invFactory, loader, ioStreams) + destroyCmd := destroy.Command(f, invFactory, loader, ioStreams) updateHelp(names, destroyCmd) - statusCmd := status.StatusCommand(f, invFactory, loader) + statusCmd := status.Command(f, invFactory, loader) updateHelp(names, statusCmd) cmd.AddCommand(initCmd, applyCmd, diffCmd, destroyCmd, previewCmd, statusCmd) diff --git a/cmd/preview/cmdpreview.go b/cmd/preview/cmdpreview.go index 916239f..aecb4e1 100644 --- a/cmd/preview/cmdpreview.go +++ b/cmd/preview/cmdpreview.go @@ -27,10 +27,10 @@ var ( previewDestroy = false ) -// GetPreviewRunner creates and returns the PreviewRunner which stores the cobra command. 
-func GetPreviewRunner(factory cmdutil.Factory, invFactory inventory.InventoryClientFactory, - loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *PreviewRunner { - r := &PreviewRunner{ +// GetRunner creates and returns the Runner which stores the cobra command. +func GetRunner(factory cmdutil.Factory, invFactory inventory.ClientFactory, + loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *Runner { + r := &Runner{ factory: factory, invFactory: invFactory, loader: loader, @@ -64,17 +64,17 @@ func GetPreviewRunner(factory cmdutil.Factory, invFactory inventory.InventoryCli return r } -// PreviewCommand creates the PreviewRunner, returning the cobra command associated with it. -func PreviewCommand(f cmdutil.Factory, invFactory inventory.InventoryClientFactory, loader manifestreader.ManifestLoader, +// Command creates the Runner, returning the cobra command associated with it. +func Command(f cmdutil.Factory, invFactory inventory.ClientFactory, loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *cobra.Command { - return GetPreviewRunner(f, invFactory, loader, ioStreams).Command + return GetRunner(f, invFactory, loader, ioStreams).Command } -// PreviewRunner encapsulates data necessary to run the preview command. -type PreviewRunner struct { +// Runner encapsulates data necessary to run the preview command. +type Runner struct { Command *cobra.Command factory cmdutil.Factory - invFactory inventory.InventoryClientFactory + invFactory inventory.ClientFactory loader manifestreader.ManifestLoader ioStreams genericclioptions.IOStreams @@ -85,7 +85,7 @@ type PreviewRunner struct { } // RunE is the function run from the cobra command. -func (r *PreviewRunner) RunE(cmd *cobra.Command, args []string) error { +func (r *Runner) RunE(cmd *cobra.Command, args []string) error { ctx := cmd.Context() // If specified, cancel with timeout. if r.timeout != 0 { @@ -126,7 +126,7 @@ func (r *PreviewRunner) RunE(cmd *cobra.Command, args []string) error { } inv := inventory.WrapInventoryInfoObj(invObj) - invClient, err := r.invFactory.NewInventoryClient(r.factory) + invClient, err := r.invFactory.NewClient(r.factory) if err != nil { return err } diff --git a/cmd/status/cmdstatus.go b/cmd/status/cmdstatus.go index 0e78195..28f87d5 100644 --- a/cmd/status/cmdstatus.go +++ b/cmd/status/cmdstatus.go @@ -24,8 +24,8 @@ import ( "sigs.k8s.io/cli-utils/pkg/manifestreader" ) -func GetStatusRunner(factory cmdutil.Factory, invFactory inventory.InventoryClientFactory, loader manifestreader.ManifestLoader) *StatusRunner { - r := &StatusRunner{ +func GetRunner(factory cmdutil.Factory, invFactory inventory.ClientFactory, loader manifestreader.ManifestLoader) *Runner { + r := &Runner{ factory: factory, invFactory: invFactory, loader: loader, @@ -47,16 +47,16 @@ func GetStatusRunner(factory cmdutil.Factory, invFactory inventory.InventoryClie return r } -func StatusCommand(f cmdutil.Factory, invFactory inventory.InventoryClientFactory, loader manifestreader.ManifestLoader) *cobra.Command { - return GetStatusRunner(f, invFactory, loader).Command +func Command(f cmdutil.Factory, invFactory inventory.ClientFactory, loader manifestreader.ManifestLoader) *cobra.Command { + return GetRunner(f, invFactory, loader).Command } -// StatusRunner captures the parameters for the command and contains +// Runner captures the parameters for the command and contains // the run function. 
-type StatusRunner struct { +type Runner struct { Command *cobra.Command factory cmdutil.Factory - invFactory inventory.InventoryClientFactory + invFactory inventory.ClientFactory loader manifestreader.ManifestLoader period time.Duration @@ -70,7 +70,7 @@ type StatusRunner struct { // runE implements the logic of the command and will delegate to the // poller to compute status for each of the resources. One of the printer // implementations takes care of printing the output. -func (r *StatusRunner) runE(cmd *cobra.Command, args []string) error { +func (r *Runner) runE(cmd *cobra.Command, args []string) error { _, err := common.DemandOneDirectory(args) if err != nil { return err @@ -91,7 +91,7 @@ func (r *StatusRunner) runE(cmd *cobra.Command, args []string) error { } inv := inventory.WrapInventoryInfoObj(invObj) - invClient, err := r.invFactory.NewInventoryClient(r.factory) + invClient, err := r.invFactory.NewClient(r.factory) if err != nil { return err } diff --git a/cmd/status/cmdstatus_test.go b/cmd/status/cmdstatus_test.go index 2365d0c..a017212 100644 --- a/cmd/status/cmdstatus_test.go +++ b/cmd/status/cmdstatus_test.go @@ -53,7 +53,7 @@ metadata: } ) -func TestStatusCommand(t *testing.T) { +func TestCommand(t *testing.T) { testCases := map[string]struct { pollUntil string printer string @@ -78,7 +78,7 @@ func TestStatusCommand(t *testing.T) { }, events: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: depObject, Status: status.InProgressStatus, @@ -86,7 +86,7 @@ func TestStatusCommand(t *testing.T) { }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: stsObject, Status: status.CurrentStatus, @@ -109,7 +109,7 @@ statefulset.apps/bar is Current: current }, events: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: depObject, Status: status.InProgressStatus, @@ -117,7 +117,7 @@ statefulset.apps/bar is Current: current }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: stsObject, Status: status.InProgressStatus, @@ -125,7 +125,7 @@ statefulset.apps/bar is Current: current }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: stsObject, Status: status.CurrentStatus, @@ -133,7 +133,7 @@ statefulset.apps/bar is Current: current }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: depObject, Status: status.CurrentStatus, @@ -158,7 +158,7 @@ deployment.apps/foo is Current: current }, events: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: stsObject, Status: status.NotFoundStatus, @@ -166,7 +166,7 @@ deployment.apps/foo is Current: current }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: depObject, Status: status.NotFoundStatus, @@ -190,7 +190,7 @@ deployment.apps/foo is NotFound: notFound }, events: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: 
stsObject, Status: status.InProgressStatus, @@ -198,7 +198,7 @@ deployment.apps/foo is NotFound: notFound }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: depObject, Status: status.InProgressStatus, @@ -219,9 +219,9 @@ deployment.apps/foo is InProgress: inProgress defer tf.Cleanup() loader := manifestreader.NewFakeLoader(tf, tc.inventory) - runner := &StatusRunner{ + runner := &Runner{ factory: tf, - invFactory: inventory.FakeInventoryClientFactory(tc.inventory), + invFactory: inventory.FakeClientFactory(tc.inventory), loader: loader, pollerFactoryFunc: func(c cmdutil.Factory) (poller.Poller, error) { return &fakePoller{tc.events}, nil diff --git a/cmd/status/printers/event/event_printer.go b/cmd/status/printers/event/printer.go similarity index 76% rename from cmd/status/printers/event/event_printer.go rename to cmd/status/printers/event/printer.go index 51c1266..c500911 100644 --- a/cmd/status/printers/event/event_printer.go +++ b/cmd/status/printers/event/printer.go @@ -14,16 +14,16 @@ import ( "sigs.k8s.io/cli-utils/pkg/object" ) -// eventPrinter implements the Printer interface and outputs the resource +// Printer implements the Printer interface and outputs the resource // status information as a list of events as they happen. -type eventPrinter struct { - ioStreams genericclioptions.IOStreams +type Printer struct { + IOStreams genericclioptions.IOStreams } -// NewEventPrinter returns a new instance of the eventPrinter. -func NewEventPrinter(ioStreams genericclioptions.IOStreams) *eventPrinter { - return &eventPrinter{ - ioStreams: ioStreams, +// NewPrinter returns a new instance of the Printer. +func NewPrinter(ioStreams genericclioptions.IOStreams) *Printer { + return &Printer{ + IOStreams: ioStreams, } } @@ -31,7 +31,7 @@ func NewEventPrinter(ioStreams genericclioptions.IOStreams) *eventPrinter { // until the channel is closed. The provided cancelFunc is consulted on // every event and is responsible for stopping the poller when appropriate. // This function will block.
-func (ep *eventPrinter) Print(ch <-chan pollevent.Event, identifiers object.ObjMetadataSet, +func (ep *Printer) Print(ch <-chan pollevent.Event, identifiers object.ObjMetadataSet, cancelFunc collector.ObserverFunc) error { coll := collector.NewResourceStatusCollector(identifiers) // The actual work is done by the collector, which will invoke the @@ -52,15 +52,15 @@ func (ep *eventPrinter) Print(ch <-chan pollevent.Event, identifiers object.ObjM return err } -func (ep *eventPrinter) printStatusEvent(se pollevent.Event) { - switch se.EventType { +func (ep *Printer) printStatusEvent(se pollevent.Event) { + switch se.Type { case pollevent.ResourceUpdateEvent: id := se.Resource.Identifier - printResourceStatus(id, se, ep.ioStreams) + printResourceStatus(id, se, ep.IOStreams) case pollevent.ErrorEvent: id := se.Resource.Identifier gk := id.GroupKind - fmt.Fprintf(ep.ioStreams.Out, "%s error: %s\n", resourceIDToString(gk, id.Name), + fmt.Fprintf(ep.IOStreams.Out, "%s error: %s\n", resourceIDToString(gk, id.Name), se.Error.Error()) } } diff --git a/cmd/status/printers/printers.go b/cmd/status/printers/printers.go index c23e985..5c63824 100644 --- a/cmd/status/printers/printers.go +++ b/cmd/status/printers/printers.go @@ -16,8 +16,8 @@ import ( func CreatePrinter(printerType string, ioStreams genericclioptions.IOStreams) (printer.Printer, error) { switch printerType { case "table": - return table.NewTablePrinter(ioStreams), nil + return table.NewPrinter(ioStreams), nil default: - return event.NewEventPrinter(ioStreams), nil + return event.NewPrinter(ioStreams), nil } } diff --git a/cmd/status/printers/table/table_printer.go b/cmd/status/printers/table/printer.go similarity index 81% rename from cmd/status/printers/table/table_printer.go rename to cmd/status/printers/table/printer.go index d82cc39..9bb93aa 100644 --- a/cmd/status/printers/table/table_printer.go +++ b/cmd/status/printers/table/printer.go @@ -18,22 +18,22 @@ const ( updateInterval = 1 * time.Second ) -// tablePrinter is an implementation of the Printer interface that outputs +// Printer is an implementation of the Printer interface that outputs // status information about resources in a table format with in-place updates. -type tablePrinter struct { - ioStreams genericclioptions.IOStreams +type Printer struct { + IOStreams genericclioptions.IOStreams } -// NewTablePrinter returns a new instance of the tablePrinter. -func NewTablePrinter(ioStreams genericclioptions.IOStreams) *tablePrinter { - return &tablePrinter{ - ioStreams: ioStreams, +// NewPrinter returns a new instance of the Printer. +func NewPrinter(ioStreams genericclioptions.IOStreams) *Printer { + return &Printer{ + IOStreams: ioStreams, } } // Print take an event channel and outputs the status events on the channel // until the channel is closed . -func (t *tablePrinter) Print(ch <-chan event.Event, identifiers object.ObjMetadataSet, +func (t *Printer) Print(ch <-chan event.Event, identifiers object.ObjMetadataSet, cancelFunc collector.ObserverFunc) error { coll := collector.NewResourceStatusCollector(identifiers) stop := make(chan struct{}) @@ -76,11 +76,11 @@ var columns = []table.ColumnDefinition{ // Print prints the table of resources with their statuses until the // provided stop channel is closed.
-func (t *tablePrinter) runPrintLoop(coll *CollectorAdapter, stop <-chan struct{}) <-chan struct{} { +func (t *Printer) runPrintLoop(coll *CollectorAdapter, stop <-chan struct{}) <-chan struct{} { finished := make(chan struct{}) baseTablePrinter := table.BaseTablePrinter{ - IOStreams: t.ioStreams, + IOStreams: t.IOStreams, Columns: columns, } diff --git a/pkg/apis/actuation/types.go b/pkg/apis/actuation/types.go index 49cbdfb..0f1dd64 100644 --- a/pkg/apis/actuation/types.go +++ b/pkg/apis/actuation/types.go @@ -79,6 +79,7 @@ type ObjectStatus struct { Generation int64 `json:"generation,omitempty"` } +//nolint:revive // consistent prefix improves tab-completion for enums //go:generate stringer -type=ActuationStrategy -linecomment type ActuationStrategy int @@ -87,6 +88,7 @@ const ( ActuationStrategyDelete // Delete ) +//nolint:revive // consistent prefix improves tab-completion for enums //go:generate stringer -type=ActuationStatus -linecomment type ActuationStatus int diff --git a/pkg/apply/applier.go b/pkg/apply/applier.go index 283a124..38e55af 100644 --- a/pkg/apply/applier.go +++ b/pkg/apply/applier.go @@ -44,16 +44,16 @@ const defaultPollInterval = 2 * time.Second type Applier struct { pruner *prune.Pruner statusPoller poller.Poller - invClient inventory.InventoryClient + invClient inventory.Client client dynamic.Interface openAPIGetter discovery.OpenAPISchemaInterface mapper meta.RESTMapper - infoHelper info.InfoHelper + infoHelper info.Helper } // prepareObjects returns the set of objects to apply and to prune or // an error if one occurred. -func (a *Applier) prepareObjects(localInv inventory.InventoryInfo, localObjs object.UnstructuredSet, +func (a *Applier) prepareObjects(localInv inventory.Info, localObjs object.UnstructuredSet, o ApplierOptions) (object.UnstructuredSet, object.UnstructuredSet, error) { if localInv == nil { return nil, nil, fmt.Errorf("the local inventory can't be nil") @@ -102,7 +102,7 @@ func (a *Applier) prepareObjects(localInv inventory.InventoryInfo, localObjs obj // before all the given resources have been applied to the cluster. Any // cancellation or timeout will only affect how long we Wait for the // resources to become current. -func (a *Applier) Run(ctx context.Context, invInfo inventory.InventoryInfo, objects object.UnstructuredSet, options ApplierOptions) <-chan event.Event { +func (a *Applier) Run(ctx context.Context, invInfo inventory.Info, objects object.UnstructuredSet, options ApplierOptions) <-chan event.Event { klog.V(4).Infof("apply run for %d objects", len(objects)) eventChannel := make(chan event.Event) setDefaults(&options) @@ -274,7 +274,7 @@ type ApplierOptions struct { PruneTimeout time.Duration // InventoryPolicy defines the inventory policy of apply. - InventoryPolicy inventory.InventoryPolicy + InventoryPolicy inventory.Policy // ValidationPolicy defines how to handle invalid objects. ValidationPolicy validation.Policy @@ -304,7 +304,7 @@ func handleError(eventChannel chan event.Event, err error) { // for the passed non cluster-scoped localObjs, plus the namespace // of the passed inventory object. This is used to skip deleting // namespaces which have currently applied objects in them. 
-func localNamespaces(localInv inventory.InventoryInfo, localObjs []object.ObjMetadata) sets.String { +func localNamespaces(localInv inventory.Info, localObjs []object.ObjMetadata) sets.String { namespaces := sets.NewString() for _, obj := range localObjs { if obj.Namespace != "" { diff --git a/pkg/apply/applier_builder.go b/pkg/apply/applier_builder.go index 72ed4d3..a052253 100644 --- a/pkg/apply/applier_builder.go +++ b/pkg/apply/applier_builder.go @@ -25,7 +25,7 @@ import ( type ApplierBuilder struct { // factory is only used to retrieve things that have not been provided explicitly. factory util.Factory - invClient inventory.InventoryClient + invClient inventory.Client client dynamic.Interface discoClient discovery.CachedDiscoveryInterface mapper meta.RESTMapper @@ -57,7 +57,7 @@ func (b *ApplierBuilder) Build() (*Applier, error) { client: bx.client, openAPIGetter: bx.discoClient, mapper: bx.mapper, - infoHelper: info.NewInfoHelper(bx.mapper, bx.unstructuredClientForMapping), + infoHelper: info.NewHelper(bx.mapper, bx.unstructuredClientForMapping), }, nil } @@ -124,7 +124,7 @@ func (b *ApplierBuilder) WithFactory(factory util.Factory) *ApplierBuilder { return b } -func (b *ApplierBuilder) WithInventoryClient(invClient inventory.InventoryClient) *ApplierBuilder { +func (b *ApplierBuilder) WithInventoryClient(invClient inventory.Client) *ApplierBuilder { b.invClient = invClient return b } diff --git a/pkg/apply/applier_test.go b/pkg/apply/applier_test.go index c03f42c..3f2bcd9 100644 --- a/pkg/apply/applier_test.go +++ b/pkg/apply/applier_test.go @@ -122,7 +122,7 @@ func TestApplier(t *testing.T) { clusterObjs: object.UnstructuredSet{}, options: ApplierOptions{ NoPrune: true, - InventoryPolicy: inventory.InventoryPolicyMustMatch, + InventoryPolicy: inventory.PolicyMustMatch, }, expectedEvents: []testutil.ExpEvent{ { @@ -229,12 +229,12 @@ func TestApplier(t *testing.T) { clusterObjs: object.UnstructuredSet{}, options: ApplierOptions{ ReconcileTimeout: time.Minute, - InventoryPolicy: inventory.InventoryPolicyMustMatch, + InventoryPolicy: inventory.PolicyMustMatch, EmitStatusEvents: true, }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, @@ -242,7 +242,7 @@ func TestApplier(t *testing.T) { }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.CurrentStatus, @@ -250,7 +250,7 @@ func TestApplier(t *testing.T) { }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["secret"]), Status: status.CurrentStatus, @@ -425,12 +425,12 @@ func TestApplier(t *testing.T) { }, options: ApplierOptions{ ReconcileTimeout: time.Minute, - InventoryPolicy: inventory.AdoptIfNoInventory, + InventoryPolicy: inventory.PolicyAdoptIfNoInventory, EmitStatusEvents: true, }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.CurrentStatus, @@ -438,7 +438,7 @@ func TestApplier(t *testing.T) { }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: 
pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["secret"]), Status: status.CurrentStatus, @@ -606,33 +606,33 @@ func TestApplier(t *testing.T) { testutil.Unstructured(t, resources["secret"], testutil.AddOwningInv(t, "test")), }, options: ApplierOptions{ - InventoryPolicy: inventory.InventoryPolicyMustMatch, + InventoryPolicy: inventory.PolicyMustMatch, EmitStatusEvents: true, }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["secret"]), Status: status.InProgressStatus, }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.NotFoundStatus, }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["secret"]), Status: status.NotFoundStatus, @@ -806,19 +806,19 @@ func TestApplier(t *testing.T) { }, options: ApplierOptions{ ReconcileTimeout: time.Minute, - InventoryPolicy: inventory.InventoryPolicyMustMatch, + InventoryPolicy: inventory.PolicyMustMatch, EmitStatusEvents: true, }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.CurrentStatus, @@ -945,7 +945,7 @@ func TestApplier(t *testing.T) { testutil.Unstructured(t, resources["deployment"], testutil.AddOwningInv(t, "unmatched")), }, options: ApplierOptions{ - InventoryPolicy: inventory.InventoryPolicyMustMatch, + InventoryPolicy: inventory.PolicyMustMatch, EmitStatusEvents: true, }, expectedEvents: []testutil.ExpEvent{ @@ -1052,19 +1052,19 @@ func TestApplier(t *testing.T) { testutil.Unstructured(t, resources["deployment"], testutil.AddOwningInv(t, "test")), }, options: ApplierOptions{ - InventoryPolicy: inventory.InventoryPolicyMustMatch, + InventoryPolicy: inventory.PolicyMustMatch, EmitStatusEvents: true, }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.NotFoundStatus, @@ -1201,13 +1201,13 @@ func TestApplier(t *testing.T) { clusterObjs: object.UnstructuredSet{}, options: ApplierOptions{ ReconcileTimeout: time.Minute, - InventoryPolicy: inventory.AdoptIfNoInventory, + InventoryPolicy: inventory.PolicyAdoptIfNoInventory, EmitStatusEvents: true, ValidationPolicy: validation.SkipInvalid, }, statusEvents: []pollevent.Event{ { - EventType: 
pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["secret"]), Status: status.CurrentStatus, @@ -1382,7 +1382,7 @@ func TestApplier(t *testing.T) { clusterObjs: object.UnstructuredSet{}, options: ApplierOptions{ ReconcileTimeout: time.Minute, - InventoryPolicy: inventory.AdoptIfNoInventory, + InventoryPolicy: inventory.PolicyAdoptIfNoInventory, EmitStatusEvents: true, ValidationPolicy: validation.ExitEarly, }, @@ -1562,13 +1562,13 @@ func TestApplierCancel(t *testing.T) { // EmitStatusEvents required to test event output EmitStatusEvents: true, NoPrune: true, - InventoryPolicy: inventory.InventoryPolicyMustMatch, + InventoryPolicy: inventory.PolicyMustMatch, // ReconcileTimeout required to enable WaitTasks ReconcileTimeout: 1 * time.Minute, }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, @@ -1576,7 +1576,7 @@ func TestApplierCancel(t *testing.T) { }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, @@ -1720,13 +1720,13 @@ func TestApplierCancel(t *testing.T) { // EmitStatusEvents required to test event output EmitStatusEvents: true, NoPrune: true, - InventoryPolicy: inventory.InventoryPolicyMustMatch, + InventoryPolicy: inventory.PolicyMustMatch, // ReconcileTimeout required to enable WaitTasks ReconcileTimeout: 1 * time.Minute, }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, @@ -1734,7 +1734,7 @@ func TestApplierCancel(t *testing.T) { }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.CurrentStatus, diff --git a/pkg/apply/common_test.go b/pkg/apply/common_test.go index 54370e9..6e7a94b 100644 --- a/pkg/apply/common_test.go +++ b/pkg/apply/common_test.go @@ -65,7 +65,7 @@ func (i inventoryInfo) toUnstructured() *unstructured.Unstructured { } } -func (i inventoryInfo) toWrapped() inventory.InventoryInfo { +func (i inventoryInfo) toWrapped() inventory.Info { return inventory.WrapInventoryInfoObj(i.toUnstructured()) } @@ -120,10 +120,10 @@ func newTestDestroyer( func newTestInventory( t *testing.T, tf *cmdtesting.TestFactory, -) inventory.InventoryClient { - // Use an InventoryClient with a fakeInfoHelper to allow generating Info +) inventory.Client { + // Use an Client with a fakeInfoHelper to allow generating Info // objects that use the FakeRESTClient as the UnstructuredClient. - invClient, err := inventory.ClusterInventoryClientFactory{}.NewInventoryClient(tf) + invClient, err := inventory.ClusterClientFactory{}.NewClient(tf) require.NoError(t, err) return invClient } diff --git a/pkg/apply/destroyer.go b/pkg/apply/destroyer.go index 3182f8b..c75165b 100644 --- a/pkg/apply/destroyer.go +++ b/pkg/apply/destroyer.go @@ -32,7 +32,7 @@ import ( // the ApplyOptions were responsible for printing progress. 
This is now // handled by a separate printer with the KubectlPrinterAdapter bridging // between the two. -func NewDestroyer(factory cmdutil.Factory, invClient inventory.InventoryClient) (*Destroyer, error) { +func NewDestroyer(factory cmdutil.Factory, invClient inventory.Client) (*Destroyer, error) { pruner, err := prune.NewPruner(factory, invClient) if err != nil { return nil, fmt.Errorf("error setting up PruneOptions: %w", err) @@ -55,12 +55,12 @@ type Destroyer struct { pruner *prune.Pruner StatusPoller poller.Poller factory cmdutil.Factory - invClient inventory.InventoryClient + invClient inventory.Client } type DestroyerOptions struct { // InventoryPolicy defines the inventory policy of apply. - InventoryPolicy inventory.InventoryPolicy + InventoryPolicy inventory.Policy // DryRunStrategy defines whether changes should actually be performed, // or if it is just talk and no action. @@ -99,7 +99,7 @@ func setDestroyerDefaults(o *DestroyerOptions) { // Run performs the destroy step. Passes the inventory object. This // happens asynchronously on progress and any errors are reported // back on the event channel. -func (d *Destroyer) Run(ctx context.Context, inv inventory.InventoryInfo, options DestroyerOptions) <-chan event.Event { +func (d *Destroyer) Run(ctx context.Context, inv inventory.Info, options DestroyerOptions) <-chan event.Event { eventChannel := make(chan event.Event) setDestroyerDefaults(&options) go func() { @@ -139,7 +139,7 @@ func (d *Destroyer) Run(ctx context.Context, inv inventory.InventoryInfo, option Pruner: d.pruner, DynamicClient: dynamicClient, OpenAPIGetter: d.factory.OpenAPIGetter(), - InfoHelper: info.NewInfoHelper(mapper, d.factory.UnstructuredClientForMapping), + InfoHelper: info.NewHelper(mapper, d.factory.UnstructuredClientForMapping), Mapper: mapper, InvClient: d.invClient, Destroy: true, diff --git a/pkg/apply/destroyer_test.go b/pkg/apply/destroyer_test.go index 703fcfd..f294fce 100644 --- a/pkg/apply/destroyer_test.go +++ b/pkg/apply/destroyer_test.go @@ -62,7 +62,7 @@ func TestDestroyerCancel(t *testing.T) { }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, @@ -70,7 +70,7 @@ func TestDestroyerCancel(t *testing.T) { }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, @@ -184,7 +184,7 @@ func TestDestroyerCancel(t *testing.T) { }, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.InProgressStatus, @@ -192,7 +192,7 @@ func TestDestroyerCancel(t *testing.T) { }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: testutil.ToIdentifier(t, resources["deployment"]), Status: status.NotFoundStatus, diff --git a/pkg/apply/filter/inventory-policy-apply-filter.go b/pkg/apply/filter/inventory-policy-apply-filter.go index 8fd6b84..83e616d 100644 --- a/pkg/apply/filter/inventory-policy-apply-filter.go +++ b/pkg/apply/filter/inventory-policy-apply-filter.go @@ -22,8 +22,8 @@ import ( type InventoryPolicyApplyFilter struct { Client 
dynamic.Interface Mapper meta.RESTMapper - Inv inventory.InventoryInfo - InvPolicy inventory.InventoryPolicy + Inv inventory.Info + InvPolicy inventory.Policy } // Name returns a filter identifier for logging. @@ -38,7 +38,7 @@ func (ipaf InventoryPolicyApplyFilter) Filter(obj *unstructured.Unstructured) (b if obj == nil { return true, "missing object", nil } - if ipaf.InvPolicy == inventory.AdoptAll { + if ipaf.InvPolicy == inventory.PolicyAdoptAll { return false, "", nil } // Object must be retrieved from the cluster to get the inventory id. @@ -54,7 +54,7 @@ func (ipaf InventoryPolicyApplyFilter) Filter(obj *unstructured.Unstructured) (b // if an object should be applied. canApply, err := inventory.CanApply(ipaf.Inv, clusterObj, ipaf.InvPolicy) if !canApply { - invMatch := inventory.InventoryIDMatch(ipaf.Inv, clusterObj) + invMatch := inventory.IDMatch(ipaf.Inv, clusterObj) reason := fmt.Sprintf("inventory policy prevented apply (inventoryIDMatchStatus: %q, inventoryPolicy: %q)", invMatch, ipaf.InvPolicy) return true, reason, err diff --git a/pkg/apply/filter/inventory-policy-apply-filter_test.go b/pkg/apply/filter/inventory-policy-apply-filter_test.go index e8935c1..559683a 100644 --- a/pkg/apply/filter/inventory-policy-apply-filter_test.go +++ b/pkg/apply/filter/inventory-policy-apply-filter_test.go @@ -29,56 +29,56 @@ func TestInventoryPolicyApplyFilter(t *testing.T) { tests := map[string]struct { inventoryID string objInventoryID string - policy inventory.InventoryPolicy + policy inventory.Policy filtered bool isError bool }{ "inventory and object ids match, not filtered": { inventoryID: "foo", objInventoryID: "foo", - policy: inventory.InventoryPolicyMustMatch, + policy: inventory.PolicyMustMatch, filtered: false, isError: false, }, "inventory and object ids match and adopt, not filtered": { inventoryID: "foo", objInventoryID: "foo", - policy: inventory.AdoptIfNoInventory, + policy: inventory.PolicyAdoptIfNoInventory, filtered: false, isError: false, }, "inventory and object ids do no match and policy must match, filtered and error": { inventoryID: "foo", objInventoryID: "bar", - policy: inventory.InventoryPolicyMustMatch, + policy: inventory.PolicyMustMatch, filtered: true, isError: true, }, "inventory and object ids do no match and adopt if no inventory, filtered and error": { inventoryID: "foo", objInventoryID: "bar", - policy: inventory.AdoptIfNoInventory, + policy: inventory.PolicyAdoptIfNoInventory, filtered: true, isError: true, }, "inventory and object ids do no match and adopt all, not filtered": { inventoryID: "foo", objInventoryID: "bar", - policy: inventory.AdoptAll, + policy: inventory.PolicyAdoptAll, filtered: false, isError: false, }, "object id empty and adopt all, not filtered": { inventoryID: "foo", objInventoryID: "", - policy: inventory.AdoptAll, + policy: inventory.PolicyAdoptAll, filtered: false, isError: false, }, "object id empty and policy must match, filtered and error": { inventoryID: "foo", objInventoryID: "", - policy: inventory.InventoryPolicyMustMatch, + policy: inventory.PolicyMustMatch, filtered: true, isError: true, }, diff --git a/pkg/apply/filter/inventory-policy-filter.go b/pkg/apply/filter/inventory-policy-filter.go index 82c44f3..ea2048b 100644 --- a/pkg/apply/filter/inventory-policy-filter.go +++ b/pkg/apply/filter/inventory-policy-filter.go @@ -14,8 +14,8 @@ import ( // if an object should be pruned (deleted) because of the InventoryPolicy // and if the objects owning inventory identifier matchs the inventory id. 
type InventoryPolicyFilter struct { - Inv inventory.InventoryInfo - InvPolicy inventory.InventoryPolicy + Inv inventory.Info + InvPolicy inventory.Policy } // Name returns a filter identifier for logging. @@ -30,7 +30,7 @@ func (ipf InventoryPolicyFilter) Filter(obj *unstructured.Unstructured) (bool, s // Check the inventory id "match" and the adopt policy to determine // if an object should be pruned (deleted). if !inventory.CanPrune(ipf.Inv, obj, ipf.InvPolicy) { - invMatch := inventory.InventoryIDMatch(ipf.Inv, obj) + invMatch := inventory.IDMatch(ipf.Inv, obj) reason := fmt.Sprintf("inventory policy prevented deletion (inventoryIDMatchStatus: %q, inventoryPolicy: %q)", invMatch, ipf.InvPolicy) return true, reason, nil diff --git a/pkg/apply/filter/inventory-policy-filter_test.go b/pkg/apply/filter/inventory-policy-filter_test.go index 8f7378f..989c2b1 100644 --- a/pkg/apply/filter/inventory-policy-filter_test.go +++ b/pkg/apply/filter/inventory-policy-filter_test.go @@ -26,49 +26,49 @@ func TestInventoryPolicyFilter(t *testing.T) { tests := map[string]struct { inventoryID string objInventoryID string - policy inventory.InventoryPolicy + policy inventory.Policy filtered bool }{ "inventory and object ids match, not filtered": { inventoryID: "foo", objInventoryID: "foo", - policy: inventory.InventoryPolicyMustMatch, + policy: inventory.PolicyMustMatch, filtered: false, }, "inventory and object ids match and adopt, not filtered": { inventoryID: "foo", objInventoryID: "foo", - policy: inventory.AdoptIfNoInventory, + policy: inventory.PolicyAdoptIfNoInventory, filtered: false, }, "inventory and object ids do no match and policy must match, filtered": { inventoryID: "foo", objInventoryID: "bar", - policy: inventory.InventoryPolicyMustMatch, + policy: inventory.PolicyMustMatch, filtered: true, }, "inventory and object ids do no match and adopt if no inventory, filtered": { inventoryID: "foo", objInventoryID: "bar", - policy: inventory.AdoptIfNoInventory, + policy: inventory.PolicyAdoptIfNoInventory, filtered: true, }, "inventory and object ids do no match and adopt all, not filtered": { inventoryID: "foo", objInventoryID: "bar", - policy: inventory.AdoptAll, + policy: inventory.PolicyAdoptAll, filtered: false, }, "object id empty and adopt all, not filtered": { inventoryID: "foo", objInventoryID: "", - policy: inventory.AdoptAll, + policy: inventory.PolicyAdoptAll, filtered: false, }, "object id empty and policy must match, filtered": { inventoryID: "foo", objInventoryID: "", - policy: inventory.InventoryPolicyMustMatch, + policy: inventory.PolicyMustMatch, filtered: true, }, } diff --git a/pkg/apply/info/info_helper.go b/pkg/apply/info/helper.go similarity index 73% rename from pkg/apply/info/info_helper.go rename to pkg/apply/info/helper.go index cfb186a..a980b4a 100644 --- a/pkg/apply/info/info_helper.go +++ b/pkg/apply/info/helper.go @@ -10,8 +10,8 @@ import ( "sigs.k8s.io/cli-utils/pkg/object" ) -// InfoHelper provides functions for interacting with Info objects. -type InfoHelper interface { +// Helper provides functions for interacting with Info objects. +type Helper interface { // UpdateInfo sets the mapping and client for the provided Info // object. This must be called at a time when all needed resource // types are available in the RESTMapper. 
@@ -20,19 +20,19 @@ type InfoHelper interface { BuildInfo(obj *unstructured.Unstructured) (*resource.Info, error) } -func NewInfoHelper(mapper meta.RESTMapper, unstructuredClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error)) *infoHelper { - return &infoHelper{ +func NewHelper(mapper meta.RESTMapper, unstructuredClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error)) Helper { + return &helper{ mapper: mapper, unstructuredClientForMapping: unstructuredClientForMapping, } } -type infoHelper struct { +type helper struct { mapper meta.RESTMapper unstructuredClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error) } -func (ih *infoHelper) UpdateInfo(info *resource.Info) error { +func (ih *helper) UpdateInfo(info *resource.Info) error { gvk := info.Object.GetObjectKind().GroupVersionKind() mapping, err := ih.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { @@ -48,7 +48,7 @@ func (ih *infoHelper) UpdateInfo(info *resource.Info) error { return nil } -func (ih *infoHelper) BuildInfo(obj *unstructured.Unstructured) (*resource.Info, error) { +func (ih *helper) BuildInfo(obj *unstructured.Unstructured) (*resource.Info, error) { info, err := object.UnstructuredToInfo(obj) if err != nil { return nil, err diff --git a/pkg/apply/prune/event-factory.go b/pkg/apply/prune/event-factory.go index 33cbc8e..a49961a 100644 --- a/pkg/apply/prune/event-factory.go +++ b/pkg/apply/prune/event-factory.go @@ -33,6 +33,7 @@ func CreateEventFactory(isDelete bool, groupName string) EventFactory { // PruneEventFactory implements EventFactory interface as a concrete // representation of for prune events. +//nolint:revive // stuttering ok because Prune is a type of PruneEvent type PruneEventFactory struct { groupName string } diff --git a/pkg/apply/prune/prune.go b/pkg/apply/prune/prune.go index 6d51df8..a510b6a 100644 --- a/pkg/apply/prune/prune.go +++ b/pkg/apply/prune/prune.go @@ -31,14 +31,14 @@ import ( // Pruner implements GetPruneObjs to calculate which objects to prune and Prune // to delete them. type Pruner struct { - InvClient inventory.InventoryClient + InvClient inventory.Client Client dynamic.Interface Mapper meta.RESTMapper } // NewPruner returns a new Pruner. // Returns an error if dependency injection fails using the factory. -func NewPruner(factory util.Factory, invClient inventory.InventoryClient) (*Pruner, error) { +func NewPruner(factory util.Factory, invClient inventory.Client) (*Pruner, error) { // Client/Builder fields from the Factory. client, err := factory.DynamicClient() if err != nil { @@ -204,7 +204,7 @@ func (p *Pruner) removeInventoryAnnotation(obj *unstructured.Unstructured) (*uns // objects minus the set of currently applied objects. Returns an error // if one occurs. func (p *Pruner) GetPruneObjs( - inv inventory.InventoryInfo, + inv inventory.Info, objs object.UnstructuredSet, opts Options, ) (object.UnstructuredSet, error) { diff --git a/pkg/apply/prune/prune_test.go b/pkg/apply/prune/prune_test.go index 2098290..4d8bde2 100644 --- a/pkg/apply/prune/prune_test.go +++ b/pkg/apply/prune/prune_test.go @@ -122,7 +122,7 @@ metadata: // Returns a inventory object with the inventory set from // the passed "children". 
-func createInventoryInfo(children ...*unstructured.Unstructured) inventory.InventoryInfo { +func createInventoryInfo(children ...*unstructured.Unstructured) inventory.Info { inventoryObjCopy := inventoryObj.DeepCopy() wrappedInv := inventory.WrapInventoryObj(inventoryObjCopy) objs := object.UnstructuredSetToObjMetadataSet(children) @@ -455,7 +455,7 @@ func TestPrune(t *testing.T) { } pruneIds := object.UnstructuredSetToObjMetadataSet(tc.pruneObjs) po := Pruner{ - InvClient: inventory.NewFakeInventoryClient(pruneIds), + InvClient: inventory.NewFakeClient(pruneIds), Client: fake.NewSimpleDynamicClient(scheme.Scheme, objs...), Mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...), @@ -535,7 +535,7 @@ func TestPruneDeletionPrevention(t *testing.T) { t.Run(name, func(t *testing.T) { pruneID := object.UnstructuredToObjMetadata(tc.pruneObj) po := Pruner{ - InvClient: inventory.NewFakeInventoryClient(object.ObjMetadataSet{pruneID}), + InvClient: inventory.NewFakeClient(object.ObjMetadataSet{pruneID}), Client: fake.NewSimpleDynamicClient(scheme.Scheme, tc.pruneObj), Mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...), @@ -629,7 +629,7 @@ func TestPruneWithErrors(t *testing.T) { t.Run(name, func(t *testing.T) { pruneIds := object.UnstructuredSetToObjMetadataSet(tc.pruneObjs) po := Pruner{ - InvClient: inventory.NewFakeInventoryClient(pruneIds), + InvClient: inventory.NewFakeClient(pruneIds), // Set up the fake dynamic client to recognize all objects, and the RESTMapper. Client: &fakeDynamicClient{ resourceInterface: &failureNamespaceClient{}, @@ -720,7 +720,7 @@ func TestGetPruneObjs(t *testing.T) { objs = append(objs, obj) } po := Pruner{ - InvClient: inventory.NewFakeInventoryClient(object.UnstructuredSetToObjMetadataSet(tc.prevInventory)), + InvClient: inventory.NewFakeClient(object.UnstructuredSetToObjMetadataSet(tc.prevInventory)), Client: fake.NewSimpleDynamicClient(scheme.Scheme, objs...), Mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...), @@ -834,7 +834,7 @@ func TestPrune_PropagationPolicy(t *testing.T) { t.Run(name, func(t *testing.T) { captureClient := &optionsCaptureNamespaceClient{} po := Pruner{ - InvClient: inventory.NewFakeInventoryClient(object.ObjMetadataSet{}), + InvClient: inventory.NewFakeClient(object.ObjMetadataSet{}), Client: &fakeDynamicClient{ resourceInterface: captureClient, }, diff --git a/pkg/apply/solver/solver.go b/pkg/apply/solver/solver.go index c222985..de08929 100644 --- a/pkg/apply/solver/solver.go +++ b/pkg/apply/solver/solver.go @@ -41,9 +41,9 @@ type TaskQueueBuilder struct { Pruner *prune.Pruner DynamicClient dynamic.Interface OpenAPIGetter discovery.OpenAPISchemaInterface - InfoHelper info.InfoHelper + InfoHelper info.Helper Mapper meta.RESTMapper - InvClient inventory.InventoryClient + InvClient inventory.Client // Collector is used to collect validation errors and invalid objects. // Invalid objects will be filtered and not be injected into tasks. 
Collector *validation.Collector @@ -92,7 +92,7 @@ type Options struct { DryRunStrategy common.DryRunStrategy PrunePropagationPolicy metav1.DeletionPropagation PruneTimeout time.Duration - InventoryPolicy inventory.InventoryPolicy + InventoryPolicy inventory.Policy } // Build returns the queue of tasks that have been created @@ -102,7 +102,7 @@ func (t *TaskQueueBuilder) Build() *TaskQueue { // AppendInvAddTask appends an inventory add task to the task queue. // Returns a pointer to the Builder to chain function calls. -func (t *TaskQueueBuilder) AppendInvAddTask(inv inventory.InventoryInfo, applyObjs object.UnstructuredSet, +func (t *TaskQueueBuilder) AppendInvAddTask(inv inventory.Info, applyObjs object.UnstructuredSet, dryRun common.DryRunStrategy) *TaskQueueBuilder { applyObjs = t.Collector.FilterInvalidObjects(applyObjs) klog.V(2).Infoln("adding inventory add task (%d objects)", len(applyObjs)) @@ -113,13 +113,13 @@ func (t *TaskQueueBuilder) AppendInvAddTask(inv inventory.InventoryInfo, applyOb Objects: applyObjs, DryRun: dryRun, }) - t.invAddCounter += 1 + t.invAddCounter++ return t } // AppendInvSetTask appends an inventory set task to the task queue. // Returns a pointer to the Builder to chain function calls. -func (t *TaskQueueBuilder) AppendInvSetTask(inv inventory.InventoryInfo, dryRun common.DryRunStrategy) *TaskQueueBuilder { +func (t *TaskQueueBuilder) AppendInvSetTask(inv inventory.Info, dryRun common.DryRunStrategy) *TaskQueueBuilder { klog.V(2).Infoln("adding inventory set task") prevInvIds, _ := t.InvClient.GetClusterObjs(inv) t.tasks = append(t.tasks, &task.InvSetTask{ @@ -129,13 +129,13 @@ func (t *TaskQueueBuilder) AppendInvSetTask(inv inventory.InventoryInfo, dryRun PrevInventory: prevInvIds, DryRun: dryRun, }) - t.invSetCounter += 1 + t.invSetCounter++ return t } // AppendDeleteInvTask appends to the task queue a task to delete the inventory object. // Returns a pointer to the Builder to chain function calls. 
-func (t *TaskQueueBuilder) AppendDeleteInvTask(inv inventory.InventoryInfo, dryRun common.DryRunStrategy) *TaskQueueBuilder { +func (t *TaskQueueBuilder) AppendDeleteInvTask(inv inventory.Info, dryRun common.DryRunStrategy) *TaskQueueBuilder { klog.V(2).Infoln("adding delete inventory task") t.tasks = append(t.tasks, &task.DeleteInvTask{ TaskName: fmt.Sprintf("delete-inventory-%d", t.deleteInvCounter), @@ -143,7 +143,7 @@ func (t *TaskQueueBuilder) AppendDeleteInvTask(inv inventory.InventoryInfo, dryR InvInfo: inv, DryRun: dryRun, }) - t.deleteInvCounter += 1 + t.deleteInvCounter++ return t } @@ -165,7 +165,7 @@ func (t *TaskQueueBuilder) AppendApplyTask(applyObjs object.UnstructuredSet, InfoHelper: t.InfoHelper, Mapper: t.Mapper, }) - t.applyCounter += 1 + t.applyCounter++ return t } @@ -182,7 +182,7 @@ func (t *TaskQueueBuilder) AppendWaitTask(waitIds object.ObjMetadataSet, conditi waitTimeout, t.Mapper), ) - t.waitCounter += 1 + t.waitCounter++ return t } @@ -203,7 +203,7 @@ func (t *TaskQueueBuilder) AppendPruneTask(pruneObjs object.UnstructuredSet, Destroy: t.Destroy, }, ) - t.pruneCounter += 1 + t.pruneCounter++ return t } diff --git a/pkg/apply/solver/solver_test.go b/pkg/apply/solver/solver_test.go index 5b2a5d2..9c385f3 100644 --- a/pkg/apply/solver/solver_test.go +++ b/pkg/apply/solver/solver_test.go @@ -421,7 +421,7 @@ func TestTaskQueueBuilder_AppendApplyWaitTasks(t *testing.T) { } applyIds := object.UnstructuredSetToObjMetadataSet(tc.applyObjs) - fakeInvClient := inventory.NewFakeInventoryClient(applyIds) + fakeInvClient := inventory.NewFakeClient(applyIds) vCollector := &validation.Collector{} tqb := TaskQueueBuilder{ Pruner: pruner, @@ -786,7 +786,7 @@ func TestTaskQueueBuilder_AppendPruneWaitTasks(t *testing.T) { } pruneIds := object.UnstructuredSetToObjMetadataSet(tc.pruneObjs) - fakeInvClient := inventory.NewFakeInventoryClient(pruneIds) + fakeInvClient := inventory.NewFakeClient(pruneIds) vCollector := &validation.Collector{} tqb := TaskQueueBuilder{ Pruner: pruner, diff --git a/pkg/apply/task/apply_task.go b/pkg/apply/task/apply_task.go index 8403a50..7258442 100644 --- a/pkg/apply/task/apply_task.go +++ b/pkg/apply/task/apply_task.go @@ -49,7 +49,7 @@ type ApplyTask struct { DynamicClient dynamic.Interface OpenAPIGetter discovery.OpenAPISchemaInterface - InfoHelper info.InfoHelper + InfoHelper info.Helper Mapper meta.RESTMapper Objects object.UnstructuredSet Filters []filter.ValidationFilter diff --git a/pkg/apply/task/delete_inv_task.go b/pkg/apply/task/delete_inv_task.go index 9959830..b1bb75f 100644 --- a/pkg/apply/task/delete_inv_task.go +++ b/pkg/apply/task/delete_inv_task.go @@ -19,8 +19,8 @@ import ( // resources have been deleted. 
type DeleteInvTask struct { TaskName string - InvClient inventory.InventoryClient - InvInfo inventory.InventoryInfo + InvClient inventory.Client + InvInfo inventory.Info DryRun common.DryRunStrategy } diff --git a/pkg/apply/task/delete_inv_task_test.go b/pkg/apply/task/delete_inv_task_test.go index 8d90909..aa9ab35 100644 --- a/pkg/apply/task/delete_inv_task_test.go +++ b/pkg/apply/task/delete_inv_task_test.go @@ -37,7 +37,7 @@ func TestDeleteInvTask(t *testing.T) { } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - client := inventory.NewFakeInventoryClient(object.ObjMetadataSet{}) + client := inventory.NewFakeClient(object.ObjMetadataSet{}) client.Err = tc.err eventChannel := make(chan event.Event) resourceCache := cache.NewResourceCacheMap() diff --git a/pkg/apply/task/inv_add_task.go b/pkg/apply/task/inv_add_task.go index 3a5b80f..ef9087a 100644 --- a/pkg/apply/task/inv_add_task.go +++ b/pkg/apply/task/inv_add_task.go @@ -23,8 +23,8 @@ var ( // before the actual object is applied. type InvAddTask struct { TaskName string - InvClient inventory.InventoryClient - InvInfo inventory.InventoryInfo + InvClient inventory.Client + InvInfo inventory.Info Objects object.UnstructuredSet DryRun common.DryRunStrategy } @@ -74,7 +74,7 @@ func (i *InvAddTask) StatusUpdate(_ *taskrunner.TaskContext, _ object.ObjMetadat // inventoryNamespaceInSet returns the the namespace the passed inventory // object will be applied to, or nil if this namespace object does not exist // in the passed slice "infos" or the inventory object is cluster-scoped. -func inventoryNamespaceInSet(inv inventory.InventoryInfo, objs object.UnstructuredSet) *unstructured.Unstructured { +func inventoryNamespaceInSet(inv inventory.Info, objs object.UnstructuredSet) *unstructured.Unstructured { if inv == nil { return nil } diff --git a/pkg/apply/task/inv_add_task_test.go b/pkg/apply/task/inv_add_task_test.go index a228a8e..51d2804 100644 --- a/pkg/apply/task/inv_add_task_test.go +++ b/pkg/apply/task/inv_add_task_test.go @@ -107,7 +107,7 @@ func TestInvAddTask(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - client := inventory.NewFakeInventoryClient(tc.initialObjs) + client := inventory.NewFakeClient(tc.initialObjs) eventChannel := make(chan event.Event) resourceCache := cache.NewResourceCacheMap() context := taskrunner.NewTaskContext(eventChannel, resourceCache) @@ -142,7 +142,7 @@ func TestInventoryNamespaceInSet(t *testing.T) { inventoryNamespace := createNamespace(namespace) tests := map[string]struct { - inv inventory.InventoryInfo + inv inventory.Info objects []*unstructured.Unstructured namespace *unstructured.Unstructured }{ diff --git a/pkg/apply/task/inv_set_task.go b/pkg/apply/task/inv_set_task.go index 24bf51e..51f057a 100644 --- a/pkg/apply/task/inv_set_task.go +++ b/pkg/apply/task/inv_set_task.go @@ -16,8 +16,8 @@ import ( // inventory references at the end of the apply/prune. 
type InvSetTask struct { TaskName string - InvClient inventory.InventoryClient - InvInfo inventory.InventoryInfo + InvClient inventory.Client + InvInfo inventory.Info PrevInventory object.ObjMetadataSet DryRun common.DryRunStrategy } diff --git a/pkg/apply/task/inv_set_task_test.go b/pkg/apply/task/inv_set_task_test.go index 1281c4d..cacf069 100644 --- a/pkg/apply/task/inv_set_task_test.go +++ b/pkg/apply/task/inv_set_task_test.go @@ -163,7 +163,7 @@ func TestInvSetTask(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - client := inventory.NewFakeInventoryClient(object.ObjMetadataSet{}) + client := inventory.NewFakeClient(object.ObjMetadataSet{}) eventChannel := make(chan event.Event) resourceCache := cache.NewResourceCacheMap() context := taskrunner.NewTaskContext(eventChannel, resourceCache) diff --git a/pkg/apply/taskrunner/runner.go b/pkg/apply/taskrunner/runner.go index 733d40f..7218723 100644 --- a/pkg/apply/taskrunner/runner.go +++ b/pkg/apply/taskrunner/runner.go @@ -17,19 +17,19 @@ import ( ) // NewTaskStatusRunner returns a new TaskStatusRunner. -func NewTaskStatusRunner(identifiers object.ObjMetadataSet, statusPoller poller.Poller) *taskStatusRunner { - return &taskStatusRunner{ - identifiers: identifiers, - statusPoller: statusPoller, +func NewTaskStatusRunner(identifiers object.ObjMetadataSet, statusPoller poller.Poller) *TaskStatusRunner { + return &TaskStatusRunner{ + Identifiers: identifiers, + StatusPoller: statusPoller, } } -// taskStatusRunner is a taskRunner that executes a set of +// TaskStatusRunner is a taskRunner that executes a set of // tasks while at the same time uses the statusPoller to // keep track of the status of the resources. -type taskStatusRunner struct { - identifiers object.ObjMetadataSet - statusPoller poller.Poller +type TaskStatusRunner struct { + Identifiers object.ObjMetadataSet + StatusPoller poller.Poller } // Options defines properties that is passed along to @@ -49,7 +49,7 @@ type Options struct { // validation of wait conditions. // - eventChannel is written to with events based on status updates, if // emitStatusEvents is true. -func (tsr *taskStatusRunner) Run( +func (tsr *TaskStatusRunner) Run( ctx context.Context, taskContext *TaskContext, taskQueue chan Task, @@ -59,7 +59,7 @@ func (tsr *taskStatusRunner) Run( // If taskStatusRunner.Run is cancelled, baseRunner.run will exit early, // causing the poller to be cancelled. statusCtx, cancelFunc := context.WithCancel(context.Background()) - statusChannel := tsr.statusPoller.Poll(statusCtx, tsr.identifiers, polling.PollOptions{ + statusChannel := tsr.StatusPoller.Poll(statusCtx, tsr.Identifiers, polling.PollOptions{ PollInterval: opts.PollInterval, }) @@ -113,7 +113,7 @@ func (tsr *taskStatusRunner) Run( // An error event on the statusChannel means the StatusPoller // has encountered a problem so it can't continue. This means // the statusChannel will be closed soon. 
- if statusEvent.EventType == pollevent.ErrorEvent { + if statusEvent.Type == pollevent.ErrorEvent { abort = true abortReason = fmt.Errorf("polling for status failed: %v", statusEvent.Error) diff --git a/pkg/apply/taskrunner/runner_test.go b/pkg/apply/taskrunner/runner_test.go index 4538052..7e84a38 100644 --- a/pkg/apply/taskrunner/runner_test.go +++ b/pkg/apply/taskrunner/runner_test.go @@ -68,14 +68,14 @@ func TestBaseRunner(t *testing.T) { statusEventsDelay: 5 * time.Second, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: cmID, Status: status.CurrentStatus, }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: depID, Status: status.CurrentStatus, @@ -129,7 +129,7 @@ func TestBaseRunner(t *testing.T) { statusEventsDelay: time.Second, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: cmID, Status: status.CurrentStatus, @@ -176,14 +176,14 @@ func TestBaseRunner(t *testing.T) { statusEventsDelay: time.Second, statusEvents: []pollevent.Event{ { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: cmID, Status: status.CurrentStatus, }, }, { - EventType: pollevent.ResourceUpdateEvent, + Type: pollevent.ResourceUpdateEvent, Resource: &pollevent.ResourceStatus{ Identifier: depID, Status: status.InProgressStatus, @@ -427,8 +427,8 @@ func TestBaseRunnerCancellation(t *testing.T) { statusEventsDelay: 2 * time.Second, statusEvents: []pollevent.Event{ { - EventType: pollevent.ErrorEvent, - Error: testError, + Type: pollevent.ErrorEvent, + Error: testError, }, }, contextTimeout: 30 * time.Second, diff --git a/pkg/inventory/fake-inventory-client.go b/pkg/inventory/fake-inventory-client.go index 0a99b3b..a8f1259 100644 --- a/pkg/inventory/fake-inventory-client.go +++ b/pkg/inventory/fake-inventory-client.go @@ -10,33 +10,33 @@ import ( "sigs.k8s.io/cli-utils/pkg/object" ) -// FakeInventoryClient is a testing implementation of the InventoryClient interface. -type FakeInventoryClient struct { +// FakeClient is a testing implementation of the Client interface. +type FakeClient struct { Objs object.ObjMetadataSet Err error } var ( - _ InventoryClient = &FakeInventoryClient{} - _ InventoryClientFactory = FakeInventoryClientFactory{} + _ Client = &FakeClient{} + _ ClientFactory = FakeClientFactory{} ) -type FakeInventoryClientFactory object.ObjMetadataSet +type FakeClientFactory object.ObjMetadataSet -func (f FakeInventoryClientFactory) NewInventoryClient(cmdutil.Factory) (InventoryClient, error) { - return NewFakeInventoryClient(object.ObjMetadataSet(f)), nil +func (f FakeClientFactory) NewClient(cmdutil.Factory) (Client, error) { + return NewFakeClient(object.ObjMetadataSet(f)), nil } -// NewFakeInventoryClient returns a FakeInventoryClient. -func NewFakeInventoryClient(initObjs object.ObjMetadataSet) *FakeInventoryClient { - return &FakeInventoryClient{ +// NewFakeClient returns a FakeClient. +func NewFakeClient(initObjs object.ObjMetadataSet) *FakeClient { + return &FakeClient{ Objs: initObjs, Err: nil, } } // GetClusterObjs returns currently stored set of objects. 
-func (fic *FakeInventoryClient) GetClusterObjs(InventoryInfo) (object.ObjMetadataSet, error) { +func (fic *FakeClient) GetClusterObjs(Info) (object.ObjMetadataSet, error) { if fic.Err != nil { return object.ObjMetadataSet{}, fic.Err } @@ -46,7 +46,7 @@ func (fic *FakeInventoryClient) GetClusterObjs(InventoryInfo) (object.ObjMetadat // Merge stores the passed objects with the current stored cluster inventory // objects. Returns the set difference of the current set of objects minus // the passed set of objects, or an error if one is set up. -func (fic *FakeInventoryClient) Merge(_ InventoryInfo, objs object.ObjMetadataSet, _ common.DryRunStrategy) (object.ObjMetadataSet, error) { +func (fic *FakeClient) Merge(_ Info, objs object.ObjMetadataSet, _ common.DryRunStrategy) (object.ObjMetadataSet, error) { if fic.Err != nil { return object.ObjMetadataSet{}, fic.Err } @@ -58,7 +58,7 @@ func (fic *FakeInventoryClient) Merge(_ InventoryInfo, objs object.ObjMetadataSe // Replace the stored cluster inventory objs with the passed obj, or an // error if one is set up. -func (fic *FakeInventoryClient) Replace(_ InventoryInfo, objs object.ObjMetadataSet, _ common.DryRunStrategy) error { +func (fic *FakeClient) Replace(_ Info, objs object.ObjMetadataSet, _ common.DryRunStrategy) error { if fic.Err != nil { return fic.Err } @@ -67,14 +67,14 @@ func (fic *FakeInventoryClient) Replace(_ InventoryInfo, objs object.ObjMetadata } // DeleteInventoryObj returns an error if one is forced; does nothing otherwise. -func (fic *FakeInventoryClient) DeleteInventoryObj(InventoryInfo, common.DryRunStrategy) error { +func (fic *FakeClient) DeleteInventoryObj(Info, common.DryRunStrategy) error { if fic.Err != nil { return fic.Err } return nil } -func (fic *FakeInventoryClient) ApplyInventoryNamespace(*unstructured.Unstructured, common.DryRunStrategy) error { +func (fic *FakeClient) ApplyInventoryNamespace(*unstructured.Unstructured, common.DryRunStrategy) error { if fic.Err != nil { return fic.Err } @@ -82,19 +82,19 @@ func (fic *FakeInventoryClient) ApplyInventoryNamespace(*unstructured.Unstructur } // SetError forces an error on the subsequent client call if it returns an error. -func (fic *FakeInventoryClient) SetError(err error) { +func (fic *FakeClient) SetError(err error) { fic.Err = err } // ClearError clears the force error -func (fic *FakeInventoryClient) ClearError() { +func (fic *FakeClient) ClearError() { fic.Err = nil } -func (fic *FakeInventoryClient) GetClusterInventoryInfo(InventoryInfo) (*unstructured.Unstructured, error) { +func (fic *FakeClient) GetClusterInventoryInfo(Info) (*unstructured.Unstructured, error) { return nil, nil } -func (fic *FakeInventoryClient) GetClusterInventoryObjs(_ InventoryInfo) (object.UnstructuredSet, error) { +func (fic *FakeClient) GetClusterInventoryObjs(_ Info) (object.UnstructuredSet, error) { return object.UnstructuredSet{}, nil } diff --git a/pkg/inventory/idmatchstatus_string.go b/pkg/inventory/idmatchstatus_string.go new file mode 100644 index 0000000..01f69c9 --- /dev/null +++ b/pkg/inventory/idmatchstatus_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=IDMatchStatus"; DO NOT EDIT. + +package inventory + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Empty-0] + _ = x[Match-1] + _ = x[NoMatch-2] +} + +const _IDMatchStatus_name = "EmptyMatchNoMatch" + +var _IDMatchStatus_index = [...]uint8{0, 5, 10, 17} + +func (i IDMatchStatus) String() string { + if i < 0 || i >= IDMatchStatus(len(_IDMatchStatus_index)-1) { + return "IDMatchStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _IDMatchStatus_name[_IDMatchStatus_index[i]:_IDMatchStatus_index[i+1]] +} diff --git a/pkg/inventory/inventory-client-factory.go b/pkg/inventory/inventory-client-factory.go index 0ea8beb..27092aa 100644 --- a/pkg/inventory/inventory-client-factory.go +++ b/pkg/inventory/inventory-client-factory.go @@ -6,18 +6,18 @@ package inventory import cmdutil "k8s.io/kubectl/pkg/cmd/util" var ( - _ InventoryClientFactory = ClusterInventoryClientFactory{} + _ ClientFactory = ClusterClientFactory{} ) -// InventoryClientFactory is a factory that constructs new InventoryClient instances. -type InventoryClientFactory interface { - NewInventoryClient(factory cmdutil.Factory) (InventoryClient, error) +// ClientFactory is a factory that constructs new Client instances. +type ClientFactory interface { + NewClient(factory cmdutil.Factory) (Client, error) } -// ClusterInventoryClientFactory is a factory that creates instances of ClusterInventoryClient inventory client. -type ClusterInventoryClientFactory struct { +// ClusterClientFactory is a factory that creates instances of ClusterClient inventory client. +type ClusterClientFactory struct { } -func (ClusterInventoryClientFactory) NewInventoryClient(factory cmdutil.Factory) (InventoryClient, error) { - return NewInventoryClient(factory, WrapInventoryObj, InvInfoToConfigMap) +func (ClusterClientFactory) NewClient(factory cmdutil.Factory) (Client, error) { + return NewClient(factory, WrapInventoryObj, InvInfoToConfigMap) } diff --git a/pkg/inventory/inventory-client.go b/pkg/inventory/inventory-client.go index dae52e0..8199167 100644 --- a/pkg/inventory/inventory-client.go +++ b/pkg/inventory/inventory-client.go @@ -20,49 +20,48 @@ import ( "sigs.k8s.io/cli-utils/pkg/object" ) -// InventoryClient expresses an interface for interacting with +// Client expresses an interface for interacting with // objects which store references to objects (inventory objects). -type InventoryClient interface { +type Client interface { // GetClusterObjs returns the set of previously applied objects as ObjMetadata, // or an error if one occurred. This set of previously applied object references // is stored in the inventory objects living in the cluster. - GetClusterObjs(inv InventoryInfo) (object.ObjMetadataSet, error) + GetClusterObjs(inv Info) (object.ObjMetadataSet, error) // Merge applies the union of the passed objects with the currently // stored objects in the inventory object. Returns the set of // objects which are not in the passed objects (objects to be pruned). // Otherwise, returns an error if one happened. - Merge(inv InventoryInfo, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) (object.ObjMetadataSet, error) + Merge(inv Info, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) (object.ObjMetadataSet, error) // Replace replaces the set of objects stored in the inventory // object with the passed set of objects, or an error if one occurs. - Replace(inv InventoryInfo, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) error + Replace(inv Info, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) error // DeleteInventoryObj deletes the passed inventory object from the APIServer. 
- DeleteInventoryObj(inv InventoryInfo, dryRun common.DryRunStrategy) error + DeleteInventoryObj(inv Info, dryRun common.DryRunStrategy) error // ApplyInventoryNamespace applies the Namespace that the inventory object should be in. ApplyInventoryNamespace(invNamespace *unstructured.Unstructured, dryRun common.DryRunStrategy) error // GetClusterInventoryInfo returns the cluster inventory object. - GetClusterInventoryInfo(inv InventoryInfo) (*unstructured.Unstructured, error) + GetClusterInventoryInfo(inv Info) (*unstructured.Unstructured, error) // GetClusterInventoryObjs looks up the inventory objects from the cluster. - GetClusterInventoryObjs(inv InventoryInfo) (object.UnstructuredSet, error) + GetClusterInventoryObjs(inv Info) (object.UnstructuredSet, error) } -// ClusterInventoryClient is a concrete implementation of the -// InventoryClient interface. -type ClusterInventoryClient struct { +// ClusterClient is a concrete implementation of the +// Client interface. +type ClusterClient struct { dc dynamic.Interface discoveryClient discovery.CachedDiscoveryInterface mapper meta.RESTMapper InventoryFactoryFunc StorageFactoryFunc - invToUnstructuredFunc InventoryToUnstructuredFunc + invToUnstructuredFunc ToUnstructuredFunc } -var _ InventoryClient = &ClusterInventoryClient{} -var _ InventoryClient = &ClusterInventoryClient{} +var _ Client = &ClusterClient{} -// NewInventoryClient returns a concrete implementation of the -// InventoryClient interface or an error. -func NewInventoryClient(factory cmdutil.Factory, +// NewClient returns a concrete implementation of the +// Client interface or an error. +func NewClient(factory cmdutil.Factory, invFunc StorageFactoryFunc, - invToUnstructuredFunc InventoryToUnstructuredFunc) (*ClusterInventoryClient, error) { + invToUnstructuredFunc ToUnstructuredFunc) (*ClusterClient, error) { dc, err := factory.DynamicClient() if err != nil { return nil, err @@ -75,14 +74,14 @@ func NewInventoryClient(factory cmdutil.Factory, if err != nil { return nil, err } - clusterInventoryClient := ClusterInventoryClient{ + clusterClient := ClusterClient{ dc: dc, discoveryClient: discoveryClinet, mapper: mapper, InventoryFactoryFunc: invFunc, invToUnstructuredFunc: invToUnstructuredFunc, } - return &clusterInventoryClient, nil + return &clusterClient, nil } // Merge stores the union of the passed objects with the objects currently @@ -92,7 +91,7 @@ func NewInventoryClient(factory cmdutil.Factory, // to prune. Creates the initial cluster inventory object storing the passed // objects if an inventory object does not exist. Returns an error if one // occurred. -func (cic *ClusterInventoryClient) Merge(localInv InventoryInfo, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) (object.ObjMetadataSet, error) { +func (cic *ClusterClient) Merge(localInv Info, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) (object.ObjMetadataSet, error) { pruneIds := object.ObjMetadataSet{} invObj := cic.invToUnstructuredFunc(localInv) clusterInv, err := cic.GetClusterInventoryInfo(localInv) @@ -159,7 +158,7 @@ func (cic *ClusterInventoryClient) Merge(localInv InventoryInfo, objs object.Obj // Replace stores the passed objects in the cluster inventory object, or // an error if one occurred. -func (cic *ClusterInventoryClient) Replace(localInv InventoryInfo, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) error { +func (cic *ClusterClient) Replace(localInv Info, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) error { // Skip entire function for dry-run. 
if dryRun.ClientOrServerDryRun() { klog.V(4).Infoln("dry-run replace inventory object: not applied") @@ -194,7 +193,7 @@ func (cic *ClusterInventoryClient) Replace(localInv InventoryInfo, objs object.O } // replaceInventory stores the passed objects into the passed inventory object. -func (cic *ClusterInventoryClient) replaceInventory(inv *unstructured.Unstructured, objs object.ObjMetadataSet) (*unstructured.Unstructured, error) { +func (cic *ClusterClient) replaceInventory(inv *unstructured.Unstructured, objs object.ObjMetadataSet) (*unstructured.Unstructured, error) { wrappedInv := cic.InventoryFactoryFunc(inv) if err := wrappedInv.Store(objs); err != nil { return nil, err @@ -207,7 +206,7 @@ func (cic *ClusterInventoryClient) replaceInventory(inv *unstructured.Unstructur } // DeleteInventoryObj deletes the inventory object from the cluster. -func (cic *ClusterInventoryClient) DeleteInventoryObj(localInv InventoryInfo, dryRun common.DryRunStrategy) error { +func (cic *ClusterClient) DeleteInventoryObj(localInv Info, dryRun common.DryRunStrategy) error { if localInv == nil { return fmt.Errorf("retrieving cluster inventory object with nil local inventory") } @@ -221,7 +220,7 @@ func (cic *ClusterInventoryClient) DeleteInventoryObj(localInv InventoryInfo, dr } } -func (cic *ClusterInventoryClient) deleteInventoryObjsByLabel(inv InventoryInfo, dryRun common.DryRunStrategy) error { +func (cic *ClusterClient) deleteInventoryObjsByLabel(inv Info, dryRun common.DryRunStrategy) error { clusterInvObjs, err := cic.getClusterInventoryObjsByLabel(inv) if err != nil { return err @@ -236,7 +235,7 @@ func (cic *ClusterInventoryClient) deleteInventoryObjsByLabel(inv InventoryInfo, // GetClusterObjs returns the objects stored in the cluster inventory object, or // an error if one occurred. -func (cic *ClusterInventoryClient) GetClusterObjs(localInv InventoryInfo) (object.ObjMetadataSet, error) { +func (cic *ClusterClient) GetClusterObjs(localInv Info) (object.ObjMetadataSet, error) { var objs object.ObjMetadataSet clusterInv, err := cic.GetClusterInventoryInfo(localInv) if err != nil { @@ -259,7 +258,7 @@ func (cic *ClusterInventoryClient) GetClusterObjs(localInv InventoryInfo) (objec // // TODO(seans3): Remove the special case code to merge multiple cluster inventory // objects once we've determined that this case is no longer possible. 
-func (cic *ClusterInventoryClient) GetClusterInventoryInfo(inv InventoryInfo) (*unstructured.Unstructured, error) { +func (cic *ClusterClient) GetClusterInventoryInfo(inv Info) (*unstructured.Unstructured, error) { clusterInvObjects, err := cic.GetClusterInventoryObjs(inv) if err != nil { return nil, fmt.Errorf("failed to read inventory objects from cluster: %w", err) @@ -274,7 +273,7 @@ func (cic *ClusterInventoryClient) GetClusterInventoryInfo(inv InventoryInfo) (* return clusterInv, nil } -func (cic *ClusterInventoryClient) getClusterInventoryObjsByLabel(inv InventoryInfo) (object.UnstructuredSet, error) { +func (cic *ClusterClient) getClusterInventoryObjsByLabel(inv Info) (object.UnstructuredSet, error) { localInv := cic.invToUnstructuredFunc(inv) if localInv == nil { return nil, fmt.Errorf("retrieving cluster inventory object with nil local inventory") @@ -306,7 +305,7 @@ func (cic *ClusterInventoryClient) getClusterInventoryObjsByLabel(inv InventoryI return invList, nil } -func (cic *ClusterInventoryClient) getClusterInventoryObjsByName(inv InventoryInfo) (object.UnstructuredSet, error) { +func (cic *ClusterClient) getClusterInventoryObjsByName(inv Info) (object.UnstructuredSet, error) { localInv := cic.invToUnstructuredFunc(inv) if localInv == nil { return nil, fmt.Errorf("retrieving cluster inventory object with nil local inventory") @@ -329,7 +328,7 @@ func (cic *ClusterInventoryClient) getClusterInventoryObjsByName(inv InventoryIn return object.UnstructuredSet{clusterInv}, nil } -func (cic *ClusterInventoryClient) GetClusterInventoryObjs(inv InventoryInfo) (object.UnstructuredSet, error) { +func (cic *ClusterClient) GetClusterInventoryObjs(inv Info) (object.UnstructuredSet, error) { if inv == nil { return nil, fmt.Errorf("inventoryInfo must be specified") } @@ -348,7 +347,7 @@ func (cic *ClusterInventoryClient) GetClusterInventoryObjs(inv InventoryInfo) (o } // applyInventoryObj applies the passed inventory object to the APIServer. -func (cic *ClusterInventoryClient) applyInventoryObj(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) (*unstructured.Unstructured, error) { +func (cic *ClusterClient) applyInventoryObj(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) (*unstructured.Unstructured, error) { if dryRun.ClientOrServerDryRun() { klog.V(4).Infof("dry-run apply inventory object: not applied") return obj.DeepCopy(), nil @@ -368,7 +367,7 @@ func (cic *ClusterInventoryClient) applyInventoryObj(obj *unstructured.Unstructu } // createInventoryObj creates the passed inventory object on the APIServer. -func (cic *ClusterInventoryClient) createInventoryObj(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) (*unstructured.Unstructured, error) { +func (cic *ClusterClient) createInventoryObj(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) (*unstructured.Unstructured, error) { if dryRun.ClientOrServerDryRun() { klog.V(4).Infof("dry-run create inventory object: not created") return obj.DeepCopy(), nil @@ -395,7 +394,7 @@ func (cic *ClusterInventoryClient) createInventoryObj(obj *unstructured.Unstruct // deleteInventoryObjByName deletes the passed inventory object from the APIServer, or // an error if one occurs. 
-func (cic *ClusterInventoryClient) deleteInventoryObjByName(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) error { +func (cic *ClusterClient) deleteInventoryObjByName(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) error { if dryRun.ClientOrServerDryRun() { klog.V(4).Infof("dry-run delete inventory object: not deleted") return nil @@ -416,7 +415,7 @@ func (cic *ClusterInventoryClient) deleteInventoryObjByName(obj *unstructured.Un // ApplyInventoryNamespace creates the passed namespace if it does not already // exist, or returns an error if one happened. NOTE: No error if already exists. -func (cic *ClusterInventoryClient) ApplyInventoryNamespace(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) error { +func (cic *ClusterClient) ApplyInventoryNamespace(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) error { if dryRun.ClientOrServerDryRun() { klog.V(4).Infof("dry-run apply inventory namespace (%s): not applied", obj.GetName()) return nil @@ -439,11 +438,11 @@ func (cic *ClusterInventoryClient) ApplyInventoryNamespace(obj *unstructured.Uns } // getMapping returns the RESTMapping for the provided resource. -func (cic *ClusterInventoryClient) getMapping(obj *unstructured.Unstructured) (*meta.RESTMapping, error) { +func (cic *ClusterClient) getMapping(obj *unstructured.Unstructured) (*meta.RESTMapping, error) { return cic.mapper.RESTMapping(obj.GroupVersionKind().GroupKind(), obj.GroupVersionKind().Version) } -func (cic *ClusterInventoryClient) updateStatus(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) error { +func (cic *ClusterClient) updateStatus(obj *unstructured.Unstructured, dryRun common.DryRunStrategy) error { if dryRun.ClientOrServerDryRun() { klog.V(4).Infof("dry-run update inventory status: not updated") return nil @@ -481,7 +480,7 @@ func (cic *ClusterInventoryClient) updateStatus(obj *unstructured.Unstructured, } // hasSubResource checks if a resource has the given subresource using the discovery client. 
-func (cic *ClusterInventoryClient) hasSubResource(groupVersion, resource, subresource string) (bool, error) { +func (cic *ClusterClient) hasSubResource(groupVersion, resource, subresource string) (bool, error) { resources, err := cic.discoveryClient.ServerResourcesForGroupVersion(groupVersion) if err != nil { return false, err diff --git a/pkg/inventory/inventory-client_test.go b/pkg/inventory/inventory-client_test.go index 3b44293..df1b80c 100644 --- a/pkg/inventory/inventory-client_test.go +++ b/pkg/inventory/inventory-client_test.go @@ -19,7 +19,7 @@ import ( func TestGetClusterInventoryInfo(t *testing.T) { tests := map[string]struct { - inv InventoryInfo + inv Info localObjs object.ObjMetadataSet isError bool }{ @@ -55,7 +55,7 @@ func TestGetClusterInventoryInfo(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - invClient, err := NewInventoryClient(tf, + invClient, err := NewClient(tf, WrapInventoryObj, InvInfoToConfigMap) require.NoError(t, err) @@ -89,7 +89,7 @@ func TestGetClusterInventoryInfo(t *testing.T) { func TestMerge(t *testing.T) { tests := map[string]struct { - localInv InventoryInfo + localInv Info localObjs object.ObjMetadataSet clusterObjs object.ObjMetadataSet pruneObjs object.ObjMetadataSet @@ -160,7 +160,7 @@ func TestMerge(t *testing.T) { tf.FakeDynamicClient.PrependReactor("list", "configmaps", toReactionFunc(tc.clusterObjs)) // Create the local inventory object storing "tc.localObjs" - invClient, err := NewInventoryClient(tf, + invClient, err := NewClient(tf, WrapInventoryObj, InvInfoToConfigMap) require.NoError(t, err) @@ -185,7 +185,7 @@ func TestMerge(t *testing.T) { func TestCreateInventory(t *testing.T) { tests := map[string]struct { - inv InventoryInfo + inv Info localObjs object.ObjMetadataSet error string }{ @@ -225,7 +225,7 @@ func TestCreateInventory(t *testing.T) { return true, nil, nil }) - invClient, err := NewInventoryClient(tf, + invClient, err := NewClient(tf, WrapInventoryObj, InvInfoToConfigMap) require.NoError(t, err) inv := invClient.invToUnstructuredFunc(tc.inv) @@ -289,7 +289,7 @@ func TestReplace(t *testing.T) { defer tf.Cleanup() // Client and server dry-run do not throw errors. - invClient, err := NewInventoryClient(tf, WrapInventoryObj, InvInfoToConfigMap) + invClient, err := NewClient(tf, WrapInventoryObj, InvInfoToConfigMap) require.NoError(t, err) err = invClient.Replace(copyInventory(), object.ObjMetadataSet{}, common.DryRunClient) if err != nil { @@ -303,7 +303,7 @@ func TestReplace(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { // Create inventory client, and store the cluster objs in the inventory object. 
- invClient, err := NewInventoryClient(tf, + invClient, err := NewClient(tf, WrapInventoryObj, InvInfoToConfigMap) require.NoError(t, err) wrappedInv := invClient.InventoryFactoryFunc(inventoryObj) @@ -334,7 +334,7 @@ func TestReplace(t *testing.T) { func TestGetClusterObjs(t *testing.T) { tests := map[string]struct { - localInv InventoryInfo + localInv Info clusterObjs object.ObjMetadataSet isError bool }{ @@ -366,7 +366,7 @@ func TestGetClusterObjs(t *testing.T) { defer tf.Cleanup() tf.FakeDynamicClient.PrependReactor("list", "configmaps", toReactionFunc(tc.clusterObjs)) - invClient, err := NewInventoryClient(tf, + invClient, err := NewClient(tf, WrapInventoryObj, InvInfoToConfigMap) require.NoError(t, err) clusterObjs, err := invClient.GetClusterObjs(tc.localInv) @@ -388,7 +388,7 @@ func TestGetClusterObjs(t *testing.T) { func TestDeleteInventoryObj(t *testing.T) { tests := map[string]struct { - inv InventoryInfo + inv Info localObjs object.ObjMetadataSet }{ "Nil local inventory object is an error": { @@ -421,7 +421,7 @@ func TestDeleteInventoryObj(t *testing.T) { tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) defer tf.Cleanup() - invClient, err := NewInventoryClient(tf, + invClient, err := NewClient(tf, WrapInventoryObj, InvInfoToConfigMap) require.NoError(t, err) inv := invClient.invToUnstructuredFunc(tc.inv) diff --git a/pkg/inventory/inventory-info.go b/pkg/inventory/inventory-info.go index fabf98a..085becc 100644 --- a/pkg/inventory/inventory-info.go +++ b/pkg/inventory/inventory-info.go @@ -3,19 +3,19 @@ package inventory -type InventoryStrategy string +type Strategy string const ( - NameStrategy InventoryStrategy = "name" - LabelStrategy InventoryStrategy = "label" + NameStrategy Strategy = "name" + LabelStrategy Strategy = "label" ) -// InventoryInfo provides the minimal information for the applier +// Info provides the minimal information for the applier // to create, look up and update an inventory. // The inventory object can be any type, the Provider in the applier // needs to know how to create, look up and update it based -// on the InventoryInfo. -type InventoryInfo interface { +// on the Info. +type Info interface { // Namespace of the inventory object. // It should be the value of the field .metadata.namespace. Namespace() string @@ -29,5 +29,5 @@ type InventoryInfo interface { // if the Id is necessary and how to use it for pruning objects. ID() string - Strategy() InventoryStrategy + Strategy() Strategy } diff --git a/pkg/inventory/inventory.go b/pkg/inventory/inventory.go index 32168e4..923c6bb 100644 --- a/pkg/inventory/inventory.go +++ b/pkg/inventory/inventory.go @@ -40,9 +40,9 @@ type Storage interface { // interface from the passed info object. type StorageFactoryFunc func(*unstructured.Unstructured) Storage -// InventoryToUnstructuredFunc returns the unstructured object for the -// given InventoryInfo. -type InventoryToUnstructuredFunc func(InventoryInfo) *unstructured.Unstructured +// ToUnstructuredFunc returns the unstructured object for the +// given Info. +type ToUnstructuredFunc func(Info) *unstructured.Unstructured // FindInventoryObj returns the "Inventory" object (ConfigMap with // inventory label) if it exists, or nil if it does not exist. 
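For reviewers, a minimal sketch (not part of the patch) of how a caller migrates to the renamed inventory API; the identifiers come from the hunks above, and the import paths are assumed from the repository layout:

package main

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"

	"sigs.k8s.io/cli-utils/pkg/common"
	"sigs.k8s.io/cli-utils/pkg/inventory"
	"sigs.k8s.io/cli-utils/pkg/object"
)

// migrate shows the post-rename call pattern: ClientFactory/NewClient/Info/Policy
// replace InventoryClientFactory/NewInventoryClient/InventoryInfo/InventoryPolicy.
func migrate(f cmdutil.Factory, invObj *unstructured.Unstructured) (object.ObjMetadataSet, error) {
	// ClusterInventoryClientFactory -> ClusterClientFactory, NewInventoryClient -> NewClient.
	var factory inventory.ClientFactory = inventory.ClusterClientFactory{}
	invClient, err := factory.NewClient(f) // returns inventory.Client
	if err != nil {
		return nil, err
	}
	// WrapInventoryInfoObj now returns inventory.Info instead of inventory.InventoryInfo.
	inv := inventory.WrapInventoryInfoObj(invObj)
	// Policy constants gained a "Policy" prefix, e.g. AdoptIfNoInventory -> PolicyAdoptIfNoInventory.
	_ = inventory.PolicyAdoptIfNoInventory
	// Merge keeps its signature; only the interface and argument types were renamed.
	return invClient.Merge(inv, object.ObjMetadataSet{}, common.DryRunClient)
}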
diff --git a/pkg/inventory/inventory_error.go b/pkg/inventory/inventory_error.go index ccce9f6..c232b7d 100644 --- a/pkg/inventory/inventory_error.go +++ b/pkg/inventory/inventory_error.go @@ -39,6 +39,7 @@ func (g MultipleInventoryObjError) Error() string { return multipleInventoryErrorStr } +//nolint:revive // redundant name in exported error ok type InventoryNamespaceInSet struct { Namespace string } @@ -46,3 +47,28 @@ type InventoryNamespaceInSet struct { func (g InventoryNamespaceInSet) Error() string { return inventoryNamespaceInSet } + +//nolint:revive // redundant name in exported error ok +type InventoryOverlapError struct { + err error +} + +func (e *InventoryOverlapError) Error() string { + return e.err.Error() +} + +func NewInventoryOverlapError(err error) *InventoryOverlapError { + return &InventoryOverlapError{err: err} +} + +type NeedAdoptionError struct { + err error +} + +func (e *NeedAdoptionError) Error() string { + return e.err.Error() +} + +func NewNeedAdoptionError(err error) *NeedAdoptionError { + return &NeedAdoptionError{err: err} +} diff --git a/pkg/inventory/inventory_test.go b/pkg/inventory/inventory_test.go index 8910f30..3ccfcfe 100644 --- a/pkg/inventory/inventory_test.go +++ b/pkg/inventory/inventory_test.go @@ -416,12 +416,12 @@ func copyInventoryInfo() *unstructured.Unstructured { return inventoryObj.DeepCopy() } -func copyInventory() InventoryInfo { +func copyInventory() Info { u := inventoryObj.DeepCopy() return WrapInventoryInfoObj(u) } -func storeObjsInInventory(info InventoryInfo, objs object.ObjMetadataSet) *unstructured.Unstructured { +func storeObjsInInventory(info Info, objs object.ObjMetadataSet) *unstructured.Unstructured { wrapped := WrapInventoryObj(InvInfoToConfigMap(info)) _ = wrapped.Store(objs) inv, _ := wrapped.GetObject() diff --git a/pkg/inventory/inventorycm.go b/pkg/inventory/inventorycm.go index 924eaec..28780a0 100644 --- a/pkg/inventory/inventorycm.go +++ b/pkg/inventory/inventorycm.go @@ -1,8 +1,8 @@ // Copyright 2020 The Kubernetes Authors. // SPDX-License-Identifier: Apache-2.0 // -// Introduces the InventoryConfigMap struct which implements -// the Inventory interface. The InventoryConfigMap wraps a +// Introduces the ConfigMap struct which implements +// the Inventory interface. The ConfigMap wraps a // ConfigMap resource which stores the set of inventory // (object metadata). @@ -17,62 +17,62 @@ import ( ) // WrapInventoryObj takes a passed ConfigMap (as a resource.Info), -// wraps it with the InventoryConfigMap and upcasts the wrapper as +// wraps it with the ConfigMap and upcasts the wrapper as // an the Inventory interface. func WrapInventoryObj(inv *unstructured.Unstructured) Storage { - return &InventoryConfigMap{inv: inv} + return &ConfigMap{inv: inv} } // WrapInventoryInfoObj takes a passed ConfigMap (as a resource.Info), -// wraps it with the InventoryConfigMap and upcasts the wrapper as -// an the InventoryInfo interface. -func WrapInventoryInfoObj(inv *unstructured.Unstructured) InventoryInfo { - return &InventoryConfigMap{inv: inv} +// wraps it with the ConfigMap and upcasts the wrapper as +// an the Info interface. 
+func WrapInventoryInfoObj(inv *unstructured.Unstructured) Info { + return &ConfigMap{inv: inv} } -func InvInfoToConfigMap(inv InventoryInfo) *unstructured.Unstructured { - icm, ok := inv.(*InventoryConfigMap) +func InvInfoToConfigMap(inv Info) *unstructured.Unstructured { + icm, ok := inv.(*ConfigMap) if ok { return icm.inv } return nil } -// InventoryConfigMap wraps a ConfigMap resource and implements +// ConfigMap wraps a ConfigMap resource and implements // the Inventory interface. This wrapper loads and stores the // object metadata (inventory) to and from the wrapped ConfigMap. -type InventoryConfigMap struct { +type ConfigMap struct { inv *unstructured.Unstructured objMetas object.ObjMetadataSet } -var _ InventoryInfo = &InventoryConfigMap{} -var _ Storage = &InventoryConfigMap{} +var _ Info = &ConfigMap{} +var _ Storage = &ConfigMap{} -func (icm *InventoryConfigMap) Name() string { +func (icm *ConfigMap) Name() string { return icm.inv.GetName() } -func (icm *InventoryConfigMap) Namespace() string { +func (icm *ConfigMap) Namespace() string { return icm.inv.GetNamespace() } -func (icm *InventoryConfigMap) ID() string { +func (icm *ConfigMap) ID() string { // Empty string if not set. return icm.inv.GetLabels()[common.InventoryLabel] } -func (icm *InventoryConfigMap) Strategy() InventoryStrategy { +func (icm *ConfigMap) Strategy() Strategy { return LabelStrategy } -func (icm *InventoryConfigMap) UnstructuredInventory() *unstructured.Unstructured { +func (icm *ConfigMap) UnstructuredInventory() *unstructured.Unstructured { return icm.inv } // Load is an Inventory interface function returning the set of // object metadata from the wrapped ConfigMap, or an error. -func (icm *InventoryConfigMap) Load() (object.ObjMetadataSet, error) { +func (icm *ConfigMap) Load() (object.ObjMetadataSet, error) { objs := object.ObjMetadataSet{} objMap, exists, err := unstructured.NestedStringMap(icm.inv.Object, "data") if err != nil { @@ -94,14 +94,14 @@ func (icm *InventoryConfigMap) Load() (object.ObjMetadataSet, error) { // Store is an Inventory interface function implemented to store // the object metadata in the wrapped ConfigMap. Actual storing // happens in "GetObject". -func (icm *InventoryConfigMap) Store(objMetas object.ObjMetadataSet) error { +func (icm *ConfigMap) Store(objMetas object.ObjMetadataSet) error { icm.objMetas = objMetas return nil } // GetObject returns the wrapped object (ConfigMap) as a resource.Info // or an error if one occurs. -func (icm *InventoryConfigMap) GetObject() (*unstructured.Unstructured, error) { +func (icm *ConfigMap) GetObject() (*unstructured.Unstructured, error) { // Create the objMap of all the resources, and compute the hash. objMap := buildObjMap(icm.objMetas) // Create the inventory object by copying the template. diff --git a/pkg/inventory/inventoryidmatchstatus_string.go b/pkg/inventory/inventoryidmatchstatus_string.go deleted file mode 100644 index ef3b0e3..0000000 --- a/pkg/inventory/inventoryidmatchstatus_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=inventoryIDMatchStatus"; DO NOT EDIT. - -package inventory - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[Empty-0] - _ = x[Match-1] - _ = x[NoMatch-2] -} - -const _inventoryIDMatchStatus_name = "EmptyMatchNoMatch" - -var _inventoryIDMatchStatus_index = [...]uint8{0, 5, 10, 17} - -func (i inventoryIDMatchStatus) String() string { - if i < 0 || i >= inventoryIDMatchStatus(len(_inventoryIDMatchStatus_index)-1) { - return "inventoryIDMatchStatus(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _inventoryIDMatchStatus_name[_inventoryIDMatchStatus_index[i]:_inventoryIDMatchStatus_index[i+1]] -} diff --git a/pkg/inventory/inventorypolicy_string.go b/pkg/inventory/inventorypolicy_string.go deleted file mode 100644 index 6053c41..0000000 --- a/pkg/inventory/inventorypolicy_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=InventoryPolicy"; DO NOT EDIT. - -package inventory - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[InventoryPolicyMustMatch-0] - _ = x[AdoptIfNoInventory-1] - _ = x[AdoptAll-2] -} - -const _InventoryPolicy_name = "InventoryPolicyMustMatchAdoptIfNoInventoryAdoptAll" - -var _InventoryPolicy_index = [...]uint8{0, 24, 42, 50} - -func (i InventoryPolicy) String() string { - if i < 0 || i >= InventoryPolicy(len(_InventoryPolicy_index)-1) { - return "InventoryPolicy(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InventoryPolicy_name[_InventoryPolicy_index[i]:_InventoryPolicy_index[i+1]] -} diff --git a/pkg/inventory/policy.go b/pkg/inventory/policy.go index ae42cf8..ff1c39b 100644 --- a/pkg/inventory/policy.go +++ b/pkg/inventory/policy.go @@ -9,18 +9,18 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) -// InventoryPolicy defines if an inventory object can take over +// Policy defines if an inventory object can take over // objects that belong to another inventory object or don't // belong to any inventory object. // This is done by determining if the apply/prune operation // can go through for a resource based on the comparison // the inventory-id value in the package and the owning-inventory // annotation in the live object. -//go:generate stringer -type=InventoryPolicy -type InventoryPolicy int +//go:generate stringer -type=Policy -linecomment +type Policy int const ( - // InvnetoryPolicyMustMatch: This policy enforces that the resources being applied can not + // PolicyMustMatch: This policy enforces that the resources being applied can not // have any overlap with objects in other inventories or objects that already exist // in the cluster but don't belong to an inventory. // @@ -33,9 +33,9 @@ const ( // The prune operation can go through when // - The owning-inventory annotation in the live object match with that // in the package. - InventoryPolicyMustMatch InventoryPolicy = iota + PolicyMustMatch Policy = iota // MustMatch - // AdoptIfNoInventory: This policy enforces that resources being applied + // PolicyAdoptIfNoInventory: This policy enforces that resources being applied // can not have any overlap with objects in other inventories, but are // permitted to take ownership of objects that don't belong to any inventories. // @@ -52,9 +52,9 @@ const ( // - The owning-inventory annotation in the live object match with that // in the package. // - The live object doesn't have the owning-inventory annotation. 
- AdoptIfNoInventory + PolicyAdoptIfNoInventory // AdoptIfNoInventory - // AdoptAll: This policy will let the current inventory take ownership of any objects. + // PolicyAdoptAll: This policy will let the current inventory take ownership of any objects. // // The apply operation can go through for any resource in the package even if the // live object has an unmatched owning-inventory annotation. @@ -63,24 +63,24 @@ const ( // - The owning-inventory annotation in the live object match or doesn't match with that // in the package. // - The live object doesn't have the owning-inventory annotation. - AdoptAll + PolicyAdoptAll // AdoptAll ) // OwningInventoryKey is the annotation key indicating the inventory owning an object. const OwningInventoryKey = "config.k8s.io/owning-inventory" -// inventoryIDMatchStatus represents the result of comparing the +// IDMatchStatus represents the result of comparing the // id from current inventory info and the inventory-id from a live object. -//go:generate stringer -type=inventoryIDMatchStatus -type inventoryIDMatchStatus int +//go:generate stringer -type=IDMatchStatus +type IDMatchStatus int const ( - Empty inventoryIDMatchStatus = iota + Empty IDMatchStatus = iota Match NoMatch ) -func InventoryIDMatch(inv InventoryInfo, obj *unstructured.Unstructured) inventoryIDMatchStatus { +func IDMatch(inv Info, obj *unstructured.Unstructured) IDMatchStatus { annotations := obj.GetAnnotations() value, found := annotations[OwningInventoryKey] if !found { @@ -92,14 +92,14 @@ func InventoryIDMatch(inv InventoryInfo, obj *unstructured.Unstructured) invento return NoMatch } -func CanApply(inv InventoryInfo, obj *unstructured.Unstructured, policy InventoryPolicy) (bool, error) { +func CanApply(inv Info, obj *unstructured.Unstructured, policy Policy) (bool, error) { if obj == nil { return true, nil } - matchStatus := InventoryIDMatch(inv, obj) + matchStatus := IDMatch(inv, obj) switch matchStatus { case Empty: - if policy != InventoryPolicyMustMatch { + if policy != PolicyMustMatch { return true, nil } err := fmt.Errorf("can't adopt an object without the annotation %s", OwningInventoryKey) @@ -107,7 +107,7 @@ func CanApply(inv InventoryInfo, obj *unstructured.Unstructured, policy Inventor case Match: return true, nil case NoMatch: - if policy == AdoptAll { + if policy == PolicyAdoptAll { return true, nil } err := fmt.Errorf("can't apply the resource since its annotation %s is a different inventory object", OwningInventoryKey) @@ -117,23 +117,23 @@ func CanApply(inv InventoryInfo, obj *unstructured.Unstructured, policy Inventor return false, nil } -func CanPrune(inv InventoryInfo, obj *unstructured.Unstructured, policy InventoryPolicy) bool { +func CanPrune(inv Info, obj *unstructured.Unstructured, policy Policy) bool { if obj == nil { return false } - matchStatus := InventoryIDMatch(inv, obj) + matchStatus := IDMatch(inv, obj) switch matchStatus { case Empty: - return policy == AdoptIfNoInventory || policy == AdoptAll + return policy == PolicyAdoptIfNoInventory || policy == PolicyAdoptAll case Match: return true case NoMatch: - return policy == AdoptAll + return policy == PolicyAdoptAll } return false } -func AddInventoryIDAnnotation(obj *unstructured.Unstructured, inv InventoryInfo) { +func AddInventoryIDAnnotation(obj *unstructured.Unstructured, inv Info) { annotations := obj.GetAnnotations() if annotations == nil { annotations = make(map[string]string) @@ -141,27 +141,3 @@ func AddInventoryIDAnnotation(obj *unstructured.Unstructured, inv InventoryInfo) 
annotations[OwningInventoryKey] = inv.ID() obj.SetAnnotations(annotations) } - -type InventoryOverlapError struct { - err error -} - -func (e *InventoryOverlapError) Error() string { - return e.err.Error() -} - -func NewInventoryOverlapError(err error) *InventoryOverlapError { - return &InventoryOverlapError{err: err} -} - -type NeedAdoptionError struct { - err error -} - -func (e *NeedAdoptionError) Error() string { - return e.err.Error() -} - -func NewNeedAdoptionError(err error) *NeedAdoptionError { - return &NeedAdoptionError{err: err} -} diff --git a/pkg/inventory/policy_string.go b/pkg/inventory/policy_string.go new file mode 100644 index 0000000..6437165 --- /dev/null +++ b/pkg/inventory/policy_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Policy -linecomment"; DO NOT EDIT. + +package inventory + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[PolicyMustMatch-0] + _ = x[PolicyAdoptIfNoInventory-1] + _ = x[PolicyAdoptAll-2] +} + +const _Policy_name = "MustMatchAdoptIfNoInventoryAdoptAll" + +var _Policy_index = [...]uint8{0, 9, 27, 35} + +func (i Policy) String() string { + if i < 0 || i >= Policy(len(_Policy_index)-1) { + return "Policy(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Policy_name[_Policy_index[i]:_Policy_index[i+1]] +} diff --git a/pkg/inventory/policy_test.go b/pkg/inventory/policy_test.go index 7498b46..efb3fe8 100644 --- a/pkg/inventory/policy_test.go +++ b/pkg/inventory/policy_test.go @@ -25,7 +25,7 @@ func (i *fakeInventoryInfo) ID() string { return i.id } -func (i *fakeInventoryInfo) Strategy() InventoryStrategy { +func (i *fakeInventoryInfo) Strategy() Strategy { return NameStrategy } @@ -52,8 +52,8 @@ func TestInventoryIDMatch(t *testing.T) { testcases := []struct { name string obj *unstructured.Unstructured - inv InventoryInfo - expected inventoryIDMatchStatus + inv Info + expected IDMatchStatus }{ { name: "empty", @@ -75,7 +75,7 @@ func TestInventoryIDMatch(t *testing.T) { }, } for _, tc := range testcases { - actual := InventoryIDMatch(tc.inv, tc.obj) + actual := IDMatch(tc.inv, tc.obj) if actual != tc.expected { t.Errorf("expected %v, but got %v", tc.expected, actual) } @@ -86,8 +86,8 @@ func TestCanApply(t *testing.T) { testcases := []struct { name string obj *unstructured.Unstructured - inv InventoryInfo - policy InventoryPolicy + inv Info + policy Policy canApply bool }{ { @@ -100,63 +100,63 @@ func TestCanApply(t *testing.T) { name: "empty with AdoptIfNoInventory", obj: testObjectWithAnnotation("", ""), inv: &fakeInventoryInfo{id: "random-id"}, - policy: AdoptIfNoInventory, + policy: PolicyAdoptIfNoInventory, canApply: true, }, { name: "empty with AdoptAll", obj: testObjectWithAnnotation("", ""), inv: &fakeInventoryInfo{id: "random-id"}, - policy: AdoptAll, + policy: PolicyAdoptAll, canApply: true, }, { name: "empty with InventoryPolicyMustMatch", obj: testObjectWithAnnotation("", ""), inv: &fakeInventoryInfo{id: "random-id"}, - policy: InventoryPolicyMustMatch, + policy: PolicyMustMatch, canApply: false, }, { name: "matched with InventoryPolicyMustMatch", obj: testObjectWithAnnotation(OwningInventoryKey, "matched"), inv: &fakeInventoryInfo{id: "matched"}, - policy: InventoryPolicyMustMatch, + policy: PolicyMustMatch, canApply: true, }, { name: "matched with AdoptIfNoInventory", obj: testObjectWithAnnotation(OwningInventoryKey, "matched"), inv: 
&fakeInventoryInfo{id: "matched"}, - policy: AdoptIfNoInventory, + policy: PolicyAdoptIfNoInventory, canApply: true, }, { name: "matched with AloptAll", obj: testObjectWithAnnotation(OwningInventoryKey, "matched"), inv: &fakeInventoryInfo{id: "matched"}, - policy: AdoptAll, + policy: PolicyAdoptAll, canApply: true, }, { name: "unmatched with InventoryPolicyMustMatch", obj: testObjectWithAnnotation(OwningInventoryKey, "unmatched"), inv: &fakeInventoryInfo{id: "random-id"}, - policy: InventoryPolicyMustMatch, + policy: PolicyMustMatch, canApply: false, }, { name: "unmatched with AdoptIfNoInventory", obj: testObjectWithAnnotation(OwningInventoryKey, "unmatched"), inv: &fakeInventoryInfo{id: "random-id"}, - policy: AdoptIfNoInventory, + policy: PolicyAdoptIfNoInventory, canApply: false, }, { name: "unmatched with AdoptAll", obj: testObjectWithAnnotation(OwningInventoryKey, "unmatched"), inv: &fakeInventoryInfo{id: "random-id"}, - policy: AdoptAll, + policy: PolicyAdoptAll, canApply: true, }, } @@ -172,8 +172,8 @@ func TestCanPrune(t *testing.T) { testcases := []struct { name string obj *unstructured.Unstructured - inv InventoryInfo - policy InventoryPolicy + inv Info + policy Policy canPrune bool }{ { @@ -186,63 +186,63 @@ func TestCanPrune(t *testing.T) { name: "empty with AdoptIfNoInventory", obj: testObjectWithAnnotation("", ""), inv: &fakeInventoryInfo{id: "random-id"}, - policy: AdoptIfNoInventory, + policy: PolicyAdoptIfNoInventory, canPrune: true, }, { name: "empty with AdoptAll", obj: testObjectWithAnnotation("", ""), inv: &fakeInventoryInfo{id: "random-id"}, - policy: AdoptAll, + policy: PolicyAdoptAll, canPrune: true, }, { name: "empty with InventoryPolicyMustMatch", obj: testObjectWithAnnotation("", ""), inv: &fakeInventoryInfo{id: "random-id"}, - policy: InventoryPolicyMustMatch, + policy: PolicyMustMatch, canPrune: false, }, { name: "matched with InventoryPolicyMustMatch", obj: testObjectWithAnnotation(OwningInventoryKey, "matched"), inv: &fakeInventoryInfo{id: "matched"}, - policy: InventoryPolicyMustMatch, + policy: PolicyMustMatch, canPrune: true, }, { name: "matched with AdoptIfNoInventory", obj: testObjectWithAnnotation(OwningInventoryKey, "matched"), inv: &fakeInventoryInfo{id: "matched"}, - policy: AdoptIfNoInventory, + policy: PolicyAdoptIfNoInventory, canPrune: true, }, { name: "matched with AloptAll", obj: testObjectWithAnnotation(OwningInventoryKey, "matched"), inv: &fakeInventoryInfo{id: "matched"}, - policy: AdoptAll, + policy: PolicyAdoptAll, canPrune: true, }, { name: "unmatched with InventoryPolicyMustMatch", obj: testObjectWithAnnotation(OwningInventoryKey, "unmatched"), inv: &fakeInventoryInfo{id: "random-id"}, - policy: InventoryPolicyMustMatch, + policy: PolicyMustMatch, canPrune: false, }, { name: "unmatched with AdoptIfNoInventory", obj: testObjectWithAnnotation(OwningInventoryKey, "unmatched"), inv: &fakeInventoryInfo{id: "random-id"}, - policy: AdoptIfNoInventory, + policy: PolicyAdoptIfNoInventory, canPrune: false, }, { name: "unmatched with AdoptAll", obj: testObjectWithAnnotation(OwningInventoryKey, "unmatched"), inv: &fakeInventoryInfo{id: "random-id"}, - policy: AdoptAll, + policy: PolicyAdoptAll, canPrune: true, }, } diff --git a/pkg/kstatus/polling/clusterreader/fake/fake.go b/pkg/kstatus/polling/clusterreader/fake/fake.go index 7ecbb60..10725be 100644 --- a/pkg/kstatus/polling/clusterreader/fake/fake.go +++ b/pkg/kstatus/polling/clusterreader/fake/fake.go @@ -11,7 +11,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -type 
FakeClusterReader struct { +type ClusterReader struct { NoopClusterReader GetResource *unstructured.Unstructured @@ -23,21 +23,21 @@ type FakeClusterReader struct { SyncErr error } -func (f *FakeClusterReader) Get(_ context.Context, _ client.ObjectKey, u *unstructured.Unstructured) error { +func (f *ClusterReader) Get(_ context.Context, _ client.ObjectKey, u *unstructured.Unstructured) error { if f.GetResource != nil { u.Object = f.GetResource.Object } return f.GetErr } -func (f *FakeClusterReader) ListNamespaceScoped(_ context.Context, list *unstructured.UnstructuredList, _ string, _ labels.Selector) error { +func (f *ClusterReader) ListNamespaceScoped(_ context.Context, list *unstructured.UnstructuredList, _ string, _ labels.Selector) error { if f.ListResources != nil { list.Items = f.ListResources.Items } return f.ListErr } -func (f *FakeClusterReader) Sync(_ context.Context) error { +func (f *ClusterReader) Sync(_ context.Context) error { return f.SyncErr } diff --git a/pkg/kstatus/polling/collector/collector.go b/pkg/kstatus/polling/collector/collector.go index db0bc92..9869b5e 100644 --- a/pkg/kstatus/polling/collector/collector.go +++ b/pkg/kstatus/polling/collector/collector.go @@ -51,7 +51,7 @@ func (o ObserverFunc) Notify(rsc *ResourceStatusCollector, e event.Event) { type ResourceStatusCollector struct { mux sync.RWMutex - LastEventType event.EventType + LastEventType event.Type ResourceStatuses map[object.ObjMetadata]*event.ResourceStatus @@ -99,12 +99,12 @@ func (o *ResourceStatusCollector) ListenWithObserver(eventChannel <-chan event.E func (o *ResourceStatusCollector) processEvent(e event.Event) error { o.mux.Lock() defer o.mux.Unlock() - o.LastEventType = e.EventType - if e.EventType == event.ErrorEvent { + o.LastEventType = e.Type + if e.Type == event.ErrorEvent { o.Error = e.Error return e.Error } - if e.EventType == event.ResourceUpdateEvent { + if e.Type == event.ResourceUpdateEvent { resourceStatus := e.Resource o.ResourceStatuses[resourceStatus.Identifier] = resourceStatus } @@ -114,7 +114,7 @@ func (o *ResourceStatusCollector) processEvent(e event.Event) error { // Observation contains the latest state known by the collector as returned // by a call to the LatestObservation function. 
type Observation struct { - LastEventType event.EventType + LastEventType event.Type ResourceStatuses []*event.ResourceStatus diff --git a/pkg/kstatus/polling/collector/collector_test.go b/pkg/kstatus/polling/collector/collector_test.go index 62cef14..d420cf5 100644 --- a/pkg/kstatus/polling/collector/collector_test.go +++ b/pkg/kstatus/polling/collector/collector_test.go @@ -49,8 +49,8 @@ func TestCollectorWithFatalError(t *testing.T) { exampleErr := fmt.Errorf("this is a test error") eventCh <- event.Event{ - EventType: event.ErrorEvent, - Error: exampleErr, + Type: event.ErrorEvent, + Error: exampleErr, } var err error @@ -101,7 +101,7 @@ func TestCollectorEventProcessing(t *testing.T) { }, events: []event.Event{ { - EventType: event.ResourceUpdateEvent, + Type: event.ResourceUpdateEvent, Resource: &event.ResourceStatus{ Identifier: resourceIdentifiers["deployment"], }, @@ -115,25 +115,25 @@ func TestCollectorEventProcessing(t *testing.T) { }, events: []event.Event{ { - EventType: event.ResourceUpdateEvent, + Type: event.ResourceUpdateEvent, Resource: &event.ResourceStatus{ Identifier: resourceIdentifiers["deployment"], }, }, { - EventType: event.ResourceUpdateEvent, + Type: event.ResourceUpdateEvent, Resource: &event.ResourceStatus{ Identifier: resourceIdentifiers["statefulSet"], }, }, { - EventType: event.ResourceUpdateEvent, + Type: event.ResourceUpdateEvent, Resource: &event.ResourceStatus{ Identifier: resourceIdentifiers["deployment"], }, }, { - EventType: event.ResourceUpdateEvent, + Type: event.ResourceUpdateEvent, Resource: &event.ResourceStatus{ Identifier: resourceIdentifiers["statefulSet"], }, @@ -169,7 +169,7 @@ func TestCollectorEventProcessing(t *testing.T) { var expectedObservation *Observation if latestEvent != nil { expectedObservation = &Observation{ - LastEventType: latestEvent.EventType, + LastEventType: latestEvent.Type, } } else { expectedObservation = &Observation{} diff --git a/pkg/kstatus/polling/engine/engine.go b/pkg/kstatus/polling/engine/engine.go index 25f5e89..57b73e6 100644 --- a/pkg/kstatus/polling/engine/engine.go +++ b/pkg/kstatus/polling/engine/engine.go @@ -78,8 +78,8 @@ func (s *PollerEngine) Poll(ctx context.Context, identifiers object.ObjMetadataS func handleError(eventChannel chan event.Event, err error) { eventChannel <- event.Event{ - EventType: event.ErrorEvent, - Error: err, + Type: event.ErrorEvent, + Error: err, } } @@ -197,8 +197,8 @@ func (r *statusPollerRunner) handleSyncAndPollErr(err error) { return } r.eventChannel <- event.Event{ - EventType: event.ErrorEvent, - Error: err, + Type: event.ErrorEvent, + Error: err, } } @@ -236,8 +236,8 @@ func (r *statusPollerRunner) pollStatusForAllResources() error { if r.isUpdatedResourceStatus(resourceStatus) { r.previousResourceStatuses[id] = resourceStatus r.eventChannel <- event.Event{ - EventType: event.ResourceUpdateEvent, - Resource: resourceStatus, + Type: event.ResourceUpdateEvent, + Resource: resourceStatus, } } } diff --git a/pkg/kstatus/polling/engine/engine_test.go b/pkg/kstatus/polling/engine/engine_test.go index dd620e5..955718a 100644 --- a/pkg/kstatus/polling/engine/engine_test.go +++ b/pkg/kstatus/polling/engine/engine_test.go @@ -27,7 +27,7 @@ func TestStatusPollerRunner(t *testing.T) { testCases := map[string]struct { identifiers object.ObjMetadataSet defaultStatusReader StatusReader - expectedEventTypes []event.EventType + expectedEventTypes []event.Type }{ "single resource": { identifiers: object.ObjMetadataSet{ @@ -49,7 +49,7 @@ func TestStatusPollerRunner(t *testing.T) { }, 
 				resourceStatusCount: make(map[schema.GroupKind]int),
 			},
-			expectedEventTypes: []event.EventType{
+			expectedEventTypes: []event.Type{
 				event.ResourceUpdateEvent,
 				event.ResourceUpdateEvent,
 			},
@@ -87,7 +87,7 @@ func TestStatusPollerRunner(t *testing.T) {
 			},
 				resourceStatusCount: make(map[schema.GroupKind]int),
 			},
-			expectedEventTypes: []event.EventType{
+			expectedEventTypes: []event.Type{
 				event.ResourceUpdateEvent,
 				event.ResourceUpdateEvent,
 				event.ResourceUpdateEvent,
@@ -123,9 +123,9 @@ func TestStatusPollerRunner(t *testing.T) {
 			eventChannel := engine.Poll(ctx, identifiers, options)

-			var eventTypes []event.EventType
+			var eventTypes []event.Type
 			for ch := range eventChannel {
-				eventTypes = append(eventTypes, ch.EventType)
+				eventTypes = append(eventTypes, ch.Type)
 				if len(eventTypes) == len(tc.expectedEventTypes) {
 					cancel()
 				}
@@ -201,7 +201,7 @@ func TestNewStatusPollerRunnerCancellationWithMultipleResources(t *testing.T) {
 	engine := PollerEngine{
 		Mapper: fakeMapper,
 		ClusterReaderFactory: ClusterReaderFactoryFunc(func(client.Reader, meta.RESTMapper, object.ObjMetadataSet) (ClusterReader, error) {
-			return &fakecr.FakeClusterReader{
+			return &fakecr.ClusterReader{
 				SyncErr: context.Canceled,
 			}, nil
 		}),
@@ -257,8 +257,8 @@ func TestNewStatusPollerRunnerIdentifierValidation(t *testing.T) {
 	defer timer.Stop()
 	select {
 	case e := <-eventChannel:
-		if e.EventType != event.ErrorEvent {
-			t.Errorf("expected an error event, but got %s", e.EventType.String())
+		if e.Type != event.ErrorEvent {
+			t.Errorf("expected an error event, but got %s", e.Type.String())
 			return
 		}
 		err := e.Error
diff --git a/pkg/kstatus/polling/event/event.go b/pkg/kstatus/polling/event/event.go
index 499d660..9a91c97 100644
--- a/pkg/kstatus/polling/event/event.go
+++ b/pkg/kstatus/polling/event/event.go
@@ -9,15 +9,15 @@ import (
 	"sigs.k8s.io/cli-utils/pkg/object"
 )

-// EventType is the type that describes the type of an Event that is passed back to the caller
+// Type is the type that describes the type of an Event that is passed back to the caller
 // as resources in the cluster are being polled.
 //
-//go:generate stringer -type=EventType
-type EventType int
+//go:generate stringer -type=Type
+type Type int

 const (
 	// ResourceUpdateEvent describes events related to a change in the status of one of the polled resources.
-	ResourceUpdateEvent EventType = iota
+	ResourceUpdateEvent Type = iota
 	// ErrorEvent signals that the engine has encountered an error that it can not recover from. The engine
 	// is shutting down and the event channel will be closed after this event.
 	ErrorEvent
@@ -26,8 +26,8 @@ const (
 // Event defines that type that is passed back through the event channel to notify the caller of changes
 // as resources are being polled.
 type Event struct {
-	// EventType defines the type of event.
-	EventType EventType
+	// Type defines the type of event.
+	Type Type

 	// Resource is only available for ResourceUpdateEvents. It includes information about the resource,
 	// including the resource status, any errors and the resource itself (as an unstructured).
diff --git a/pkg/kstatus/polling/event/eventtype_string.go b/pkg/kstatus/polling/event/eventtype_string.go
deleted file mode 100644
index ba39dc7..0000000
--- a/pkg/kstatus/polling/event/eventtype_string.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Code generated by "stringer -type=EventType"; DO NOT EDIT.
-
-package event
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[ResourceUpdateEvent-0]
-	_ = x[ErrorEvent-1]
-}
-
-const _EventType_name = "ResourceUpdateEventErrorEvent"
-
-var _EventType_index = [...]uint8{0, 19, 29}
-
-func (i EventType) String() string {
-	if i < 0 || i >= EventType(len(_EventType_index)-1) {
-		return "EventType(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _EventType_name[_EventType_index[i]:_EventType_index[i+1]]
-}
diff --git a/pkg/kstatus/polling/event/type_string.go b/pkg/kstatus/polling/event/type_string.go
new file mode 100644
index 0000000..ea30904
--- /dev/null
+++ b/pkg/kstatus/polling/event/type_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package event
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[ResourceUpdateEvent-0]
+	_ = x[ErrorEvent-1]
+}
+
+const _Type_name = "ResourceUpdateEventErrorEvent"
+
+var _Type_index = [...]uint8{0, 19, 29}
+
+func (i Type) String() string {
+	if i < 0 || i >= Type(len(_Type_index)-1) {
+		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
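Note (illustrative, not part of the diff): the EventType to Type rename above changes how callers of the kstatus polling package read events off the channel. The sketch below is a minimal example only; the package and helper names are hypothetical, and it assumes nothing beyond the event.Type constants and the Event/ResourceStatus fields visible in this change.

// Illustrative sketch; not part of this change. Package and helper names are hypothetical.
package example

import (
	"fmt"

	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
)

// drainEvents consumes a polling event channel until it is closed, switching
// on the renamed Type field (formerly EventType).
func drainEvents(events <-chan event.Event) error {
	for e := range events {
		switch e.Type {
		case event.ErrorEvent:
			// The engine shuts down and closes the channel after an ErrorEvent.
			return e.Error
		case event.ResourceUpdateEvent:
			// Resource is only set for ResourceUpdateEvents.
			fmt.Printf("status update for %v\n", e.Resource.Identifier)
		}
	}
	return nil
}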
diff --git a/pkg/kstatus/polling/statusreaders/common_test.go b/pkg/kstatus/polling/statusreaders/common_test.go
index 7455ee1..2f98a4d 100644
--- a/pkg/kstatus/polling/statusreaders/common_test.go
+++ b/pkg/kstatus/polling/statusreaders/common_test.go
@@ -80,7 +80,7 @@ func TestLookupResource(t *testing.T) {

 	for tn, tc := range testCases {
 		t.Run(tn, func(t *testing.T) {
-			fakeReader := &fakecr.FakeClusterReader{
+			fakeReader := &fakecr.ClusterReader{
 				GetErr: tc.readerErr,
 			}
 			fakeMapper := fakemapper.NewFakeRESTMapper(deploymentGVK)
@@ -201,14 +201,14 @@ spec:

 	for tn, tc := range testCases {
 		t.Run(tn, func(t *testing.T) {
-			fakeClusterReader := &fakecr.FakeClusterReader{
+			fakeClusterReader := &fakecr.ClusterReader{
 				ListResources: &unstructured.UnstructuredList{
 					Items: tc.listObjects,
 				},
 				ListErr: tc.listErr,
 			}
 			fakeMapper := fakemapper.NewFakeRESTMapper(rsGVK)
-			fakeStatusReader := &fakesr.FakeStatusReader{}
+			fakeStatusReader := &fakesr.StatusReader{}

 			object := testutil.YamlToUnstructured(t, tc.manifest)
diff --git a/pkg/kstatus/polling/statusreaders/deployment_test.go b/pkg/kstatus/polling/statusreaders/deployment_test.go
index c6b0d6d..4c6e2c3 100644
--- a/pkg/kstatus/polling/statusreaders/deployment_test.go
+++ b/pkg/kstatus/polling/statusreaders/deployment_test.go
@@ -85,13 +85,13 @@ func TestReadStatus(t *testing.T) {
 	for tn := range testCases {
 		tc := testCases[tn]
 		t.Run(tn, func(t *testing.T) {
-			fakeReader := &fakecr.FakeClusterReader{
+			fakeReader := &fakecr.ClusterReader{
 				GetResource: tc.readerResource,
 				GetErr:      tc.readerErr,
 			}
 			fakeMapper := fakemapper.NewFakeRESTMapper(deploymentGVK, replicaSetGVK)

-			fakeStatusReader := &fake.FakeStatusReader{}
+			fakeStatusReader := &fake.StatusReader{}
 			statusReader := NewDeploymentResourceReader(fakeMapper, fakeStatusReader)

 			rs, err := statusReader.ReadStatus(context.Background(), fakeReader, tc.identifier)
diff --git a/pkg/kstatus/polling/statusreaders/fake/fake.go b/pkg/kstatus/polling/statusreaders/fake/fake.go
index 4feb393..aec3aeb 100644
--- a/pkg/kstatus/polling/statusreaders/fake/fake.go
+++ b/pkg/kstatus/polling/statusreaders/fake/fake.go
@@ -13,17 +13,17 @@ import (
 	"sigs.k8s.io/cli-utils/pkg/object"
 )

-type FakeStatusReader struct{}
+type StatusReader struct{}

-func (f *FakeStatusReader) Supports(schema.GroupKind) bool {
+func (f *StatusReader) Supports(schema.GroupKind) bool {
 	return true
 }

-func (f *FakeStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, _ object.ObjMetadata) (*event.ResourceStatus, error) {
+func (f *StatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, _ object.ObjMetadata) (*event.ResourceStatus, error) {
 	return nil, nil
 }

-func (f *FakeStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, obj *unstructured.Unstructured) (*event.ResourceStatus, error) {
+func (f *StatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, obj *unstructured.Unstructured) (*event.ResourceStatus, error) {
 	identifier := object.UnstructuredToObjMetadata(obj)
 	return &event.ResourceStatus{
 		Identifier: identifier,
diff --git a/pkg/manifestreader/fake-loader.go b/pkg/manifestreader/fake-loader.go
index 3b21f4d..83c9254 100644
--- a/pkg/manifestreader/fake-loader.go
+++ b/pkg/manifestreader/fake-loader.go
@@ -13,22 +13,22 @@ import (
 	"sigs.k8s.io/cli-utils/pkg/object"
 )

-type fakeLoader struct {
-	factory   util.Factory
-	InvClient *inventory.FakeInventoryClient
+type FakeLoader struct {
+	Factory   util.Factory
+	InvClient *inventory.FakeClient
 }

-var _ ManifestLoader = &fakeLoader{}
+var _ ManifestLoader = &FakeLoader{}

-func NewFakeLoader(f util.Factory, objs object.ObjMetadataSet) *fakeLoader {
-	return &fakeLoader{
-		factory:   f,
-		InvClient: inventory.NewFakeInventoryClient(objs),
+func NewFakeLoader(f util.Factory, objs object.ObjMetadataSet) *FakeLoader {
+	return &FakeLoader{
+		Factory:   f,
+		InvClient: inventory.NewFakeClient(objs),
 	}
 }

-func (f *fakeLoader) ManifestReader(reader io.Reader, _ string) (ManifestReader, error) {
-	mapper, err := f.factory.ToRESTMapper()
+func (f *FakeLoader) ManifestReader(reader io.Reader, _ string) (ManifestReader, error) {
+	mapper, err := f.Factory.ToRESTMapper()
 	if err != nil {
 		return nil, err
 	}
@@ -44,7 +44,7 @@ func (f *fakeLoader) ManifestReader(reader io.Reader, _ string) (ManifestReader,
 	}, nil
 }

-func (f *fakeLoader) InventoryInfo(objs []*unstructured.Unstructured) (inventory.InventoryInfo, []*unstructured.Unstructured, error) {
+func (f *FakeLoader) InventoryInfo(objs []*unstructured.Unstructured) (inventory.Info, []*unstructured.Unstructured, error) {
 	inv, objs, err := inventory.SplitUnstructureds(objs)
 	return inventory.WrapInventoryInfoObj(inv), objs, err
 }
diff --git a/pkg/print/table/columndefs.go b/pkg/print/table/columndefs.go
index 5f1455f..59c6b83 100644
--- a/pkg/print/table/columndefs.go
+++ b/pkg/print/table/columndefs.go
@@ -170,7 +170,7 @@ var (
 				if err != nil {
 					return realLength, err
 				}
-				realLength += 1
+				realLength++
 			}
 		}
 		return realLength, nil
diff --git a/pkg/printers/table/collector.go b/pkg/printers/table/collector.go
index 08f18ae..c5b35df 100644
--- a/pkg/printers/table/collector.go
+++ b/pkg/printers/table/collector.go
@@ -20,8 +20,8 @@ import (

 const InvalidStatus status.Status = "Invalid"

-func newResourceStateCollector(resourceGroups []event.ActionGroup) *ResourceStateCollector {
-	resourceInfos := make(map[object.ObjMetadata]*ResourceInfo)
+func newResourceStateCollector(resourceGroups []event.ActionGroup) *resourceStateCollector {
+	resourceInfos := make(map[object.ObjMetadata]*resourceInfo)
 	for _, group := range resourceGroups {
 		action := group.Action
 		// Keep the action that describes the operation for the resource
@@ -30,7 +30,7 @@ func newResourceStateCollector(resourceGroups []event.ActionGroup) *ResourceStat
 			continue
 		}
 		for _, identifier := range group.Identifiers {
-			resourceInfos[identifier] = &ResourceInfo{
+			resourceInfos[identifier] = &resourceInfo{
 				identifier: identifier,
 				resourceStatus: &pe.ResourceStatus{
 					Identifier: identifier,
@@ -40,31 +40,31 @@ func newResourceStateCollector(resourceGroups []event.ActionGroup) *ResourceStat
 			}
 		}
 	}
-	return &ResourceStateCollector{
+	return &resourceStateCollector{
 		resourceInfos: resourceInfos,
 	}
 }

-// ResourceStateCollector consumes the events from the applier
+// resourceStateCollector consumes the events from the applier
 // eventChannel and keeps track of the latest state for all resources.
 // It also provides functionality for fetching the latest seen
 // state and return it in format that can be used by the
 // BaseTablePrinter.
-type ResourceStateCollector struct {
+type resourceStateCollector struct {
 	mux sync.RWMutex

 	// resourceInfos contains a mapping from the unique
 	// resource identifier to a ResourceInfo object that captures
 	// the latest state for the given resource.
-	resourceInfos map[object.ObjMetadata]*ResourceInfo
+	resourceInfos map[object.ObjMetadata]*resourceInfo

 	err error
 }

-// ResourceInfo captures the latest seen state of a single resource.
+// resourceInfo captures the latest seen state of a single resource.
 // This is used for top-level resources that have a ResourceAction
 // associated with them.
-type ResourceInfo struct {
+type resourceInfo struct {
 	// identifier contains the information that identifies a
 	// single resource.
 	identifier object.ObjMetadata
@@ -100,55 +100,55 @@ type ResourceInfo struct {
 }

 // Identifier returns the identifier for the given resource.
-func (r *ResourceInfo) Identifier() object.ObjMetadata {
+func (r *resourceInfo) Identifier() object.ObjMetadata {
 	return r.identifier
 }

 // ResourceStatus returns the latest seen status for the
 // resource.
-func (r *ResourceInfo) ResourceStatus() *pe.ResourceStatus {
+func (r *resourceInfo) ResourceStatus() *pe.ResourceStatus {
 	return r.resourceStatus
 }

 // SubResources returns a slice of Resource which contains
 // any resources created and managed by this resource.
-func (r *ResourceInfo) SubResources() []table.Resource {
+func (r *resourceInfo) SubResources() []table.Resource {
 	var resources []table.Resource
 	for _, res := range r.resourceStatus.GeneratedResources {
-		resources = append(resources, &SubResourceInfo{
+		resources = append(resources, &subResourceInfo{
 			resourceStatus: res,
 		})
 	}
 	return resources
 }

-// SubResourceInfo captures the latest seen state of a
+// subResourceInfo captures the latest seen state of a
 // single subResource, i.e. resources that are created and
 // managed by one of the top-level resources we either apply
 // or prune.
-type SubResourceInfo struct {
+type subResourceInfo struct {
 	// resourceStatus contains the latest status information
 	// about the subResource.
 	resourceStatus *pe.ResourceStatus
 }

 // Identifier returns the identifier for the given subResource.
-func (r *SubResourceInfo) Identifier() object.ObjMetadata {
+func (r *subResourceInfo) Identifier() object.ObjMetadata {
 	return r.resourceStatus.Identifier
 }

 // ResourceStatus returns the latest seen status for the
 // subResource.
-func (r *SubResourceInfo) ResourceStatus() *pe.ResourceStatus {
+func (r *subResourceInfo) ResourceStatus() *pe.ResourceStatus {
 	return r.resourceStatus
 }

 // SubResources returns a slice of Resource which contains
 // any resources created and managed by this resource.
-func (r *SubResourceInfo) SubResources() []table.Resource {
+func (r *subResourceInfo) SubResources() []table.Resource {
 	var resources []table.Resource
 	for _, res := range r.resourceStatus.GeneratedResources {
-		resources = append(resources, &SubResourceInfo{
+		resources = append(resources, &subResourceInfo{
 			resourceStatus: res,
 		})
 	}
@@ -162,7 +162,7 @@ func (r *SubResourceInfo) SubResources() []table.Resource {
 // The function returns a channel. When this channel is closed, the
 // goroutine has processed all events in the eventChannel and
 // exited.
-func (r *ResourceStateCollector) Listen(eventChannel <-chan event.Event) <-chan listenerResult {
+func (r *resourceStateCollector) Listen(eventChannel <-chan event.Event) <-chan listenerResult {
 	completed := make(chan listenerResult)
 	go func() {
 		defer close(completed)
@@ -181,7 +181,7 @@ type listenerResult struct {
 }

 // processEvent processes an event and updates the state.
-func (r *ResourceStateCollector) processEvent(ev event.Event) error {
+func (r *resourceStateCollector) processEvent(ev event.Event) error {
 	r.mux.Lock()
 	defer r.mux.Unlock()
 	switch ev.Type {
@@ -203,7 +203,7 @@ func (r *ResourceStateCollector) processEvent(ev event.Event) error {

 // processValidationEvent handles events pertaining to a validation error
 // for a resource.
-func (r *ResourceStateCollector) processValidationEvent(e event.ValidationEvent) error {
+func (r *resourceStateCollector) processValidationEvent(e event.ValidationEvent) error {
 	klog.V(7).Infoln("processing validation event")
 	// unwrap validation errors
 	err := e.Error
@@ -231,7 +231,7 @@ func (r *ResourceStateCollector) processValidationEvent(e event.ValidationEvent)

 // processStatusEvent handles events pertaining to a status
 // update for a resource.
-func (r *ResourceStateCollector) processStatusEvent(e event.StatusEvent) {
+func (r *resourceStateCollector) processStatusEvent(e event.StatusEvent) {
 	klog.V(7).Infoln("processing status event")
 	previous, found := r.resourceInfos[e.Identifier]
 	if !found {
@@ -242,7 +242,7 @@ func (r *ResourceStateCollector) processStatusEvent(e event.StatusEvent) {
 }

 // processApplyEvent handles events relating to apply operations
-func (r *ResourceStateCollector) processApplyEvent(e event.ApplyEvent) {
+func (r *resourceStateCollector) processApplyEvent(e event.ApplyEvent) {
 	identifier := e.Identifier
 	klog.V(7).Infof("processing apply event for %s", identifier)
 	previous, found := r.resourceInfos[identifier]
@@ -257,7 +257,7 @@ func (r *ResourceStateCollector) processApplyEvent(e event.ApplyEvent) {
 }

 // processPruneEvent handles event related to prune operations.
-func (r *ResourceStateCollector) processPruneEvent(e event.PruneEvent) {
+func (r *resourceStateCollector) processPruneEvent(e event.PruneEvent) {
 	identifier := e.Identifier
 	klog.V(7).Infof("processing prune event for %s", identifier)
 	previous, found := r.resourceInfos[identifier]
@@ -272,7 +272,7 @@ func (r *ResourceStateCollector) processPruneEvent(e event.PruneEvent) {
 }

 // processPruneEvent handles event related to prune operations.
-func (r *ResourceStateCollector) processWaitEvent(e event.WaitEvent) {
+func (r *resourceStateCollector) processWaitEvent(e event.WaitEvent) {
 	identifier := e.Identifier
 	klog.V(7).Infof("processing wait event for %s", identifier)
 	previous, found := r.resourceInfos[identifier]
@@ -306,13 +306,13 @@ func (r *ResourceState) Error() error {

 // LatestState returns a ResourceState object that contains
 // a copy of the latest state for all resources.
-func (r *ResourceStateCollector) LatestState() *ResourceState {
+func (r *resourceStateCollector) LatestState() *ResourceState {
 	r.mux.RLock()
 	defer r.mux.RUnlock()

 	var resourceInfos ResourceInfos
 	for _, ri := range r.resourceInfos {
-		resourceInfos = append(resourceInfos, &ResourceInfo{
+		resourceInfos = append(resourceInfos, &resourceInfo{
 			identifier:     ri.identifier,
 			resourceStatus: ri.resourceStatus,
 			ResourceAction: ri.ResourceAction,
@@ -332,7 +332,7 @@ func (r *ResourceStateCollector) LatestState() *ResourceState {

 // Stats returns a summary of the results from the actuation operation
 // as a stats.Stats object.
-func (r *ResourceStateCollector) Stats() stats.Stats {
+func (r *resourceStateCollector) Stats() stats.Stats {
 	var s stats.Stats
 	for _, res := range r.resourceInfos {
 		switch res.ResourceAction {
@@ -357,7 +357,7 @@ func (r *ResourceStateCollector) Stats() stats.Stats {
 	return s
 }

-type ResourceInfos []*ResourceInfo
+type ResourceInfos []*resourceInfo

 func (g ResourceInfos) Len() int {
 	return len(g)
diff --git a/pkg/printers/table/collector_test.go b/pkg/printers/table/collector_test.go
index 0e34776..ec3cdec 100644
--- a/pkg/printers/table/collector_test.go
+++ b/pkg/printers/table/collector_test.go
@@ -48,11 +48,11 @@ const testMessage = "test message for ResourceStatus"
 func TestResourceStateCollector_New(t *testing.T) {
 	testCases := map[string]struct {
 		resourceGroups []event.ActionGroup
-		resourceInfos  map[object.ObjMetadata]*ResourceInfo
+		resourceInfos  map[object.ObjMetadata]*resourceInfo
 	}{
 		"no resources": {
 			resourceGroups: []event.ActionGroup{},
-			resourceInfos:  map[object.ObjMetadata]*ResourceInfo{},
+			resourceInfos:  map[object.ObjMetadata]*resourceInfo{},
 		},
 		"several resources for apply": {
 			resourceGroups: []event.ActionGroup{
@@ -63,7 +63,7 @@ func TestResourceStateCollector_New(t *testing.T) {
 					},
 				},
 			},
-			resourceInfos: map[object.ObjMetadata]*ResourceInfo{
+			resourceInfos: map[object.ObjMetadata]*resourceInfo{
 				depID: {
 					ResourceAction: event.ApplyAction,
 				},
@@ -87,7 +87,7 @@ func TestResourceStateCollector_New(t *testing.T) {
 					},
 				},
 			},
-			resourceInfos: map[object.ObjMetadata]*ResourceInfo{
+			resourceInfos: map[object.ObjMetadata]*resourceInfo{
 				depID: {
 					ResourceAction: event.PruneAction,
 				},
diff --git a/pkg/printers/table/printer.go b/pkg/printers/table/printer.go
index ce491c2..a07cdfa 100644
--- a/pkg/printers/table/printer.go
+++ b/pkg/printers/table/printer.go
@@ -85,9 +85,9 @@ var (
 		ColumnWidth: 12,
 		PrintResourceFunc: func(w io.Writer, width int, r table.Resource) (int, error) {
-			var resInfo *ResourceInfo
+			var resInfo *resourceInfo
 			switch res := r.(type) {
-			case *ResourceInfo:
+			case *resourceInfo:
 				resInfo = res
 			default:
 				return 0, nil
@@ -122,9 +122,9 @@ var (
 			int,
 			error,
 		) {
-			var resInfo *ResourceInfo
+			var resInfo *resourceInfo
 			switch res := r.(type) {
-			case *ResourceInfo:
+			case *resourceInfo:
 				resInfo = res
 			default:
 				return 0, nil
@@ -158,7 +158,7 @@ var (

 // runPrintLoop starts a new goroutine that will regularly fetch the
 // latest state from the collector and update the table.
-func (t *Printer) runPrintLoop(coll *ResourceStateCollector, stop chan struct{}) chan struct{} {
+func (t *Printer) runPrintLoop(coll *resourceStateCollector, stop chan struct{}) chan struct{} {
 	finished := make(chan struct{})

 	baseTablePrinter := table.BaseTablePrinter{
diff --git a/pkg/printers/table/printer_test.go b/pkg/printers/table/printer_test.go
index b58ac56..ba79e54 100644
--- a/pkg/printers/table/printer_test.go
+++ b/pkg/printers/table/printer_test.go
@@ -26,17 +26,17 @@ func TestActionColumnDef(t *testing.T) {
 		expectedOutput string
 	}{
 		"unexpected implementation of Resource interface": {
-			resource:       &SubResourceInfo{},
+			resource:       &subResourceInfo{},
 			columnWidth:    15,
 			expectedOutput: "",
 		},
 		"neither applied nor pruned": {
-			resource:       &ResourceInfo{},
+			resource:       &resourceInfo{},
 			columnWidth:    15,
 			expectedOutput: "",
 		},
 		"applied": {
-			resource: &ResourceInfo{
+			resource: &resourceInfo{
 				ResourceAction: event.ApplyAction,
 				ApplyOpResult:  createdOpResult,
 			},
@@ -44,7 +44,7 @@ func TestActionColumnDef(t *testing.T) {
 			expectedOutput: "Created",
 		},
 		"pruned": {
-			resource: &ResourceInfo{
+			resource: &resourceInfo{
 				ResourceAction: event.PruneAction,
 				PruneOpResult:  prunedOpResult,
 			},
@@ -52,7 +52,7 @@ func TestActionColumnDef(t *testing.T) {
 			expectedOutput: "Pruned",
 		},
 		"trimmed output": {
-			resource: &ResourceInfo{
+			resource: &resourceInfo{
 				ResourceAction: event.ApplyAction,
 				ApplyOpResult:  createdOpResult,
 			},
diff --git a/pkg/testutil/events.go b/pkg/testutil/events.go
index aee325a..465c7b0 100644
--- a/pkg/testutil/events.go
+++ b/pkg/testutil/events.go
@@ -89,7 +89,7 @@ func VerifyEvents(expEvents []ExpEvent, events []event.Event) error {
 		e := events[i]
 		ee := expEvents[expEventIndex]
 		if isMatch(ee, e) {
-			expEventIndex += 1
+			expEventIndex++
 			if expEventIndex >= len(expEvents) {
 				return nil
 			}
diff --git a/pkg/testutil/matcher.go b/pkg/testutil/matcher.go
index 98d4cd5..10ba412 100644
--- a/pkg/testutil/matcher.go
+++ b/pkg/testutil/matcher.go
@@ -57,7 +57,7 @@ func indent(in string, indentation uint) string {
 // any error with the same type as the supplied error.
 //
 // Use with testutil.Equal to handle error comparisons.
-func EqualErrorType(err error) equalErrorType {
+func EqualErrorType(err error) error {
 	return equalErrorType{
 		err: err,
 	}
@@ -86,7 +86,7 @@ func (e equalErrorType) Unwrap() error {
 // any error with the same Error() as the supplied string value.
 //
 // Use with testutil.Equal to handle error comparisons.
-func EqualErrorString(err string) equalErrorString {
+func EqualErrorString(err string) error {
 	return equalErrorString{
 		err: err,
 	}
diff --git a/test/e2e/apply_and_destroy_test.go b/test/e2e/apply_and_destroy_test.go
index 746e41b..76a641d 100644
--- a/test/e2e/apply_and_destroy_test.go
+++ b/test/e2e/apply_and_destroy_test.go
@@ -192,7 +192,7 @@ func applyAndDestroyTest(ctx context.Context, c client.Client, invConfig Invento
 	By("Destroy resources")
 	destroyer := invConfig.DestroyerFactoryFunc()
-	options := apply.DestroyerOptions{InventoryPolicy: inventory.AdoptIfNoInventory}
+	options := apply.DestroyerOptions{InventoryPolicy: inventory.PolicyAdoptIfNoInventory}
 	destroyerEvents := runCollect(destroyer.Run(ctx, inventoryInfo, options))

 	expEvents = []testutil.ExpEvent{
@@ -291,13 +291,13 @@ func applyAndDestroyTest(ctx context.Context, c client.Client, invConfig Invento
 	assertUnstructuredDoesNotExist(ctx, c, deployment1Obj)
 }

-func createInventoryInfo(invConfig InventoryConfig, inventoryName, namespaceName, inventoryID string) inventory.InventoryInfo {
-	switch invConfig.InventoryStrategy {
+func createInventoryInfo(invConfig InventoryConfig, inventoryName, namespaceName, inventoryID string) inventory.Info {
+	switch invConfig.Strategy {
 	case inventory.NameStrategy:
-		return invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, randomString("inventory-")))
+		return invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, randomString("inventory-")))
 	case inventory.LabelStrategy:
-		return invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(randomString("inventory-"), namespaceName, inventoryID))
+		return invConfig.InvWrapperFunc(invConfig.FactoryFunc(randomString("inventory-"), namespaceName, inventoryID))
 	default:
-		panic(fmt.Errorf("unknown inventory strategy %q", invConfig.InventoryStrategy))
+		panic(fmt.Errorf("unknown inventory strategy %q", invConfig.Strategy))
 	}
 }
diff --git a/test/e2e/continue_on_error_test.go b/test/e2e/continue_on_error_test.go
index 965260e..18395c7 100644
--- a/test/e2e/continue_on_error_test.go
+++ b/test/e2e/continue_on_error_test.go
@@ -23,7 +23,7 @@ func continueOnErrorTest(ctx context.Context, c client.Client, invConfig Invento
 	By("apply an invalid CRD")
 	applier := invConfig.ApplierFactoryFunc()

-	inv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, "test"))
+	inv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, "test"))
 	invalidCrdObj := manifestToUnstructured(invalidCrd)
 	pod1Obj := withNamespace(manifestToUnstructured(pod1), namespaceName)
diff --git a/test/e2e/crd_test.go b/test/e2e/crd_test.go
index c1982d5..11842fc 100644
--- a/test/e2e/crd_test.go
+++ b/test/e2e/crd_test.go
@@ -24,7 +24,7 @@ func crdTest(ctx context.Context, _ client.Client, invConfig InventoryConfig, in
 	By("apply a set of resources that includes both a crd and a cr")
 	applier := invConfig.ApplierFactoryFunc()

-	inv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, "test"))
+	inv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, "test"))
 	crdObj := manifestToUnstructured(crd)
 	crObj := manifestToUnstructured(cr)
@@ -214,7 +214,7 @@ func crdTest(ctx context.Context, _ client.Client, invConfig InventoryConfig, in
 	By("destroy the resources, including the crd")
 	destroyer := invConfig.DestroyerFactoryFunc()
-	options := apply.DestroyerOptions{InventoryPolicy: inventory.AdoptIfNoInventory}
+	options := apply.DestroyerOptions{InventoryPolicy: inventory.PolicyAdoptIfNoInventory}
 	destroyerEvents := runCollect(destroyer.Run(ctx, inv, options))

 	expEvents = []testutil.ExpEvent{
diff --git a/test/e2e/customprovider/provider.go b/test/e2e/customprovider/provider.go
index e5c82f0..0062f6f 100644
--- a/test/e2e/customprovider/provider.go
+++ b/test/e2e/customprovider/provider.go
@@ -74,16 +74,16 @@ var InventoryGVK = schema.GroupVersionKind{
 	Kind:    "Inventory",
 }

-var _ inventory.InventoryClientFactory = CustomInventoryClientFactory{}
+var _ inventory.ClientFactory = CustomClientFactory{}

-type CustomInventoryClientFactory struct {
+type CustomClientFactory struct {
 }

-func (CustomInventoryClientFactory) NewInventoryClient(factory util.Factory) (inventory.InventoryClient, error) {
-	return inventory.NewInventoryClient(factory, WrapInventoryObj, invToUnstructuredFunc)
+func (CustomClientFactory) NewClient(factory util.Factory) (inventory.Client, error) {
+	return inventory.NewClient(factory, WrapInventoryObj, invToUnstructuredFunc)
 }

-func invToUnstructuredFunc(inv inventory.InventoryInfo) *unstructured.Unstructured {
+func invToUnstructuredFunc(inv inventory.Info) *unstructured.Unstructured {
 	switch invInfo := inv.(type) {
 	case *InventoryCustomType:
 		return invInfo.inv
@@ -96,12 +96,12 @@ func WrapInventoryObj(obj *unstructured.Unstructured) inventory.Storage {
 	return &InventoryCustomType{inv: obj}
 }

-func WrapInventoryInfoObj(obj *unstructured.Unstructured) inventory.InventoryInfo {
+func WrapInventoryInfoObj(obj *unstructured.Unstructured) inventory.Info {
 	return &InventoryCustomType{inv: obj}
 }

 var _ inventory.Storage = &InventoryCustomType{}
-var _ inventory.InventoryInfo = &InventoryCustomType{}
+var _ inventory.Info = &InventoryCustomType{}

 type InventoryCustomType struct {
 	inv *unstructured.Unstructured
@@ -115,7 +115,7 @@ func (i InventoryCustomType) Name() string {
 	return i.inv.GetName()
 }

-func (i InventoryCustomType) Strategy() inventory.InventoryStrategy {
+func (i InventoryCustomType) Strategy() inventory.Strategy {
 	return inventory.NameStrategy
 }
diff --git a/test/e2e/depends_on_test.go b/test/e2e/depends_on_test.go
index 2c88959..d492a60 100644
--- a/test/e2e/depends_on_test.go
+++ b/test/e2e/depends_on_test.go
@@ -22,7 +22,7 @@ func dependsOnTest(ctx context.Context, c client.Client, invConfig InventoryConf
 	By("apply resources in order based on depends-on annotation")
 	applier := invConfig.ApplierFactoryFunc()

-	inv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, "test"))
+	inv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, "test"))

 	pod1Obj := withDependsOn(withNamespace(manifestToUnstructured(pod1), namespaceName), fmt.Sprintf("/namespaces/%s/Pod/pod3", namespaceName))
 	pod2Obj := withNamespace(manifestToUnstructured(pod2), namespaceName)
@@ -300,7 +300,7 @@ func dependsOnTest(ctx context.Context, c client.Client, invConfig InventoryConf
 	By("destroy resources in opposite order")
 	destroyer := invConfig.DestroyerFactoryFunc()
-	options := apply.DestroyerOptions{InventoryPolicy: inventory.AdoptIfNoInventory}
+	options := apply.DestroyerOptions{InventoryPolicy: inventory.PolicyAdoptIfNoInventory}
 	destroyerEvents := runCollect(destroyer.Run(ctx, inv, options))

 	expEvents = []testutil.ExpEvent{
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index ed2dc78..eb29d8d 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -31,15 +31,15 @@ import (
 )

 type inventoryFactoryFunc func(name, namespace, id string) *unstructured.Unstructured
-type invWrapperFunc func(*unstructured.Unstructured) inventory.InventoryInfo
+type invWrapperFunc func(*unstructured.Unstructured) inventory.Info
 type applierFactoryFunc func() *apply.Applier
 type destroyerFactoryFunc func() *apply.Destroyer
 type invSizeVerifyFunc func(ctx context.Context, c client.Client, name, namespace, id string, count int)
 type invCountVerifyFunc func(ctx context.Context, c client.Client, namespace string, count int)

 type InventoryConfig struct {
-	InventoryStrategy    inventory.InventoryStrategy
-	InventoryFactoryFunc inventoryFactoryFunc
+	Strategy             inventory.Strategy
+	FactoryFunc          inventoryFactoryFunc
 	InvWrapperFunc       invWrapperFunc
 	ApplierFactoryFunc   applierFactoryFunc
 	DestroyerFactoryFunc destroyerFactoryFunc
@@ -54,8 +54,8 @@ const (

 var inventoryConfigs = map[string]InventoryConfig{
 	ConfigMapTypeInvConfig: {
-		InventoryStrategy:    inventory.LabelStrategy,
-		InventoryFactoryFunc: cmInventoryManifest,
+		Strategy:             inventory.LabelStrategy,
+		FactoryFunc:          cmInventoryManifest,
 		InvWrapperFunc:       inventory.WrapInventoryInfoObj,
 		ApplierFactoryFunc:   newDefaultInvApplier,
 		DestroyerFactoryFunc: newDefaultInvDestroyer,
@@ -63,8 +63,8 @@ var inventoryConfigs = map[string]InventoryConfig{
 		InvCountVerifyFunc:   defaultInvCountVerifyFunc,
 	},
 	CustomTypeInvConfig: {
-		InventoryStrategy:    inventory.NameStrategy,
-		InventoryFactoryFunc: customInventoryManifest,
+		Strategy:             inventory.NameStrategy,
+		FactoryFunc:          customInventoryManifest,
 		InvWrapperFunc:       customprovider.WrapInventoryInfoObj,
 		ApplierFactoryFunc:   newCustomInvApplier,
 		DestroyerFactoryFunc: newCustomInvDestroyer,
@@ -243,7 +243,7 @@ var _ = Describe("Applier", func() {
 		})
 	}

-	Context("InventoryStrategy: Name", func() {
+	Context("Strategy: Name", func() {
 		var namespace *v1.Namespace
 		var inventoryName string
 		var ctx context.Context
@@ -325,11 +325,11 @@ func deleteNamespace(ctx context.Context, c client.Client, namespace *v1.Namespa
 }

 func newDefaultInvApplier() *apply.Applier {
-	return newApplierFromInvFactory(inventory.ClusterInventoryClientFactory{})
+	return newApplierFromInvFactory(inventory.ClusterClientFactory{})
 }

 func newDefaultInvDestroyer() *apply.Destroyer {
-	return newDestroyerFromInvFactory(inventory.ClusterInventoryClientFactory{})
+	return newDestroyerFromInvFactory(inventory.ClusterClientFactory{})
 }

 func defaultInvSizeVerifyFunc(ctx context.Context, c client.Client, name, namespace, id string, count int) {
@@ -355,11 +355,11 @@ func defaultInvCountVerifyFunc(ctx context.Context, c client.Client, namespace s
 }

 func newCustomInvApplier() *apply.Applier {
-	return newApplierFromInvFactory(customprovider.CustomInventoryClientFactory{})
+	return newApplierFromInvFactory(customprovider.CustomClientFactory{})
 }

 func newCustomInvDestroyer() *apply.Destroyer {
-	return newDestroyerFromInvFactory(customprovider.CustomInventoryClientFactory{})
+	return newDestroyerFromInvFactory(customprovider.CustomClientFactory{})
 }

 func newFactory() util.Factory {
@@ -396,9 +396,9 @@ func customInvCountVerifyFunc(ctx context.Context, c client.Client, namespace st
 	Expect(len(u.Items)).To(Equal(count))
 }

-func newApplierFromInvFactory(invFactory inventory.InventoryClientFactory) *apply.Applier {
+func newApplierFromInvFactory(invFactory inventory.ClientFactory) *apply.Applier {
 	f := newFactory()
-	invClient, err := invFactory.NewInventoryClient(f)
+	invClient, err := invFactory.NewClient(f)
 	Expect(err).NotTo(HaveOccurred())

 	a, err := apply.NewApplierBuilder().
@@ -409,9 +409,9 @@ func newApplierFromInvFactory(invFactory inventory.InventoryClientFactory) *appl
 	return a
 }

-func newDestroyerFromInvFactory(invFactory inventory.InventoryClientFactory) *apply.Destroyer {
+func newDestroyerFromInvFactory(invFactory inventory.ClientFactory) *apply.Destroyer {
 	f := newFactory()
-	invClient, err := invFactory.NewInventoryClient(f)
+	invClient, err := invFactory.NewClient(f)
 	Expect(err).NotTo(HaveOccurred())

 	d, err := apply.NewDestroyer(f, invClient)
diff --git a/test/e2e/exit_early_test.go b/test/e2e/exit_early_test.go
index 3777fb7..2219d29 100644
--- a/test/e2e/exit_early_test.go
+++ b/test/e2e/exit_early_test.go
@@ -23,7 +23,7 @@ func exitEarlyTest(ctx context.Context, c client.Client, invConfig InventoryConf
 	By("exit early on invalid object")
 	applier := invConfig.ApplierFactoryFunc()

-	inv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, "test"))
+	inv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, "test"))

 	fields := struct{ Namespace string }{Namespace: namespaceName}

 	// valid pod
diff --git a/test/e2e/inventory_policy_test.go b/test/e2e/inventory_policy_test.go
index 96f42ef..2988efa 100644
--- a/test/e2e/inventory_policy_test.go
+++ b/test/e2e/inventory_policy_test.go
@@ -25,7 +25,7 @@ func inventoryPolicyMustMatchTest(ctx context.Context, c client.Client, invConfi
 	applier := invConfig.ApplierFactoryFunc()

 	firstInvName := randomString("first-inv-")
-	firstInv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(firstInvName, namespaceName, firstInvName))
+	firstInv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(firstInvName, namespaceName, firstInvName))
 	deployment1Obj := withNamespace(manifestToUnstructured(deployment1), namespaceName)
 	firstResources := []*unstructured.Unstructured{
 		deployment1Obj,
@@ -38,7 +38,7 @@ func inventoryPolicyMustMatchTest(ctx context.Context, c client.Client, invConfi
 	By("Apply second set of resources")
 	secondInvName := randomString("second-inv-")
-	secondInv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(secondInvName, namespaceName, secondInvName))
+	secondInv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(secondInvName, namespaceName, secondInvName))
 	deployment1Obj = withNamespace(manifestToUnstructured(deployment1), namespaceName)
 	secondResources := []*unstructured.Unstructured{
 		withReplicas(deployment1Obj, 6),
@@ -47,7 +47,7 @@ func inventoryPolicyMustMatchTest(ctx context.Context, c client.Client, invConfi
 	applierEvents := runCollect(applier.Run(ctx, secondInv, secondResources, apply.ApplierOptions{
 		ReconcileTimeout: 2 * time.Minute,
 		EmitStatusEvents: true,
-		InventoryPolicy:  inventory.InventoryPolicyMustMatch,
+		InventoryPolicy:  inventory.PolicyMustMatch,
 	}))

 	By("Verify the events")
@@ -196,7 +196,7 @@ func inventoryPolicyAdoptIfNoInventoryTest(ctx context.Context, c client.Client,
 	applier := invConfig.ApplierFactoryFunc()

 	invName := randomString("test-inv-")
-	inv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(invName, namespaceName, invName))
+	inv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(invName, namespaceName, invName))
 	deployment1Obj = withNamespace(manifestToUnstructured(deployment1), namespaceName)
 	resources := []*unstructured.Unstructured{
 		withReplicas(deployment1Obj, 6),
@@ -205,7 +205,7 @@ func inventoryPolicyAdoptIfNoInventoryTest(ctx context.Context, c client.Client,
 	applierEvents := runCollect(applier.Run(ctx, inv, resources, apply.ApplierOptions{
 		ReconcileTimeout: 2 * time.Minute,
 		EmitStatusEvents: true,
-		InventoryPolicy:  inventory.AdoptIfNoInventory,
+		InventoryPolicy:  inventory.PolicyAdoptIfNoInventory,
 	}))

 	By("Verify the events")
@@ -365,7 +365,7 @@ func inventoryPolicyAdoptAllTest(ctx context.Context, c client.Client, invConfig
 	applier := invConfig.ApplierFactoryFunc()

 	firstInvName := randomString("first-inv-")
-	firstInv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(firstInvName, namespaceName, firstInvName))
+	firstInv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(firstInvName, namespaceName, firstInvName))
 	deployment1Obj := withNamespace(manifestToUnstructured(deployment1), namespaceName)
 	firstResources := []*unstructured.Unstructured{
 		deployment1Obj,
@@ -378,7 +378,7 @@ func inventoryPolicyAdoptAllTest(ctx context.Context, c client.Client, invConfig
 	By("Apply resources")
 	secondInvName := randomString("test-inv-")
-	secondInv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(secondInvName, namespaceName, secondInvName))
+	secondInv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(secondInvName, namespaceName, secondInvName))
 	deployment1Obj = withNamespace(manifestToUnstructured(deployment1), namespaceName)
 	secondResources := []*unstructured.Unstructured{
 		withReplicas(deployment1Obj, 6),
@@ -387,7 +387,7 @@ func inventoryPolicyAdoptAllTest(ctx context.Context, c client.Client, invConfig
 	applierEvents := runCollect(applier.Run(ctx, secondInv, secondResources, apply.ApplierOptions{
 		ReconcileTimeout: 2 * time.Minute,
 		EmitStatusEvents: true,
-		InventoryPolicy:  inventory.AdoptAll,
+		InventoryPolicy:  inventory.PolicyAdoptAll,
 	}))

 	By("Verify the events")
diff --git a/test/e2e/mutation_test.go b/test/e2e/mutation_test.go
index 65981b9..fe95f97 100644
--- a/test/e2e/mutation_test.go
+++ b/test/e2e/mutation_test.go
@@ -36,7 +36,7 @@ func mutationTest(ctx context.Context, c client.Client, invConfig InventoryConfi
 	By("apply resources in order with substitutions based on apply-time-mutation annotation")
 	applier := invConfig.ApplierFactoryFunc()

-	inv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, "test"))
+	inv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, "test"))

 	fields := struct{ Namespace string }{Namespace: namespaceName}
 	podAObj := templateToUnstructured(podATemplate, fields)
@@ -256,7 +256,7 @@ func mutationTest(ctx context.Context, c client.Client, invConfig InventoryConfi
 	By("destroy resources in opposite order")
 	destroyer := invConfig.DestroyerFactoryFunc()
-	options := apply.DestroyerOptions{InventoryPolicy: inventory.AdoptIfNoInventory}
+	options := apply.DestroyerOptions{InventoryPolicy: inventory.PolicyAdoptIfNoInventory}
 	destroyerEvents := runCollect(destroyer.Run(ctx, inv, options))

 	expEvents = []testutil.ExpEvent{
diff --git a/test/e2e/name_inv_strategy_test.go b/test/e2e/name_inv_strategy_test.go
index 8b17f46..66dd526 100644
--- a/test/e2e/name_inv_strategy_test.go
+++ b/test/e2e/name_inv_strategy_test.go
@@ -20,7 +20,7 @@ func applyWithExistingInvTest(ctx context.Context, c client.Client, invConfig In
 	applier := invConfig.ApplierFactoryFunc()

 	orgInventoryID := fmt.Sprintf("%s-%s", inventoryName, namespaceName)
-	orgApplyInv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, orgInventoryID))
+	orgApplyInv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, orgInventoryID))

 	resources := []*unstructured.Unstructured{
 		withNamespace(manifestToUnstructured(deployment1), namespaceName),
@@ -36,7 +36,7 @@ func applyWithExistingInvTest(ctx context.Context, c client.Client, invConfig In
 	By("Apply second set of resources, using same inventory name but different ID")
 	secondInventoryID := fmt.Sprintf("%s-%s-2", inventoryName, namespaceName)
-	secondApplyInv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, secondInventoryID))
+	secondApplyInv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, secondInventoryID))
 	err := run(applier.Run(ctx, secondApplyInv, resources, apply.ApplierOptions{
 		ReconcileTimeout: 2 * time.Minute,
diff --git a/test/e2e/prune_retrieve_error_test.go b/test/e2e/prune_retrieve_error_test.go
index fcc25b0..5b28c6d 100644
--- a/test/e2e/prune_retrieve_error_test.go
+++ b/test/e2e/prune_retrieve_error_test.go
@@ -296,7 +296,7 @@ func pruneRetrieveErrorTest(ctx context.Context, c client.Client, invConfig Inve
 	By("Destroy resources")
 	destroyer := invConfig.DestroyerFactoryFunc()
-	options := apply.DestroyerOptions{InventoryPolicy: inventory.AdoptIfNoInventory}
+	options := apply.DestroyerOptions{InventoryPolicy: inventory.PolicyAdoptIfNoInventory}
 	destroyerEvents := runCollect(destroyer.Run(ctx, inv, options))

 	expEvents3 := []testutil.ExpEvent{
diff --git a/test/e2e/serverside_apply_test.go b/test/e2e/serverside_apply_test.go
index 9f67189..05a0476 100644
--- a/test/e2e/serverside_apply_test.go
+++ b/test/e2e/serverside_apply_test.go
@@ -21,7 +21,7 @@ func serversideApplyTest(ctx context.Context, c client.Client, invConfig Invento
 	By("Apply a Deployment and an APIService by server-side apply")
 	applier := invConfig.ApplierFactoryFunc()

-	inv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, "test"))
+	inv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, "test"))
 	firstResources := []*unstructured.Unstructured{
 		withNamespace(manifestToUnstructured(deployment1), namespaceName),
 		manifestToUnstructured(apiservice1),
diff --git a/test/e2e/skip_invalid_test.go b/test/e2e/skip_invalid_test.go
index cda39da..08000f9 100644
--- a/test/e2e/skip_invalid_test.go
+++ b/test/e2e/skip_invalid_test.go
@@ -28,7 +28,7 @@ func skipInvalidTest(ctx context.Context, c client.Client, invConfig InventoryCo
 	By("apply valid objects and skip invalid objects")
 	applier := invConfig.ApplierFactoryFunc()

-	inv := invConfig.InvWrapperFunc(invConfig.InventoryFactoryFunc(inventoryName, namespaceName, "test"))
+	inv := invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, "test"))

 	fields := struct{ Namespace string }{Namespace: namespaceName}

 	// valid pod
@@ -345,7 +345,7 @@ func skipInvalidTest(ctx context.Context, c client.Client, invConfig InventoryCo
 	By("destroy valid objects and skip invalid objects")
 	destroyer := invConfig.DestroyerFactoryFunc()
 	destroyerEvents := runCollect(destroyer.Run(ctx, inv, apply.DestroyerOptions{
-		InventoryPolicy:  inventory.AdoptIfNoInventory,
+		InventoryPolicy:  inventory.PolicyAdoptIfNoInventory,
 		ValidationPolicy: validation.SkipInvalid,
 	}))
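Note (illustrative, not part of the diff): the e2e tests above exercise the renamed inventory policy constants with the applier and destroyer options. A minimal sketch, assuming only the apply and inventory packages already used by these tests; the example package name is hypothetical.

// Illustrative sketch; not part of this change. The package name is hypothetical.
package example

import (
	"sigs.k8s.io/cli-utils/pkg/apply"
	"sigs.k8s.io/cli-utils/pkg/inventory"
)

var (
	// Formerly inventory.AdoptIfNoInventory.
	destroyerOptions = apply.DestroyerOptions{
		InventoryPolicy: inventory.PolicyAdoptIfNoInventory,
	}

	// Formerly inventory.InventoryPolicyMustMatch.
	applierOptions = apply.ApplierOptions{
		InventoryPolicy: inventory.PolicyMustMatch,
	}
)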