From 3610f6dd3e7305e4b7df050f15ace26d2c4854a4 Mon Sep 17 00:00:00 2001 From: carlory Date: Wed, 15 Jun 2022 03:07:23 +0800 Subject: [PATCH] karmadactl support apply command Signed-off-by: carlory --- go.mod | 1 + pkg/karmadactl/apply.go | 216 ++++ pkg/karmadactl/karmadactl.go | 1 + .../jonboulle/clockwork/.editorconfig | 12 + .../github.com/jonboulle/clockwork/.gitignore | 27 + vendor/github.com/jonboulle/clockwork/LICENSE | 201 ++++ .../github.com/jonboulle/clockwork/README.md | 80 ++ .../jonboulle/clockwork/clockwork.go | 195 ++++ .../github.com/jonboulle/clockwork/ticker.go | 72 ++ .../pkg/util/jsonmergepatch/patch.go | 160 +++ vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go | 790 +++++++++++++++ .../pkg/cmd/apply/apply_edit_last_applied.go | 89 ++ .../pkg/cmd/apply/apply_set_last_applied.go | 229 +++++ .../pkg/cmd/apply/apply_view_last_applied.go | 174 ++++ .../k8s.io/kubectl/pkg/cmd/apply/patcher.go | 252 +++++ vendor/k8s.io/kubectl/pkg/cmd/apply/prune.go | 159 +++ .../k8s.io/kubectl/pkg/cmd/delete/delete.go | 442 +++++++++ .../kubectl/pkg/cmd/delete/delete_flags.go | 251 +++++ .../kubectl/pkg/cmd/util/editor/crlf/crlf.go | 57 ++ .../pkg/cmd/util/editor/editoptions.go | 928 ++++++++++++++++++ .../kubectl/pkg/cmd/util/editor/editor.go | 168 ++++ vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go | 631 ++++++++++++ vendor/k8s.io/kubectl/pkg/util/apply.go | 146 +++ vendor/k8s.io/kubectl/pkg/util/pod_port.go | 36 + vendor/k8s.io/kubectl/pkg/util/prune/prune.go | 105 ++ .../k8s.io/kubectl/pkg/util/service_port.go | 59 ++ vendor/k8s.io/kubectl/pkg/util/umask.go | 29 + .../k8s.io/kubectl/pkg/util/umask_windows.go | 29 + vendor/k8s.io/kubectl/pkg/util/util.go | 93 ++ vendor/modules.txt | 11 + 30 files changed, 5643 insertions(+) create mode 100644 pkg/karmadactl/apply.go create mode 100644 vendor/github.com/jonboulle/clockwork/.editorconfig create mode 100644 vendor/github.com/jonboulle/clockwork/.gitignore create mode 100644 vendor/github.com/jonboulle/clockwork/LICENSE create mode 100644 vendor/github.com/jonboulle/clockwork/README.md create mode 100644 vendor/github.com/jonboulle/clockwork/clockwork.go create mode 100644 vendor/github.com/jonboulle/clockwork/ticker.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/apply/prune.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/delete/delete.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/crlf.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/apply.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/pod_port.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/prune/prune.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/service_port.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/umask.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/umask_windows.go create mode 100644 
vendor/k8s.io/kubectl/pkg/util/util.go diff --git a/go.mod b/go.mod index e6135b0f8..5633c3e34 100644 --- a/go.mod +++ b/go.mod @@ -88,6 +88,7 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text v0.2.0 // indirect diff --git a/pkg/karmadactl/apply.go b/pkg/karmadactl/apply.go new file mode 100644 index 000000000..60ebfe7e0 --- /dev/null +++ b/pkg/karmadactl/apply.go @@ -0,0 +1,216 @@ +package karmadactl + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/resource" + restclient "k8s.io/client-go/rest" + kubectlapply "k8s.io/kubectl/pkg/cmd/apply" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/templates" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/util/names" +) + +var metadataAccessor = meta.NewAccessor() + +// CommandApplyOptions contains the input to the apply command. +type CommandApplyOptions struct { + // global flags + options.GlobalCommandOptions + // apply flags + KubectlApplyFlags *kubectlapply.ApplyFlags + Namespace string + AllClusters bool + + kubectlApplyOptions *kubectlapply.ApplyOptions +} + +var ( + applyLong = templates.LongDesc(` + Apply a configuration to a resource by file name or stdin and propagate it into member clusters. + The resource name must be specified. This resource will be created if it doesn't exist yet. + To use 'apply', always create the resource initially with either 'apply' or 'create --save-config'. + + JSON and YAML formats are accepted. + + Alpha Disclaimer: the --prune functionality is not yet complete. Do not use unless you are aware of what the current state is. See https://issues.k8s.io/34274. + + Note: It behaves the same as 'kubectl apply' by default. + If you want to propagate the resources into member clusters, please use the '--all-clusters' flag.`) + + applyExample = templates.Examples(` + # Apply the configuration without propagation into member clusters. It acts as 'kubectl apply'. + %[1]s apply -f manifest.yaml + + # Apply resources from a directory and propagate them into all member clusters. 
+ %[1]s apply -f dir/ --all-clusters`) +) + +// NewCommandApplyOptions returns an initialized CommandApplyOptions instance +func NewCommandApplyOptions() *CommandApplyOptions { + streams := genericclioptions.IOStreams{In: getIn, Out: getOut, ErrOut: getErr} + flags := kubectlapply.NewApplyFlags(nil, streams) + return &CommandApplyOptions{ + KubectlApplyFlags: flags, + } +} + +// NewCmdApply creates the `apply` command +func NewCmdApply(karmadaConfig KarmadaConfig, parentCommand string) *cobra.Command { + o := NewCommandApplyOptions() + cmd := &cobra.Command{ + Use: "apply (-f FILENAME | -k DIRECTORY)", + Short: "Apply a configuration to a resource by file name or stdin and propagate it into member clusters", + Long: applyLong, + Example: fmt.Sprintf(applyExample, parentCommand), + RunE: func(cmd *cobra.Command, args []string) error { + if err := o.Complete(karmadaConfig, cmd, parentCommand, args); err != nil { + return err + } + if err := o.Validate(cmd, args); err != nil { + return err + } + return o.Run() + }, + } + + o.GlobalCommandOptions.AddFlags(cmd.Flags()) + o.KubectlApplyFlags.AddFlags(cmd) + cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "If present, the namespace scope for this CLI request") + cmd.Flags().BoolVarP(&o.AllClusters, "all-clusters", "", o.AllClusters, "If present, propagates a group of resources to all member clusters.") + return cmd +} + +// Complete completes all the required options
+func (o *CommandApplyOptions) Complete(karmadaConfig KarmadaConfig, cmd *cobra.Command, parentCommand string, args []string) error { + restConfig, err := karmadaConfig.GetRestConfig(o.KarmadaContext, o.KubeConfig) + if err != nil { + return err + } + kubeConfigFlags := NewConfigFlags(true).WithDeprecatedPasswordFlag() + kubeConfigFlags.Namespace = &o.Namespace + kubeConfigFlags.WrapConfigFn = func(config *restclient.Config) *restclient.Config { return restConfig } + o.KubectlApplyFlags.Factory = cmdutil.NewFactory(kubeConfigFlags) + kubectlApplyOptions, err := o.KubectlApplyFlags.ToOptions(cmd, parentCommand, args) + if err != nil { + return err + } + o.kubectlApplyOptions = kubectlApplyOptions + return nil +} + +// Validate verifies if CommandApplyOptions are valid and without conflicts. +func (o *CommandApplyOptions) Validate(cmd *cobra.Command, args []string) error { + return o.kubectlApplyOptions.Validate(cmd, args) +} + +// Run executes the `apply` command. +func (o *CommandApplyOptions) Run() error { + if !o.AllClusters { + return o.kubectlApplyOptions.Run() + } + + if err := o.generateAndInjectPolicies(); err != nil { + return err + } + + return o.kubectlApplyOptions.Run() +} + +// generateAndInjectPolicies generates policies for the given resources and injects them into the set of objects to be applied. +// It returns an error if any of the policies cannot be generated. 
+func (o *CommandApplyOptions) generateAndInjectPolicies() error { + // load the resources + infos, err := o.kubectlApplyOptions.GetObjects() + if err != nil { + return err + } + + // generate policies and append them to the resources + var results []*resource.Info + for _, info := range infos { + results = append(results, info) + obj := o.generatePropagationObject(info) + gvk := obj.GetObjectKind().GroupVersionKind() + mapping, err := o.kubectlApplyOptions.Mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return fmt.Errorf("unable to recognize resource: %v", err) + } + client, err := o.KubectlApplyFlags.Factory.ClientForMapping(mapping) + if err != nil { + return fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) + } + policyName, _ := metadataAccessor.Name(obj) + ret := &resource.Info{ + Namespace: info.Namespace, + Name: policyName, + Object: obj, + Mapping: mapping, + Client: client, + } + results = append(results, ret) + } + + // store the result objects to be applied sequentially + o.kubectlApplyOptions.SetObjects(results) + return nil +} + +// generatePropagationObject generates a propagation object for the given resource info. +// It takes the resource namespace, name, and GVK as input to generate the policy name. +// TODO(carlory): allow users to select one or many member clusters to propagate resources. +func (o *CommandApplyOptions) generatePropagationObject(info *resource.Info) runtime.Object { + gvk := info.Mapping.GroupVersionKind + spec := policyv1alpha1.PropagationSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: info.Name, + Namespace: info.Namespace, + }, + }, + } + + if o.AllClusters { + spec.Placement.ClusterAffinity = &policyv1alpha1.ClusterAffinity{} + } + + // For a namespace-scoped resource, we need to generate a PropagationPolicy object. + // For a cluster-scoped resource, we need to generate a ClusterPropagationPolicy object. 
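+ // For example: with --all-clusters, applying a Deployment "nginx" in namespace "default" yields a
+ // PropagationPolicy roughly equivalent to the following (a sketch; the YAML field names assume the
+ // policy v1alpha1 API's conventional lower-camelCase JSON tags, and the name placeholder is not spelled out here):
+ //
+ //   apiVersion: policy.karmada.io/v1alpha1
+ //   kind: PropagationPolicy
+ //   metadata:
+ //     name: <generated by names.GeneratePolicyName>
+ //     namespace: default
+ //   spec:
+ //     resourceSelectors:
+ //       - apiVersion: apps/v1
+ //         kind: Deployment
+ //         name: nginx
+ //         namespace: default
+ //     placement:
+ //       clusterAffinity: {}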
+ var obj runtime.Object + policyName := names.GeneratePolicyName(info.Namespace, info.Name, gvk.String()) + if info.Namespaced() { + obj = &policyv1alpha1.PropagationPolicy{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy.karmada.io/v1alpha1", + Kind: "PropagationPolicy", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + Namespace: info.Namespace, + }, + Spec: spec, + } + } else { + obj = &policyv1alpha1.ClusterPropagationPolicy{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy.karmada.io/v1alpha1", + Kind: "ClusterPropagationPolicy", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: spec, + } + } + return obj +} diff --git a/pkg/karmadactl/karmadactl.go b/pkg/karmadactl/karmadactl.go index 88b04eede..e46d55acf 100644 --- a/pkg/karmadactl/karmadactl.go +++ b/pkg/karmadactl/karmadactl.go @@ -52,6 +52,7 @@ func NewKarmadaCtlCommand(cmdUse, parentCommand string) *cobra.Command { rootCmd.AddCommand(NewCmdCordon(karmadaConfig, parentCommand)) rootCmd.AddCommand(NewCmdUncordon(karmadaConfig, parentCommand)) rootCmd.AddCommand(NewCmdGet(karmadaConfig, parentCommand)) + rootCmd.AddCommand(NewCmdApply(karmadaConfig, parentCommand)) rootCmd.AddCommand(NewCmdTaint(karmadaConfig, parentCommand)) rootCmd.AddCommand(NewCmdPromote(karmadaConfig, parentCommand)) rootCmd.AddCommand(NewCmdLogs(karmadaConfig, parentCommand)) diff --git a/vendor/github.com/jonboulle/clockwork/.editorconfig b/vendor/github.com/jonboulle/clockwork/.editorconfig new file mode 100644 index 000000000..4492e9f9f --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore new file mode 100644 index 000000000..00852bd94 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/.gitignore @@ -0,0 +1,27 @@ +/.idea/ + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.swp diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE new file mode 100644 index 000000000..5c304d1a4 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md new file mode 100644 index 000000000..cad608357 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/README.md @@ -0,0 +1,80 @@ +# clockwork + +[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#utilities) + +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/jonboulle/clockwork/CI?style=flat-square)](https://github.com/jonboulle/clockwork/actions?query=workflow%3ACI) +[![Go Report Card](https://goreportcard.com/badge/github.com/jonboulle/clockwork?style=flat-square)](https://goreportcard.com/report/github.com/jonboulle/clockwork) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.11-61CFDD.svg?style=flat-square) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/jonboulle/clockwork) + +**A simple fake clock for Go.** + + +## Usage + +Replace uses of the `time` package with the `clockwork.Clock` interface instead. + +For example, instead of using `time.Sleep` directly: + +```go +func myFunc() { + time.Sleep(3 * time.Second) + doSomething() +} +``` + +Inject a clock and use its `Sleep` method instead: + +```go +func myFunc(clock clockwork.Clock) { + clock.Sleep(3 * time.Second) + doSomething() +} +``` + +Now you can easily test `myFunc` with a `FakeClock`: + +```go +func TestMyFunc(t *testing.T) { + c := clockwork.NewFakeClock() + + // Start our sleepy function + var wg sync.WaitGroup + wg.Add(1) + go func() { + myFunc(c) + wg.Done() + }() + + // Ensure we wait until myFunc is sleeping + c.BlockUntil(1) + + assertState() + + // Advance the FakeClock forward in time + c.Advance(3 * time.Second) + + // Wait until the function completes + wg.Wait() + + assertState() +} +``` + +and in production builds, simply inject the real clock instead: + +```go +myFunc(clockwork.NewRealClock()) +``` + +See [example_test.go](example_test.go) for a full example. + + +# Credits + +clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](https://blog.golang.org/playground#TOC_3.1.) + + +## License + +Apache License, Version 2.0. Please see [License File](LICENSE) for more information. 
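The examples above cover `Sleep` and `After`; this patch also vendors `ticker.go` (below), which adds a `Ticker` interface. A minimal sketch, assuming only the `Clock` and `Ticker` interfaces vendored here (the `pollN` helper is purely illustrative), of ticker-based polling that a test can drive with a `FakeClock`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/jonboulle/clockwork"
)

// pollN waits for n ticks from a ticker created on the injected clock.
// In production the real clock is passed in; in tests a FakeClock can be
// injected and driven forward with Advance.
func pollN(clock clockwork.Clock, period time.Duration, n int) {
	ticker := clock.NewTicker(period)
	defer ticker.Stop()
	for i := 0; i < n; i++ {
		<-ticker.Chan()
		fmt.Println("tick at", clock.Now())
	}
}

func main() {
	pollN(clockwork.NewRealClock(), 100*time.Millisecond, 3)
}
```

With `NewFakeClock`, a test could call `BlockUntil(1)` and then `Advance(period)` repeatedly to deliver ticks deterministically.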
diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go new file mode 100644 index 000000000..1018051f4 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/clockwork.go @@ -0,0 +1,195 @@ +package clockwork + +import ( + "sync" + "time" +) + +// Clock provides an interface that packages can use instead of directly +// using the time module, so that chronology-related behavior can be tested +type Clock interface { + After(d time.Duration) <-chan time.Time + Sleep(d time.Duration) + Now() time.Time + Since(t time.Time) time.Duration + NewTicker(d time.Duration) Ticker +} + +// FakeClock provides an interface for a clock which can be +// manually advanced through time +type FakeClock interface { + Clock + // Advance advances the FakeClock to a new point in time, ensuring any existing + // sleepers are notified appropriately before returning + Advance(d time.Duration) + // BlockUntil will block until the FakeClock has the given number of + // sleepers (callers of Sleep or After) + BlockUntil(n int) +} + +// NewRealClock returns a Clock which simply delegates calls to the actual time +// package; it should be used by packages in production. +func NewRealClock() Clock { + return &realClock{} +} + +// NewFakeClock returns a FakeClock implementation which can be +// manually advanced through time for testing. The initial time of the +// FakeClock will be an arbitrary non-zero time. +func NewFakeClock() FakeClock { + // use a fixture that does not fulfill Time.IsZero() + return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC)) +} + +// NewFakeClockAt returns a FakeClock initialised at the given time.Time. +func NewFakeClockAt(t time.Time) FakeClock { + return &fakeClock{ + time: t, + } +} + +type realClock struct{} + +func (rc *realClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (rc *realClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +func (rc *realClock) Now() time.Time { + return time.Now() +} + +func (rc *realClock) Since(t time.Time) time.Duration { + return rc.Now().Sub(t) +} + +func (rc *realClock) NewTicker(d time.Duration) Ticker { + return &realTicker{time.NewTicker(d)} +} + +type fakeClock struct { + sleepers []*sleeper + blockers []*blocker + time time.Time + + l sync.RWMutex +} + +// sleeper represents a caller of After or Sleep +type sleeper struct { + until time.Time + done chan time.Time +} + +// blocker represents a caller of BlockUntil +type blocker struct { + count int + ch chan struct{} +} + +// After mimics time.After; it waits for the given duration to elapse on the +// fakeClock, then sends the current time on the returned channel. +func (fc *fakeClock) After(d time.Duration) <-chan time.Time { + fc.l.Lock() + defer fc.l.Unlock() + now := fc.time + done := make(chan time.Time, 1) + if d.Nanoseconds() <= 0 { + // special case - trigger immediately + done <- now + } else { + // otherwise, add to the set of sleepers + s := &sleeper{ + until: now.Add(d), + done: done, + } + fc.sleepers = append(fc.sleepers, s) + // and notify any blockers + fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) + } + return done +} + +// notifyBlockers notifies all the blockers waiting until the +// given number of sleepers are waiting on the fakeClock. It +// returns an updated slice of blockers (i.e. 
those still waiting) +func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { + for _, b := range blockers { + if b.count == count { + close(b.ch) + } else { + newBlockers = append(newBlockers, b) + } + } + return +} + +// Sleep blocks until the given duration has passed on the fakeClock +func (fc *fakeClock) Sleep(d time.Duration) { + <-fc.After(d) +} + +// Time returns the current time of the fakeClock +func (fc *fakeClock) Now() time.Time { + fc.l.RLock() + t := fc.time + fc.l.RUnlock() + return t +} + +// Since returns the duration that has passed since the given time on the fakeClock +func (fc *fakeClock) Since(t time.Time) time.Duration { + return fc.Now().Sub(t) +} + +func (fc *fakeClock) NewTicker(d time.Duration) Ticker { + ft := &fakeTicker{ + c: make(chan time.Time, 1), + stop: make(chan bool, 1), + clock: fc, + period: d, + } + ft.runTickThread() + return ft +} + +// Advance advances fakeClock to a new point in time, ensuring channels from any +// previous invocations of After are notified appropriately before returning +func (fc *fakeClock) Advance(d time.Duration) { + fc.l.Lock() + defer fc.l.Unlock() + end := fc.time.Add(d) + var newSleepers []*sleeper + for _, s := range fc.sleepers { + if end.Sub(s.until) >= 0 { + s.done <- end + } else { + newSleepers = append(newSleepers, s) + } + } + fc.sleepers = newSleepers + fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) + fc.time = end +} + +// BlockUntil will block until the fakeClock has the given number of sleepers +// (callers of Sleep or After) +func (fc *fakeClock) BlockUntil(n int) { + fc.l.Lock() + // Fast path: current number of sleepers is what we're looking for + if len(fc.sleepers) == n { + fc.l.Unlock() + return + } + // Otherwise, set up a new blocker + b := &blocker{ + count: n, + ch: make(chan struct{}), + } + fc.blockers = append(fc.blockers, b) + fc.l.Unlock() + <-b.ch +} diff --git a/vendor/github.com/jonboulle/clockwork/ticker.go b/vendor/github.com/jonboulle/clockwork/ticker.go new file mode 100644 index 000000000..32b5d01e7 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/ticker.go @@ -0,0 +1,72 @@ +package clockwork + +import ( + "time" +) + +// Ticker provides an interface which can be used instead of directly +// using the ticker within the time module. The real-time ticker t +// provides ticks through t.C which becomes now t.Chan() to make +// this channel requirement definable in this interface. +type Ticker interface { + Chan() <-chan time.Time + Stop() +} + +type realTicker struct{ *time.Ticker } + +func (rt *realTicker) Chan() <-chan time.Time { + return rt.C +} + +type fakeTicker struct { + c chan time.Time + stop chan bool + clock FakeClock + period time.Duration +} + +func (ft *fakeTicker) Chan() <-chan time.Time { + return ft.c +} + +func (ft *fakeTicker) Stop() { + ft.stop <- true +} + +// runTickThread initializes a background goroutine to send the tick time to the ticker channel +// after every period. Tick events are discarded if the underlying ticker channel does not have +// enough capacity. +func (ft *fakeTicker) runTickThread() { + nextTick := ft.clock.Now().Add(ft.period) + next := ft.clock.After(ft.period) + go func() { + for { + select { + case <-ft.stop: + return + case <-next: + // We send the time that the tick was supposed to occur at. + tick := nextTick + // Before sending the tick, we'll compute the next tick time and star the clock.After call. 
+ now := ft.clock.Now() + // First, figure out how many periods there have been between "now" and the time we were + // supposed to have trigged, then advance over all of those. + skipTicks := (now.Sub(tick) + ft.period - 1) / ft.period + nextTick = nextTick.Add(skipTicks * ft.period) + // Now, keep advancing until we are past now. This should happen at most once. + for !nextTick.After(now) { + nextTick = nextTick.Add(ft.period) + } + // Figure out how long between now and the next scheduled tick, then wait that long. + remaining := nextTick.Sub(now) + next = ft.clock.After(remaining) + // Finally, we can actually send the tick. + select { + case ft.c <- tick: + default: + } + } + } + }() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go new file mode 100644 index 000000000..e56e17734 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go @@ -0,0 +1,160 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonmergepatch + +import ( + "fmt" + "reflect" + + "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" +) + +// Create a 3-way merge patch based-on JSON merge patch. +// Calculate addition-and-change patch between current and modified. +// Calculate deletion patch between original and modified. +func CreateThreeWayJSONMergePatch(original, modified, current []byte, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + if len(original) == 0 { + original = []byte(`{}`) + } + if len(modified) == 0 { + modified = []byte(`{}`) + } + if len(current) == 0 { + current = []byte(`{}`) + } + + addAndChangePatch, err := jsonpatch.CreateMergePatch(current, modified) + if err != nil { + return nil, err + } + // Only keep addition and changes + addAndChangePatch, addAndChangePatchObj, err := keepOrDeleteNullInJsonPatch(addAndChangePatch, false) + if err != nil { + return nil, err + } + + deletePatch, err := jsonpatch.CreateMergePatch(original, modified) + if err != nil { + return nil, err + } + // Only keep deletion + deletePatch, deletePatchObj, err := keepOrDeleteNullInJsonPatch(deletePatch, true) + if err != nil { + return nil, err + } + + hasConflicts, err := mergepatch.HasConflicts(addAndChangePatchObj, deletePatchObj) + if err != nil { + return nil, err + } + if hasConflicts { + return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(addAndChangePatchObj), mergepatch.ToYAMLOrError(deletePatchObj)) + } + patch, err := jsonpatch.MergePatch(deletePatch, addAndChangePatch) + if err != nil { + return nil, err + } + + var patchMap map[string]interface{} + err = json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshal patch for precondition check: %s", patch) + } + meetPreconditions, err := meetPreconditions(patchMap, fns...) 
+ if err != nil { + return nil, err + } + if !meetPreconditions { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + + return patch, nil +} + +// keepOrDeleteNullInJsonPatch takes a json-encoded byte array and a boolean. +// It returns a filtered object and its corresponding json-encoded byte array. +// It is a wrapper of func keepOrDeleteNullInObj +func keepOrDeleteNullInJsonPatch(patch []byte, keepNull bool) ([]byte, map[string]interface{}, error) { + var patchMap map[string]interface{} + err := json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, nil, err + } + filteredMap, err := keepOrDeleteNullInObj(patchMap, keepNull) + if err != nil { + return nil, nil, err + } + o, err := json.Marshal(filteredMap) + return o, filteredMap, err +} + +// keepOrDeleteNullInObj will keep only the null value and delete all the others, +// if keepNull is true. Otherwise, it will delete all the null value and keep the others. +func keepOrDeleteNullInObj(m map[string]interface{}, keepNull bool) (map[string]interface{}, error) { + filteredMap := make(map[string]interface{}) + var err error + for key, val := range m { + switch { + case keepNull && val == nil: + filteredMap[key] = nil + case val != nil: + switch typedVal := val.(type) { + case map[string]interface{}: + // Explicitly-set empty maps are treated as values instead of empty patches + if len(typedVal) == 0 { + if !keepNull { + filteredMap[key] = typedVal + } + continue + } + + var filteredSubMap map[string]interface{} + filteredSubMap, err = keepOrDeleteNullInObj(typedVal, keepNull) + if err != nil { + return nil, err + } + + // If the returned filtered submap was empty, this is an empty patch for the entire subdict, so the key + // should not be set + if len(filteredSubMap) != 0 { + filteredMap[key] = filteredSubMap + } + + case []interface{}, string, float64, bool, int64, nil: + // Lists are always replaced in Json, no need to check each entry in the list. + if !keepNull { + filteredMap[key] = val + } + default: + return nil, fmt.Errorf("unknown type: %v", reflect.TypeOf(typedVal)) + } + } + } + return filteredMap, nil +} + +func meetPreconditions(patchObj map[string]interface{}, fns ...mergepatch.PreconditionFunc) (bool, error) { + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchObj) { + return false, fmt.Errorf("precondition failed for: %v", patchObj) + } + } + return true, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go new file mode 100644 index 000000000..2f13cf969 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go @@ -0,0 +1,790 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "fmt" + "io" + "net/http" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + "k8s.io/klog/v2" + "k8s.io/kubectl/pkg/cmd/delete" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/openapi" + "k8s.io/kubectl/pkg/util/prune" + "k8s.io/kubectl/pkg/util/templates" + "k8s.io/kubectl/pkg/validation" +) + +// ApplyFlags directly reflect the information that CLI is gathering via flags. They will be converted to Options, which +// reflect the runtime requirements for the command. This structure reduces the transformation to wiring and makes +// the logic itself easy to unit test +type ApplyFlags struct { + Factory cmdutil.Factory + + RecordFlags *genericclioptions.RecordFlags + PrintFlags *genericclioptions.PrintFlags + + DeleteFlags *delete.DeleteFlags + + FieldManager string + Selector string + Prune bool + PruneResources []prune.Resource + All bool + Overwrite bool + OpenAPIPatch bool + PruneWhitelist []string + + genericclioptions.IOStreams +} + +// ApplyOptions defines flags and other configuration parameters for the `apply` command +type ApplyOptions struct { + Recorder genericclioptions.Recorder + + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) + + DeleteOptions *delete.DeleteOptions + + ServerSideApply bool + ForceConflicts bool + FieldManager string + Selector string + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.QueryParamVerifier + FieldValidationVerifier *resource.QueryParamVerifier + Prune bool + PruneResources []prune.Resource + cmdBaseName string + All bool + Overwrite bool + OpenAPIPatch bool + PruneWhitelist []string + + ValidationDirective string + Validator validation.Schema + Builder *resource.Builder + Mapper meta.RESTMapper + DynamicClient dynamic.Interface + OpenAPISchema openapi.Resources + + Namespace string + EnforceNamespace bool + + genericclioptions.IOStreams + + // Objects (and some denormalized data) which are to be + // applied. The standard way to fill in this structure + // is by calling "GetObjects()", which will use the + // resource builder if "objectsCached" is false. The other + // way to set this field is to use "SetObjects()". + // Subsequent calls to "GetObjects()" after setting would + // not call the resource builder; only return the set objects. + objects []*resource.Info + objectsCached bool + + // Stores visited objects/namespaces for later use + // calculating the set of objects to prune. + VisitedUids sets.String + VisitedNamespaces sets.String + + // Function run after the objects are generated and + // stored in the "objects" field, but before the + // apply is run on these objects. + PreProcessorFn func() error + // Function run after all objects have been applied. + // The standard PostProcessorFn is "PrintAndPrunePostProcessor()". 
+ PostProcessorFn func() error +} + +var ( + applyLong = templates.LongDesc(i18n.T(` + Apply a configuration to a resource by file name or stdin. + The resource name must be specified. This resource will be created if it doesn't exist yet. + To use 'apply', always create the resource initially with either 'apply' or 'create --save-config'. + + JSON and YAML formats are accepted. + + Alpha Disclaimer: the --prune functionality is not yet complete. Do not use unless you are aware of what the current state is. See https://issues.k8s.io/34274.`)) + + applyExample = templates.Examples(i18n.T(` + # Apply the configuration in pod.json to a pod + kubectl apply -f ./pod.json + + # Apply resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml + kubectl apply -k dir/ + + # Apply the JSON passed into stdin to a pod + cat pod.json | kubectl apply -f - + + # Apply the configuration from all files that end with '.json' - i.e. expand wildcard characters in file names + kubectl apply -f '*.json' + + # Note: --prune is still in Alpha + # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx + kubectl apply --prune -f manifest.yaml -l app=nginx + + # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file + kubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/ConfigMap`)) + + warningNoLastAppliedConfigAnnotation = "Warning: resource %[1]s is missing the %[2]s annotation which is required by %[3]s apply. %[3]s apply should only be used on resources created declaratively by either %[3]s create --save-config or %[3]s apply. The missing annotation will be patched automatically.\n" + warningChangesOnDeletingResource = "Warning: Detected changes to resource %[1]s which is currently being deleted.\n" +) + +// NewApplyFlags returns a default ApplyFlags +func NewApplyFlags(f cmdutil.Factory, streams genericclioptions.IOStreams) *ApplyFlags { + return &ApplyFlags{ + Factory: f, + RecordFlags: genericclioptions.NewRecordFlags(), + DeleteFlags: delete.NewDeleteFlags("that contains the configuration to apply"), + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + + Overwrite: true, + OpenAPIPatch: true, + + IOStreams: streams, + } +} + +// NewCmdApply creates the `apply` command +func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { + flags := NewApplyFlags(f, ioStreams) + + cmd := &cobra.Command{ + Use: "apply (-f FILENAME | -k DIRECTORY)", + DisableFlagsInUseLine: true, + Short: i18n.T("Apply a configuration to a resource by file name or stdin"), + Long: applyLong, + Example: applyExample, + Run: func(cmd *cobra.Command, args []string) { + o, err := flags.ToOptions(cmd, baseName, args) + cmdutil.CheckErr(err) + cmdutil.CheckErr(o.Validate(cmd, args)) + cmdutil.CheckErr(o.Run()) + }, + } + + flags.AddFlags(cmd) + + // apply subcommands + cmd.AddCommand(NewCmdApplyViewLastApplied(flags.Factory, flags.IOStreams)) + cmd.AddCommand(NewCmdApplySetLastApplied(flags.Factory, flags.IOStreams)) + cmd.AddCommand(NewCmdApplyEditLastApplied(flags.Factory, flags.IOStreams)) + + return cmd +} + +// AddFlags registers flags for a cli +func (flags *ApplyFlags) AddFlags(cmd *cobra.Command) { + // bind flag structs + flags.DeleteFlags.AddFlags(cmd) + flags.RecordFlags.AddFlags(cmd) + flags.PrintFlags.AddFlags(cmd) + + cmdutil.AddValidateFlags(cmd) + 
cmdutil.AddDryRunFlag(cmd) + cmdutil.AddServerSideApplyFlags(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &flags.FieldManager, FieldManagerClientSideApply) + cmdutil.AddLabelSelectorFlagVar(cmd, &flags.Selector) + + cmd.Flags().BoolVar(&flags.Overwrite, "overwrite", flags.Overwrite, "Automatically resolve conflicts between the modified and live configuration by using values from the modified configuration") + cmd.Flags().BoolVar(&flags.Prune, "prune", flags.Prune, "Automatically delete resource objects, that do not appear in the configs and are created by either apply or create --save-config. Should be used with either -l or --all.") + cmd.Flags().BoolVar(&flags.All, "all", flags.All, "Select all resources in the namespace of the specified resource types.") + cmd.Flags().StringArrayVar(&flags.PruneWhitelist, "prune-whitelist", flags.PruneWhitelist, "Overwrite the default whitelist with for --prune") + cmd.Flags().BoolVar(&flags.OpenAPIPatch, "openapi-patch", flags.OpenAPIPatch, "If true, use openapi to calculate diff when the openapi presents and the resource can be found in the openapi spec. Otherwise, fall back to use baked-in types.") +} + +// ToOptions converts from CLI inputs to runtime inputs +func (flags *ApplyFlags) ToOptions(cmd *cobra.Command, baseName string, args []string) (*ApplyOptions, error) { + serverSideApply := cmdutil.GetServerSideApplyFlag(cmd) + forceConflicts := cmdutil.GetForceConflictsFlag(cmd) + dryRunStrategy, err := cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return nil, err + } + + dynamicClient, err := flags.Factory.DynamicClient() + if err != nil { + return nil, err + } + + dryRunVerifier := resource.NewQueryParamVerifier(dynamicClient, flags.Factory.OpenAPIGetter(), resource.QueryParamDryRun) + fieldValidationVerifier := resource.NewQueryParamVerifier(dynamicClient, flags.Factory.OpenAPIGetter(), resource.QueryParamFieldValidation) + fieldManager := GetApplyFieldManagerFlag(cmd, serverSideApply) + + // allow for a success message operation to be specified at print time + toPrinter := func(operation string) (printers.ResourcePrinter, error) { + flags.PrintFlags.NamePrintFlags.Operation = operation + cmdutil.PrintFlagsWithDryRunStrategy(flags.PrintFlags, dryRunStrategy) + return flags.PrintFlags.ToPrinter() + } + + flags.RecordFlags.Complete(cmd) + recorder, err := flags.RecordFlags.ToRecorder() + if err != nil { + return nil, err + } + + deleteOptions, err := flags.DeleteFlags.ToOptions(dynamicClient, flags.IOStreams) + if err != nil { + return nil, err + } + + err = deleteOptions.FilenameOptions.RequireFilenameOrKustomize() + if err != nil { + return nil, err + } + + openAPISchema, _ := flags.Factory.OpenAPISchema() + + validationDirective, err := cmdutil.GetValidationDirective(cmd) + if err != nil { + return nil, err + } + validator, err := flags.Factory.Validator(validationDirective, fieldValidationVerifier) + if err != nil { + return nil, err + } + builder := flags.Factory.NewBuilder() + mapper, err := flags.Factory.ToRESTMapper() + if err != nil { + return nil, err + } + + namespace, enforceNamespace, err := flags.Factory.ToRawKubeConfigLoader().Namespace() + if err != nil { + return nil, err + } + + if flags.Prune { + flags.PruneResources, err = prune.ParseResources(mapper, flags.PruneWhitelist) + if err != nil { + return nil, err + } + } + + o := &ApplyOptions{ + // Store baseName for use in printing warnings / messages involving the base command name. + // This is useful for downstream command that wrap this one. 
+ cmdBaseName: baseName, + + PrintFlags: flags.PrintFlags, + + DeleteOptions: deleteOptions, + ToPrinter: toPrinter, + ServerSideApply: serverSideApply, + ForceConflicts: forceConflicts, + FieldManager: fieldManager, + Selector: flags.Selector, + DryRunStrategy: dryRunStrategy, + DryRunVerifier: dryRunVerifier, + Prune: flags.Prune, + PruneResources: flags.PruneResources, + All: flags.All, + Overwrite: flags.Overwrite, + OpenAPIPatch: flags.OpenAPIPatch, + PruneWhitelist: flags.PruneWhitelist, + + Recorder: recorder, + Namespace: namespace, + EnforceNamespace: enforceNamespace, + Validator: validator, + ValidationDirective: validationDirective, + Builder: builder, + Mapper: mapper, + DynamicClient: dynamicClient, + OpenAPISchema: openAPISchema, + + IOStreams: flags.IOStreams, + + objects: []*resource.Info{}, + objectsCached: false, + + VisitedUids: sets.NewString(), + VisitedNamespaces: sets.NewString(), + } + + o.PostProcessorFn = o.PrintAndPrunePostProcessor() + + return o, nil +} + +// Validate verifies if ApplyOptions are valid and without conflicts. +func (o *ApplyOptions) Validate(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) + } + + if o.ForceConflicts && !o.ServerSideApply { + return fmt.Errorf("--force-conflicts only works with --server-side") + } + + if o.DryRunStrategy == cmdutil.DryRunClient && o.ServerSideApply { + return fmt.Errorf("--dry-run=client doesn't work with --server-side (did you mean --dry-run=server instead?)") + } + + if o.ServerSideApply && o.DeleteOptions.ForceDeletion { + return fmt.Errorf("--force cannot be used with --server-side") + } + + if o.DryRunStrategy == cmdutil.DryRunServer && o.DeleteOptions.ForceDeletion { + return fmt.Errorf("--dry-run=server cannot be used with --force") + } + + if o.All && len(o.Selector) > 0 { + return fmt.Errorf("cannot set --all and --selector at the same time") + } + + if o.Prune && !o.All && o.Selector == "" { + return fmt.Errorf("all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector") + } + + return nil +} + +func isIncompatibleServerError(err error) bool { + // 415: Unsupported media type means we're talking to a server which doesn't + // support server-side apply. + if _, ok := err.(*errors.StatusError); !ok { + // Non-StatusError means the error isn't because the server is incompatible. + return false + } + return err.(*errors.StatusError).Status().Code == http.StatusUnsupportedMediaType +} + +// GetObjects returns a (possibly cached) version of all the valid objects to apply +// as a slice of pointer to resource.Info and an error if one or more occurred. +// IMPORTANT: This function can return both valid objects AND an error, since +// "ContinueOnError" is set on the builder. This function should not be called +// until AFTER the "complete" and "validate" methods have been called to ensure that +// the ApplyOptions is filled in and valid. +func (o *ApplyOptions) GetObjects() ([]*resource.Info, error) { + var err error = nil + if !o.objectsCached { + r := o.Builder. + Unstructured(). + Schema(o.Validator). + ContinueOnError(). + NamespaceParam(o.Namespace).DefaultNamespace(). + FilenameParam(o.EnforceNamespace, &o.DeleteOptions.FilenameOptions). + LabelSelectorParam(o.Selector). + Flatten(). 
+ Do() + o.objects, err = r.Infos() + o.objectsCached = true + } + return o.objects, err +} + +// SetObjects stores the set of objects (as resource.Info) to be +// subsequently applied. +func (o *ApplyOptions) SetObjects(infos []*resource.Info) { + o.objects = infos + o.objectsCached = true +} + +// Run executes the `apply` command. +func (o *ApplyOptions) Run() error { + if o.PreProcessorFn != nil { + klog.V(4).Infof("Running apply pre-processor function") + if err := o.PreProcessorFn(); err != nil { + return err + } + } + + // Enforce CLI specified namespace on server request. + if o.EnforceNamespace { + o.VisitedNamespaces.Insert(o.Namespace) + } + + // Generates the objects using the resource builder if they have not + // already been stored by calling "SetObjects()" in the pre-processor. + errs := []error{} + infos, err := o.GetObjects() + if err != nil { + errs = append(errs, err) + } + if len(infos) == 0 && len(errs) == 0 { + return fmt.Errorf("no objects passed to apply") + } + // Iterate through all objects, applying each one. + for _, info := range infos { + if err := o.applyOneObject(info); err != nil { + errs = append(errs, err) + } + } + // If any errors occurred during apply, then return error (or + // aggregate of errors). + if len(errs) == 1 { + return errs[0] + } + if len(errs) > 1 { + return utilerrors.NewAggregate(errs) + } + + if o.PostProcessorFn != nil { + klog.V(4).Infof("Running apply post-processor function") + if err := o.PostProcessorFn(); err != nil { + return err + } + } + + return nil +} + +func (o *ApplyOptions) applyOneObject(info *resource.Info) error { + o.MarkNamespaceVisited(info) + + if err := o.Recorder.Record(info.Object); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } + + if len(info.Name) == 0 { + metadata, _ := meta.Accessor(info.Object) + generatedName := metadata.GetGenerateName() + if len(generatedName) > 0 { + return fmt.Errorf("from %s: cannot use generate name with apply", generatedName) + } + } + + helper := resource.NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.FieldManager). + WithFieldValidation(o.ValidationDirective) + + if o.DryRunStrategy == cmdutil.DryRunServer { + // Ensure the APIServer supports server-side dry-run for the resource, + // otherwise fail early. + // For APIServers that don't support server-side dry-run will persist + // changes. + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return err + } + } + + if o.ServerSideApply { + // Send the full object to be applied on the server side. + data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object) + if err != nil { + return cmdutil.AddSourceToErr("serverside-apply", info.Source, err) + } + + options := metav1.PatchOptions{ + Force: &o.ForceConflicts, + } + obj, err := helper.Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + data, + &options, + ) + if err != nil { + if isIncompatibleServerError(err) { + err = fmt.Errorf("Server-side apply not available on the server: (%v)", err) + } + if errors.IsConflict(err) { + err = fmt.Errorf(`%v +Please review the fields above--they currently have other managers. Here +are the ways you can resolve this warning: +* If you intend to manage all of these fields, please re-run the apply + command with the `+"`--force-conflicts`"+` flag. 
+* If you do not intend to manage all of the fields, please edit your + manifest to remove references to the fields that should keep their + current managers. +* You may co-own fields by updating your manifest to match the existing + value; in this case, you'll become the manager if the other manager(s) + stop managing the field (remove it from their configuration). +See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`, err) + } + return err + } + + info.Refresh(obj, true) + + WarnIfDeleting(info.Object, o.ErrOut) + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("serverside-applied") + if err != nil { + return err + } + + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + + // Get the modified configuration of the object. Embed the result + // as an annotation in the modified configuration, so that it will appear + // in the patch sent to the server. + modified, err := util.GetModifiedConfiguration(info.Object, true, unstructured.UnstructuredJSONScheme) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%s\nfor:", info.String()), info.Source, err) + } + + if err := info.Get(); err != nil { + if !errors.IsNotFound(err) { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + + // Create the resource if it doesn't exist + // First, update the annotation used by kubectl apply + if err := util.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + // Then create the resource and skip the three-way merge + obj, err := helper.Create(info.Namespace, true, info.Object) + if err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + info.Refresh(obj, true) + } + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("created") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + metadata, _ := meta.Accessor(info.Object) + annotationMap := metadata.GetAnnotations() + if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok { + fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, info.ObjectName(), corev1.LastAppliedConfigAnnotation, o.cmdBaseName) + } + + patcher, err := newPatcher(o, info, helper) + if err != nil { + return err + } + patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) + } + + info.Refresh(patchedObject, true) + + WarnIfDeleting(info.Object, o.ErrOut) + + if string(patchBytes) == "{}" && !o.shouldPrintObject() { + printer, err := o.ToPrinter("unchanged") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("configured") + if err != nil 
{ + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + + return nil +} + +func (o *ApplyOptions) shouldPrintObject() bool { + // Print object only if output format other than "name" is specified + shouldPrint := false + output := *o.PrintFlags.OutputFormat + shortOutput := output == "name" + if len(output) > 0 && !shortOutput { + shouldPrint = true + } + return shouldPrint +} + +func (o *ApplyOptions) printObjects() error { + + if !o.shouldPrintObject() { + return nil + } + + infos, err := o.GetObjects() + if err != nil { + return err + } + + if len(infos) > 0 { + printer, err := o.ToPrinter("") + if err != nil { + return err + } + + objToPrint := infos[0].Object + if len(infos) > 1 { + objs := []runtime.Object{} + for _, info := range infos { + objs = append(objs, info.Object) + } + list := &corev1.List{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + ListMeta: metav1.ListMeta{}, + } + if err := meta.SetList(list, objs); err != nil { + return err + } + + objToPrint = list + } + if err := printer.PrintObj(objToPrint, o.Out); err != nil { + return err + } + } + + return nil +} + +// MarkNamespaceVisited keeps track of which namespaces the applied +// objects belong to. Used for pruning. +func (o *ApplyOptions) MarkNamespaceVisited(info *resource.Info) { + if info.Namespaced() { + o.VisitedNamespaces.Insert(info.Namespace) + } +} + +// MarkObjectVisited keeps track of UIDs of the applied +// objects. Used for pruning. +func (o *ApplyOptions) MarkObjectVisited(info *resource.Info) error { + metadata, err := meta.Accessor(info.Object) + if err != nil { + return err + } + o.VisitedUids.Insert(string(metadata.GetUID())) + return nil +} + +// PrintAndPrunePostProcessor returns a function which meets the PostProcessorFn +// function signature. This returned function prints all the +// objects as a list (if configured for that), and prunes the +// objects not applied. The returned function is the standard +// apply post processor. +func (o *ApplyOptions) PrintAndPrunePostProcessor() func() error { + + return func() error { + if err := o.printObjects(); err != nil { + return err + } + + if o.Prune { + p := newPruner(o) + return p.pruneAll(o) + } + + return nil + } +} + +const ( + // FieldManagerClientSideApply is the default client-side apply field manager. + // + // The default field manager is not `kubectl-apply` to distinguish from + // server-side apply. + FieldManagerClientSideApply = "kubectl-client-side-apply" + // The default server-side apply field manager is `kubectl` + // instead of a field manager like `kubectl-server-side-apply` + // for backward compatibility to not conflict with old versions + // of kubectl server-side apply where `kubectl` has already been the field manager. + fieldManagerServerSideApply = "kubectl" +) + +// GetApplyFieldManagerFlag gets the field manager for kubectl apply +// if it is not set. +// +// The default field manager is not `kubectl-apply` to distinguish between +// client-side and server-side apply. 
+func GetApplyFieldManagerFlag(cmd *cobra.Command, serverSide bool) string { + // The field manager flag was set + if cmd.Flag("field-manager").Changed { + return cmdutil.GetFlagString(cmd, "field-manager") + } + + if serverSide { + return fieldManagerServerSideApply + } + + return FieldManagerClientSideApply +} + +// WarnIfDeleting prints a warning if a resource is being deleted +func WarnIfDeleting(obj runtime.Object, stderr io.Writer) { + metadata, _ := meta.Accessor(obj) + if metadata != nil && metadata.GetDeletionTimestamp() != nil { + // just warn the user about the conflict + fmt.Fprintf(stderr, warningChangesOnDeletingResource, metadata.GetName()) + } +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go new file mode 100644 index 000000000..7ef35fca7 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + applyEditLastAppliedLong = templates.LongDesc(i18n.T(` + Edit the latest last-applied-configuration annotations of resources from the default editor. + + The edit-last-applied command allows you to directly edit any API resource you can retrieve via the + command-line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR + environment variables, or fall back to 'vi' for Linux or 'notepad' for Windows. + You can edit multiple objects, although changes are applied one at a time. The command + accepts file names as well as command-line arguments, although the files you point to must + be previously saved versions of resources. + + The default format is YAML. To edit in JSON, specify "-o json". + + The flag --windows-line-endings can be used to force Windows line endings, + otherwise the default for your operating system will be used. + + In the event an error occurs while updating, a temporary file will be created on disk + that contains your unapplied changes. The most common error when updating a resource + is another editor changing the resource on the server. When this occurs, you will have + to apply your changes to the newer version of the resource, or update your temporary + saved copy to include the latest resource version.`)) + + applyEditLastAppliedExample = templates.Examples(` + # Edit the last-applied-configuration annotations by type/name in YAML + kubectl apply edit-last-applied deployment/nginx + + # Edit the last-applied-configuration annotations by file in JSON + kubectl apply edit-last-applied -f deploy.yaml -o json`) +) + +// NewCmdApplyEditLastApplied created the cobra CLI command for the `apply edit-last-applied` command. 
+func NewCmdApplyEditLastApplied(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { + o := editor.NewEditOptions(editor.ApplyEditMode, ioStreams) + + cmd := &cobra.Command{ + Use: "edit-last-applied (RESOURCE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, + Short: i18n.T("Edit latest last-applied-configuration annotations of a resource/object"), + Long: applyEditLastAppliedLong, + Example: applyEditLastAppliedExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, args, cmd)) + cmdutil.CheckErr(o.Run()) + }, + } + + // bind flag structs + o.RecordFlags.AddFlags(cmd) + o.PrintFlags.AddFlags(cmd) + + usage := "to use to edit the resource" + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings, + "Defaults to the line ending native to your platform.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, FieldManagerClientSideApply) + cmdutil.AddValidateFlags(cmd) + + return cmd +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go new file mode 100644 index 000000000..86b49f490 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go @@ -0,0 +1,229 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +// SetLastAppliedOptions defines options for the `apply set-last-applied` command.` +type SetLastAppliedOptions struct { + CreateAnnotation bool + + PrintFlags *genericclioptions.PrintFlags + PrintObj printers.ResourcePrinterFunc + + FilenameOptions resource.FilenameOptions + + infoList []*resource.Info + namespace string + enforceNamespace bool + dryRunStrategy cmdutil.DryRunStrategy + dryRunVerifier *resource.QueryParamVerifier + shortOutput bool + output string + patchBufferList []PatchBuffer + builder *resource.Builder + unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) + + genericclioptions.IOStreams +} + +// PatchBuffer caches changes that are to be applied. +type PatchBuffer struct { + Patch []byte + PatchType types.PatchType +} + +var ( + applySetLastAppliedLong = templates.LongDesc(i18n.T(` + Set the latest last-applied-configuration annotations by setting it to match the contents of a file. 
+ This results in the last-applied-configuration being updated as though 'kubectl apply -f ' was run, + without updating any other parts of the object.`)) + + applySetLastAppliedExample = templates.Examples(i18n.T(` + # Set the last-applied-configuration of a resource to match the contents of a file + kubectl apply set-last-applied -f deploy.yaml + + # Execute set-last-applied against each configuration file in a directory + kubectl apply set-last-applied -f path/ + + # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist + kubectl apply set-last-applied -f deploy.yaml --create-annotation=true + `)) +) + +// NewSetLastAppliedOptions takes option arguments from a CLI stream and returns it at SetLastAppliedOptions type. +func NewSetLastAppliedOptions(ioStreams genericclioptions.IOStreams) *SetLastAppliedOptions { + return &SetLastAppliedOptions{ + PrintFlags: genericclioptions.NewPrintFlags("configured").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdApplySetLastApplied creates the cobra CLI `apply` subcommand `set-last-applied`.` +func NewCmdApplySetLastApplied(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { + o := NewSetLastAppliedOptions(ioStreams) + cmd := &cobra.Command{ + Use: "set-last-applied -f FILENAME", + DisableFlagsInUseLine: true, + Short: i18n.T("Set the last-applied-configuration annotation on a live object to match the contents of a file"), + Long: applySetLastAppliedLong, + Example: applySetLastAppliedExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunSetLastApplied()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().BoolVar(&o.CreateAnnotation, "create-annotation", o.CreateAnnotation, "Will create 'last-applied-configuration' annotations if current objects doesn't have one") + cmdutil.AddJsonFilenameFlag(cmd.Flags(), &o.FilenameOptions.Filenames, "Filename, directory, or URL to files that contains the last-applied-configuration annotations") + + return cmd +} + +// Complete populates dry-run and output flag options. +func (o *SetLastAppliedOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { + var err error + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + o.dryRunVerifier = resource.NewQueryParamVerifier(dynamicClient, f.OpenAPIGetter(), resource.QueryParamDryRun) + o.output = cmdutil.GetFlagString(cmd, "output") + o.shortOutput = o.output == "name" + + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + o.builder = f.NewBuilder() + o.unstructuredClientForMapping = f.UnstructuredClientForMapping + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = printer.PrintObj + + return nil +} + +// Validate checks SetLastAppliedOptions for validity. +func (o *SetLastAppliedOptions) Validate() error { + r := o.builder. + Unstructured(). + NamespaceParam(o.namespace).DefaultNamespace(). + FilenameParam(o.enforceNamespace, &o.FilenameOptions). + Flatten(). 
+ Do() + + err := r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + patchBuf, diffBuf, patchType, err := editor.GetApplyPatch(info.Object.(runtime.Unstructured)) + if err != nil { + return err + } + + // Verify the object exists in the cluster before trying to patch it. + if err := info.Get(); err != nil { + if errors.IsNotFound(err) { + return err + } + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + originalBuf, err := util.GetOriginalConfiguration(info.Object) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + if originalBuf == nil && !o.CreateAnnotation { + return fmt.Errorf("no last-applied-configuration annotation found on resource: %s, to create the annotation, run the command with --create-annotation", info.Name) + } + + //only add to PatchBufferList when changed + if !bytes.Equal(cmdutil.StripComments(originalBuf), cmdutil.StripComments(diffBuf)) { + p := PatchBuffer{Patch: patchBuf, PatchType: patchType} + o.patchBufferList = append(o.patchBufferList, p) + o.infoList = append(o.infoList, info) + } else { + fmt.Fprintf(o.Out, "set-last-applied %s: no changes required.\n", info.Name) + } + + return nil + }) + return err +} + +// RunSetLastApplied executes the `set-last-applied` command according to SetLastAppliedOptions. +func (o *SetLastAppliedOptions) RunSetLastApplied() error { + for i, patch := range o.patchBufferList { + info := o.infoList[i] + finalObj := info.Object + + if o.dryRunStrategy != cmdutil.DryRunClient { + mapping := info.ResourceMapping() + client, err := o.unstructuredClientForMapping(mapping) + if err != nil { + return err + } + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { + return err + } + } + helper := resource. + NewHelper(client, mapping). + DryRun(o.dryRunStrategy == cmdutil.DryRunServer) + finalObj, err = helper.Patch(info.Namespace, info.Name, patch.PatchType, patch.Patch, nil) + if err != nil { + return err + } + } + if err := o.PrintObj(finalObj, o.Out); err != nil { + return err + } + } + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go new file mode 100644 index 000000000..30983fe3c --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go @@ -0,0 +1,174 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/resource" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" + "sigs.k8s.io/yaml" +) + +// ViewLastAppliedOptions defines options for the `apply view-last-applied` command.` +type ViewLastAppliedOptions struct { + FilenameOptions resource.FilenameOptions + Selector string + LastAppliedConfigurationList []string + OutputFormat string + All bool + Factory cmdutil.Factory + + genericclioptions.IOStreams +} + +var ( + applyViewLastAppliedLong = templates.LongDesc(i18n.T(` + View the latest last-applied-configuration annotations by type/name or file. + + The default output will be printed to stdout in YAML format. You can use the -o option + to change the output format.`)) + + applyViewLastAppliedExample = templates.Examples(i18n.T(` + # View the last-applied-configuration annotations by type/name in YAML + kubectl apply view-last-applied deployment/nginx + + # View the last-applied-configuration annotations by file in JSON + kubectl apply view-last-applied -f deploy.yaml -o json`)) +) + +// NewViewLastAppliedOptions takes option arguments from a CLI stream and returns it at ViewLastAppliedOptions type. +func NewViewLastAppliedOptions(ioStreams genericclioptions.IOStreams) *ViewLastAppliedOptions { + return &ViewLastAppliedOptions{ + OutputFormat: "yaml", + + IOStreams: ioStreams, + } +} + +// NewCmdApplyViewLastApplied creates the cobra CLI `apply` subcommand `view-last-applied`.` +func NewCmdApplyViewLastApplied(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { + options := NewViewLastAppliedOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "view-last-applied (TYPE [NAME | -l label] | TYPE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, + Short: i18n.T("View the latest last-applied-configuration annotations of a resource/object"), + Long: applyViewLastAppliedLong, + Example: applyViewLastAppliedExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(options.Complete(cmd, f, args)) + cmdutil.CheckErr(options.Validate(cmd)) + cmdutil.CheckErr(options.RunApplyViewLastApplied(cmd)) + }, + } + + cmd.Flags().StringVarP(&options.OutputFormat, "output", "o", options.OutputFormat, `Output format. Must be one of (yaml, json)`) + cmd.Flags().BoolVar(&options.All, "all", options.All, "Select all resources in the namespace of the specified resource types") + usage := "that contains the last-applied-configuration annotations" + cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage) + cmdutil.AddLabelSelectorFlagVar(cmd, &options.Selector) + + return cmd +} + +// Complete checks an object for last-applied-configuration annotations. +func (o *ViewLastAppliedOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args []string) error { + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + r := f.NewBuilder(). + Unstructured(). + NamespaceParam(cmdNamespace).DefaultNamespace(). + FilenameParam(enforceNamespace, &o.FilenameOptions). + ResourceTypeOrNameArgs(enforceNamespace, args...). + SelectAllParam(o.All). + LabelSelectorParam(o.Selector). + Latest(). + Flatten(). 
+ Do() + err = r.Err() + if err != nil { + return err + } + + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + configString, err := util.GetOriginalConfiguration(info.Object) + if err != nil { + return err + } + if configString == nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("no last-applied-configuration annotation found on resource: %s\n", info.Name), info.Source, err) + } + o.LastAppliedConfigurationList = append(o.LastAppliedConfigurationList, string(configString)) + return nil + }) + + if err != nil { + return err + } + + return nil +} + +// Validate checks ViewLastAppliedOptions for validity. +func (o *ViewLastAppliedOptions) Validate(cmd *cobra.Command) error { + return nil +} + +// RunApplyViewLastApplied executes the `view-last-applied` command according to ViewLastAppliedOptions. +func (o *ViewLastAppliedOptions) RunApplyViewLastApplied(cmd *cobra.Command) error { + for _, str := range o.LastAppliedConfigurationList { + switch o.OutputFormat { + case "json": + jsonBuffer := &bytes.Buffer{} + err := json.Indent(jsonBuffer, []byte(str), "", " ") + if err != nil { + return err + } + fmt.Fprint(o.Out, string(jsonBuffer.Bytes())) + case "yaml": + yamlOutput, err := yaml.JSONToYAML([]byte(str)) + if err != nil { + return err + } + fmt.Fprint(o.Out, string(yamlOutput)) + default: + return cmdutil.UsageErrorf( + cmd, + "Unexpected -o output mode: %s, the flag 'output' must be one of yaml|json", + o.OutputFormat) + } + } + + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go b/vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go new file mode 100644 index 000000000..f14fdd066 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go @@ -0,0 +1,252 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "encoding/json" + "fmt" + "io" + "time" + + "github.com/jonboulle/clockwork" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/jsonmergepatch" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/cli-runtime/pkg/resource" + oapi "k8s.io/kube-openapi/pkg/util/proto" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/openapi" +) + +const ( + // maxPatchRetry is the maximum number of conflicts retry for during a patch operation before returning failure + maxPatchRetry = 5 + // backOffPeriod is the period to back off when apply patch results in error. + backOffPeriod = 1 * time.Second + // how many times we can retry before back off + triesBeforeBackOff = 1 +) + +// Patcher defines options to patch OpenAPI objects. 
+type Patcher struct { + Mapping *meta.RESTMapping + Helper *resource.Helper + + Overwrite bool + BackOff clockwork.Clock + + Force bool + CascadingStrategy metav1.DeletionPropagation + Timeout time.Duration + GracePeriod int + + // If set, forces the patch against a specific resourceVersion + ResourceVersion *string + + // Number of retries to make if the patch fails with conflict + Retries int + + OpenapiSchema openapi.Resources +} + +func newPatcher(o *ApplyOptions, info *resource.Info, helper *resource.Helper) (*Patcher, error) { + var openapiSchema openapi.Resources + if o.OpenAPIPatch { + openapiSchema = o.OpenAPISchema + } + + return &Patcher{ + Mapping: info.Mapping, + Helper: helper, + Overwrite: o.Overwrite, + BackOff: clockwork.NewRealClock(), + Force: o.DeleteOptions.ForceDeletion, + CascadingStrategy: o.DeleteOptions.CascadingStrategy, + Timeout: o.DeleteOptions.Timeout, + GracePeriod: o.DeleteOptions.GracePeriod, + OpenapiSchema: openapiSchema, + Retries: maxPatchRetry, + }, nil +} + +func (p *Patcher) delete(namespace, name string) error { + options := asDeleteOptions(p.CascadingStrategy, p.GracePeriod) + _, err := p.Helper.DeleteWithOptions(namespace, name, &options) + return err +} + +func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { + // Serialize the current configuration of the object from the server. + current, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf("serializing current configuration from:\n%v\nfor:", obj), source, err) + } + + // Retrieve the original configuration of the object from the annotation. + original, err := util.GetOriginalConfiguration(obj) + if err != nil { + return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf("retrieving original configuration from:\n%v\nfor:", obj), source, err) + } + + var patchType types.PatchType + var patch []byte + var lookupPatchMeta strategicpatch.LookupPatchMeta + var schema oapi.Schema + createPatchErrFormat := "creating patch with:\noriginal:\n%s\nmodified:\n%s\ncurrent:\n%s\nfor:" + + // Create the versioned struct from the type defined in the restmapping + // (which is the API version we'll be submitting the patch to) + versionedObject, err := scheme.Scheme.New(p.Mapping.GroupVersionKind) + switch { + case runtime.IsNotRegisteredError(err): + // fall back to generic JSON merge patch + patchType = types.MergePatchType + preconditions := []mergepatch.PreconditionFunc{mergepatch.RequireKeyUnchanged("apiVersion"), + mergepatch.RequireKeyUnchanged("kind"), mergepatch.RequireMetadataKeyUnchanged("name")} + patch, err = jsonmergepatch.CreateThreeWayJSONMergePatch(original, modified, current, preconditions...) + if err != nil { + if mergepatch.IsPreconditionFailed(err) { + return nil, nil, fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") + } + return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err) + } + case err != nil: + return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf("getting instance of versioned object for %v:", p.Mapping.GroupVersionKind), source, err) + case err == nil: + // Compute a three way strategic merge patch to send to server. + patchType = types.StrategicMergePatchType + + // Try to use openapi first if the openapi spec is available and can successfully calculate the patch. + // Otherwise, fall back to baked-in types. 
+ if p.OpenapiSchema != nil { + if schema = p.OpenapiSchema.LookupResource(p.Mapping.GroupVersionKind); schema != nil { + lookupPatchMeta = strategicpatch.PatchMetaFromOpenAPI{Schema: schema} + if openapiPatch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite); err != nil { + fmt.Fprintf(errOut, "warning: error calculating patch from openapi spec: %v\n", err) + } else { + patchType = types.StrategicMergePatchType + patch = openapiPatch + } + } + } + + if patch == nil { + lookupPatchMeta, err = strategicpatch.NewPatchMetaFromStruct(versionedObject) + if err != nil { + return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err) + } + patch, err = strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite) + if err != nil { + return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err) + } + } + } + + if string(patch) == "{}" { + return patch, obj, nil + } + + if p.ResourceVersion != nil { + patch, err = addResourceVersion(patch, *p.ResourceVersion) + if err != nil { + return nil, nil, cmdutil.AddSourceToErr("Failed to insert resourceVersion in patch", source, err) + } + } + + patchedObj, err := p.Helper.Patch(namespace, name, patchType, patch, nil) + return patch, patchedObj, err +} + +// Patch tries to patch an OpenAPI resource. On success, returns the merge patch as well +// the final patched object. On failure, returns an error. +func (p *Patcher) Patch(current runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { + var getErr error + patchBytes, patchObject, err := p.patchSimple(current, modified, source, namespace, name, errOut) + if p.Retries == 0 { + p.Retries = maxPatchRetry + } + for i := 1; i <= p.Retries && errors.IsConflict(err); i++ { + if i > triesBeforeBackOff { + p.BackOff.Sleep(backOffPeriod) + } + current, getErr = p.Helper.Get(namespace, name) + if getErr != nil { + return nil, nil, getErr + } + patchBytes, patchObject, err = p.patchSimple(current, modified, source, namespace, name, errOut) + } + if err != nil && (errors.IsConflict(err) || errors.IsInvalid(err)) && p.Force { + patchBytes, patchObject, err = p.deleteAndCreate(current, modified, namespace, name) + } + return patchBytes, patchObject, err +} + +func (p *Patcher) deleteAndCreate(original runtime.Object, modified []byte, namespace, name string) ([]byte, runtime.Object, error) { + if err := p.delete(namespace, name); err != nil { + return modified, nil, err + } + // TODO: use wait + if err := wait.PollImmediate(1*time.Second, p.Timeout, func() (bool, error) { + if _, err := p.Helper.Get(namespace, name); !errors.IsNotFound(err) { + return false, err + } + return true, nil + }); err != nil { + return modified, nil, err + } + versionedObject, _, err := unstructured.UnstructuredJSONScheme.Decode(modified, nil, nil) + if err != nil { + return modified, nil, err + } + createdObject, err := p.Helper.Create(namespace, true, versionedObject) + if err != nil { + // restore the original object if we fail to create the new one + // but still propagate and advertise error to user + recreated, recreateErr := p.Helper.Create(namespace, true, original) + if recreateErr != nil { + err = fmt.Errorf("An error occurred force-replacing the existing object with the newly provided one:\n\n%v.\n\nAdditionally, an error occurred attempting to restore the original 
object:\n\n%v", err, recreateErr) + } else { + createdObject = recreated + } + } + return modified, createdObject, err +} + +func addResourceVersion(patch []byte, rv string) ([]byte, error) { + var patchMap map[string]interface{} + err := json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, err + } + u := unstructured.Unstructured{Object: patchMap} + a, err := meta.Accessor(&u) + if err != nil { + return nil, err + } + a.SetResourceVersion(rv) + + return json.Marshal(patchMap) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/apply/prune.go b/vendor/k8s.io/kubectl/pkg/cmd/apply/prune.go new file mode 100644 index 000000000..04ce54e56 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/apply/prune.go @@ -0,0 +1,159 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "context" + "fmt" + "io" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/client-go/dynamic" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/prune" +) + +type pruner struct { + mapper meta.RESTMapper + dynamicClient dynamic.Interface + + visitedUids sets.String + visitedNamespaces sets.String + labelSelector string + fieldSelector string + + cascadingStrategy metav1.DeletionPropagation + dryRunStrategy cmdutil.DryRunStrategy + gracePeriod int + + toPrinter func(string) (printers.ResourcePrinter, error) + + out io.Writer +} + +func newPruner(o *ApplyOptions) pruner { + return pruner{ + mapper: o.Mapper, + dynamicClient: o.DynamicClient, + + labelSelector: o.Selector, + visitedUids: o.VisitedUids, + visitedNamespaces: o.VisitedNamespaces, + + cascadingStrategy: o.DeleteOptions.CascadingStrategy, + dryRunStrategy: o.DryRunStrategy, + gracePeriod: o.DeleteOptions.GracePeriod, + + toPrinter: o.ToPrinter, + + out: o.Out, + } +} + +func (p *pruner) pruneAll(o *ApplyOptions) error { + + namespacedRESTMappings, nonNamespacedRESTMappings, err := prune.GetRESTMappings(o.Mapper, o.PruneResources) + if err != nil { + return fmt.Errorf("error retrieving RESTMappings to prune: %v", err) + } + + for n := range p.visitedNamespaces { + for _, m := range namespacedRESTMappings { + if err := p.prune(n, m); err != nil { + return fmt.Errorf("error pruning namespaced object %v: %v", m.GroupVersionKind, err) + } + } + } + for _, m := range nonNamespacedRESTMappings { + if err := p.prune(metav1.NamespaceNone, m); err != nil { + return fmt.Errorf("error pruning nonNamespaced object %v: %v", m.GroupVersionKind, err) + } + } + + return nil +} + +func (p *pruner) prune(namespace string, mapping *meta.RESTMapping) error { + objList, err := p.dynamicClient.Resource(mapping.Resource). + Namespace(namespace). 
+ List(context.TODO(), metav1.ListOptions{ + LabelSelector: p.labelSelector, + FieldSelector: p.fieldSelector, + }) + if err != nil { + return err + } + + objs, err := meta.ExtractList(objList) + if err != nil { + return err + } + + for _, obj := range objs { + metadata, err := meta.Accessor(obj) + if err != nil { + return err + } + annots := metadata.GetAnnotations() + if _, ok := annots[corev1.LastAppliedConfigAnnotation]; !ok { + // don't prune resources not created with apply + continue + } + uid := metadata.GetUID() + if p.visitedUids.Has(string(uid)) { + continue + } + name := metadata.GetName() + if p.dryRunStrategy != cmdutil.DryRunClient { + if err := p.delete(namespace, name, mapping); err != nil { + return err + } + } + + printer, err := p.toPrinter("pruned") + if err != nil { + return err + } + printer.PrintObj(obj, p.out) + } + return nil +} + +func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping) error { + return runDelete(namespace, name, mapping, p.dynamicClient, p.cascadingStrategy, p.gracePeriod, p.dryRunStrategy == cmdutil.DryRunServer) +} + +func runDelete(namespace, name string, mapping *meta.RESTMapping, c dynamic.Interface, cascadingStrategy metav1.DeletionPropagation, gracePeriod int, serverDryRun bool) error { + options := asDeleteOptions(cascadingStrategy, gracePeriod) + if serverDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + return c.Resource(mapping.Resource).Namespace(namespace).Delete(context.TODO(), name, options) +} + +func asDeleteOptions(cascadingStrategy metav1.DeletionPropagation, gracePeriod int) metav1.DeleteOptions { + options := metav1.DeleteOptions{} + if gracePeriod >= 0 { + options = *metav1.NewDeleteOptions(int64(gracePeriod)) + } + options.PropagationPolicy = &cascadingStrategy + return options +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/delete/delete.go b/vendor/k8s.io/kubectl/pkg/cmd/delete/delete.go new file mode 100644 index 000000000..090234201 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/delete/delete.go @@ -0,0 +1,442 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package delete + +import ( + "fmt" + "net/url" + "strings" + "time" + + "github.com/spf13/cobra" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + cmdwait "k8s.io/kubectl/pkg/cmd/wait" + "k8s.io/kubectl/pkg/rawhttp" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + deleteLong = templates.LongDesc(i18n.T(` + Delete resources by file names, stdin, resources and names, or by resources and label selector. + + JSON and YAML formats are accepted. 
Only one type of argument may be specified: file names, + resources and names, or resources and label selector. + + Some resources, such as pods, support graceful deletion. These resources define a default period + before they are forcibly terminated (the grace period) but you may override that value with + the --grace-period flag, or pass --now to set a grace-period of 1. Because these resources often + represent entities in the cluster, deletion may not be acknowledged immediately. If the node + hosting a pod is down or cannot reach the API server, termination may take significantly longer + than the grace period. To force delete a resource, you must specify the --force flag. + Note: only a subset of resources support graceful deletion. In absence of the support, + the --grace-period flag is ignored. + + IMPORTANT: Force deleting pods does not wait for confirmation that the pod's processes have been + terminated, which can leave those processes running until the node detects the deletion and + completes graceful deletion. If your processes use shared storage or talk to a remote API and + depend on the name of the pod to identify themselves, force deleting those pods may result in + multiple processes running on different machines using the same identification which may lead + to data corruption or inconsistency. Only force delete pods when you are sure the pod is + terminated, or if your application can tolerate multiple copies of the same pod running at once. + Also, if you force delete pods, the scheduler may place new pods on those nodes before the node + has released those resources and causing those pods to be evicted immediately. + + Note that the delete command does NOT do resource version checks, so if someone submits an + update to a resource right when you submit a delete, their update will be lost along with the + rest of the resource. + + After a CustomResourceDefinition is deleted, invalidation of discovery cache may take up + to 10 minutes. If you don't want to wait, you might want to run "kubectl api-resources" + to refresh the discovery cache.`)) + + deleteExample = templates.Examples(i18n.T(` + # Delete a pod using the type and name specified in pod.json + kubectl delete -f ./pod.json + + # Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml + kubectl delete -k dir + + # Delete resources from all files that end with '.json' - i.e. 
expand wildcard characters in file names + kubectl apply -f '*.json' + + # Delete a pod based on the type and name in the JSON passed into stdin + cat pod.json | kubectl delete -f - + + # Delete pods and services with same names "baz" and "foo" + kubectl delete pod,service baz foo + + # Delete pods and services with label name=myLabel + kubectl delete pods,services -l name=myLabel + + # Delete a pod with minimal delay + kubectl delete pod foo --now + + # Force delete a pod on a dead node + kubectl delete pod foo --force + + # Delete all pods + kubectl delete pods --all`)) +) + +type DeleteOptions struct { + resource.FilenameOptions + + LabelSelector string + FieldSelector string + DeleteAll bool + DeleteAllNamespaces bool + CascadingStrategy metav1.DeletionPropagation + IgnoreNotFound bool + DeleteNow bool + ForceDeletion bool + WaitForDeletion bool + Quiet bool + WarnClusterScope bool + Raw string + + GracePeriod int + Timeout time.Duration + + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.QueryParamVerifier + + Output string + + DynamicClient dynamic.Interface + Mapper meta.RESTMapper + Result *resource.Result + + genericclioptions.IOStreams +} + +func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { + deleteFlags := NewDeleteCommandFlags("containing the resource to delete.") + + cmd := &cobra.Command{ + Use: "delete ([-f FILENAME] | [-k DIRECTORY] | TYPE [(NAME | -l label | --all)])", + DisableFlagsInUseLine: true, + Short: i18n.T("Delete resources by file names, stdin, resources and names, or by resources and label selector"), + Long: deleteLong, + Example: deleteExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + o, err := deleteFlags.ToOptions(nil, streams) + cmdutil.CheckErr(err) + cmdutil.CheckErr(o.Complete(f, args, cmd)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunDelete(f)) + }, + SuggestFor: []string{"rm"}, + } + + deleteFlags.AddFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + + return cmd +} + +func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Command) error { + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + o.WarnClusterScope = enforceNamespace && !o.DeleteAllNamespaces + + if o.DeleteAll || len(o.LabelSelector) > 0 || len(o.FieldSelector) > 0 { + if f := cmd.Flags().Lookup("ignore-not-found"); f != nil && !f.Changed { + // If the user didn't explicitly set the option, default to ignoring NotFound errors when used with --all, -l, or --field-selector + o.IgnoreNotFound = true + } + } + if o.DeleteNow { + if o.GracePeriod != -1 { + return fmt.Errorf("--now and --grace-period cannot be specified together") + } + o.GracePeriod = 1 + } + if o.GracePeriod == 0 && !o.ForceDeletion { + // To preserve backwards compatibility, but prevent accidental data loss, we convert --grace-period=0 + // into --grace-period=1. Users may provide --force to bypass this conversion. + o.GracePeriod = 1 + } + if o.ForceDeletion && o.GracePeriod < 0 { + o.GracePeriod = 0 + } + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewQueryParamVerifier(dynamicClient, f.OpenAPIGetter(), resource.QueryParamDryRun) + + if len(o.Raw) == 0 { + r := f.NewBuilder(). + Unstructured(). + ContinueOnError(). 
+ NamespaceParam(cmdNamespace).DefaultNamespace(). + FilenameParam(enforceNamespace, &o.FilenameOptions). + LabelSelectorParam(o.LabelSelector). + FieldSelectorParam(o.FieldSelector). + SelectAllParam(o.DeleteAll). + AllNamespaces(o.DeleteAllNamespaces). + ResourceTypeOrNameArgs(false, args...).RequireObject(false). + Flatten(). + Do() + err = r.Err() + if err != nil { + return err + } + o.Result = r + + o.Mapper, err = f.ToRESTMapper() + if err != nil { + return err + } + + o.DynamicClient, err = f.DynamicClient() + if err != nil { + return err + } + } + + return nil +} + +func (o *DeleteOptions) Validate() error { + if o.Output != "" && o.Output != "name" { + return fmt.Errorf("unexpected -o output mode: %v. We only support '-o name'", o.Output) + } + + if o.DeleteAll && len(o.LabelSelector) > 0 { + return fmt.Errorf("cannot set --all and --selector at the same time") + } + if o.DeleteAll && len(o.FieldSelector) > 0 { + return fmt.Errorf("cannot set --all and --field-selector at the same time") + } + + switch { + case o.GracePeriod == 0 && o.ForceDeletion: + fmt.Fprintf(o.ErrOut, "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n") + case o.GracePeriod > 0 && o.ForceDeletion: + return fmt.Errorf("--force and --grace-period greater than 0 cannot be specified together") + } + + if len(o.Raw) > 0 { + if len(o.FilenameOptions.Filenames) > 1 { + return fmt.Errorf("--raw can only use a single local file or stdin") + } else if len(o.FilenameOptions.Filenames) == 1 { + if strings.Index(o.FilenameOptions.Filenames[0], "http://") == 0 || strings.Index(o.FilenameOptions.Filenames[0], "https://") == 0 { + return fmt.Errorf("--raw cannot read from a url") + } + } + + if o.FilenameOptions.Recursive { + return fmt.Errorf("--raw and --recursive are mutually exclusive") + } + if len(o.Output) > 0 { + return fmt.Errorf("--raw and --output are mutually exclusive") + } + if _, err := url.ParseRequestURI(o.Raw); err != nil { + return fmt.Errorf("--raw must be a valid URL path: %v", err) + } + } + + return nil +} + +func (o *DeleteOptions) RunDelete(f cmdutil.Factory) error { + if len(o.Raw) > 0 { + restClient, err := f.RESTClient() + if err != nil { + return err + } + if len(o.Filenames) == 0 { + return rawhttp.RawDelete(restClient, o.IOStreams, o.Raw, "") + } + return rawhttp.RawDelete(restClient, o.IOStreams, o.Raw, o.Filenames[0]) + } + return o.DeleteResult(o.Result) +} + +func (o *DeleteOptions) DeleteResult(r *resource.Result) error { + found := 0 + if o.IgnoreNotFound { + r = r.IgnoreErrors(errors.IsNotFound) + } + warnClusterScope := o.WarnClusterScope + deletedInfos := []*resource.Info{} + uidMap := cmdwait.UIDMap{} + err := r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + deletedInfos = append(deletedInfos, info) + found++ + + options := &metav1.DeleteOptions{} + if o.GracePeriod >= 0 { + options = metav1.NewDeleteOptions(int64(o.GracePeriod)) + } + options.PropagationPolicy = &o.CascadingStrategy + + if warnClusterScope && info.Mapping.Scope.Name() == meta.RESTScopeNameRoot { + fmt.Fprintf(o.ErrOut, "warning: deleting cluster-scoped resources, not scoped to the provided namespace\n") + warnClusterScope = false + } + + if o.DryRunStrategy == cmdutil.DryRunClient { + if !o.Quiet { + o.PrintObj(info) + } + return nil + } + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err 
!= nil { + return err + } + } + response, err := o.deleteResource(info, options) + if err != nil { + return err + } + resourceLocation := cmdwait.ResourceLocation{ + GroupResource: info.Mapping.Resource.GroupResource(), + Namespace: info.Namespace, + Name: info.Name, + } + if status, ok := response.(*metav1.Status); ok && status.Details != nil { + uidMap[resourceLocation] = status.Details.UID + return nil + } + responseMetadata, err := meta.Accessor(response) + if err != nil { + // we don't have UID, but we didn't fail the delete, next best thing is just skipping the UID + klog.V(1).Info(err) + return nil + } + uidMap[resourceLocation] = responseMetadata.GetUID() + + return nil + }) + if err != nil { + return err + } + if found == 0 { + fmt.Fprintf(o.Out, "No resources found\n") + return nil + } + if !o.WaitForDeletion { + return nil + } + // if we don't have a dynamic client, we don't want to wait. Eventually when delete is cleaned up, this will likely + // drop out. + if o.DynamicClient == nil { + return nil + } + + // If we are dry-running, then we don't want to wait + if o.DryRunStrategy != cmdutil.DryRunNone { + return nil + } + + effectiveTimeout := o.Timeout + if effectiveTimeout == 0 { + // if we requested to wait forever, set it to a week. + effectiveTimeout = 168 * time.Hour + } + waitOptions := cmdwait.WaitOptions{ + ResourceFinder: genericclioptions.ResourceFinderForResult(resource.InfoListVisitor(deletedInfos)), + UIDMap: uidMap, + DynamicClient: o.DynamicClient, + Timeout: effectiveTimeout, + + Printer: printers.NewDiscardingPrinter(), + ConditionFn: cmdwait.IsDeleted, + IOStreams: o.IOStreams, + } + err = waitOptions.RunWait() + if errors.IsForbidden(err) || errors.IsMethodNotSupported(err) { + // if we're forbidden from waiting, we shouldn't fail. + // if the resource doesn't support a verb we need, we shouldn't fail. + klog.V(1).Info(err) + return nil + } + return err +} + +func (o *DeleteOptions) deleteResource(info *resource.Info, deleteOptions *metav1.DeleteOptions) (runtime.Object, error) { + deleteResponse, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + DeleteWithOptions(info.Namespace, info.Name, deleteOptions) + if err != nil { + return nil, cmdutil.AddSourceToErr("deleting", info.Source, err) + } + + if !o.Quiet { + o.PrintObj(info) + } + return deleteResponse, nil +} + +// PrintObj for deleted objects is special because we do not have an object to print. 
+// This mirrors name printer behavior +func (o *DeleteOptions) PrintObj(info *resource.Info) { + operation := "deleted" + groupKind := info.Mapping.GroupVersionKind + kindString := fmt.Sprintf("%s.%s", strings.ToLower(groupKind.Kind), groupKind.Group) + if len(groupKind.Group) == 0 { + kindString = strings.ToLower(groupKind.Kind) + } + + if o.GracePeriod == 0 { + operation = "force deleted" + } + + switch o.DryRunStrategy { + case cmdutil.DryRunClient: + operation = fmt.Sprintf("%s (dry run)", operation) + case cmdutil.DryRunServer: + operation = fmt.Sprintf("%s (server dry run)", operation) + } + + if o.Output == "name" { + // -o name: prints resource/name + fmt.Fprintf(o.Out, "%s/%s\n", kindString, info.Name) + return + } + + // understandable output by default + fmt.Fprintf(o.Out, "%s \"%s\" %s\n", kindString, info.Name, operation) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go b/vendor/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go new file mode 100644 index 000000000..f56b92d06 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go @@ -0,0 +1,251 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package delete + +import ( + "fmt" + "strconv" + "time" + + "github.com/spf13/cobra" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/dynamic" + cmdutil "k8s.io/kubectl/pkg/cmd/util" +) + +// DeleteFlags composes common printer flag structs +// used for commands requiring deletion logic. 
+type DeleteFlags struct { + FileNameFlags *genericclioptions.FileNameFlags + LabelSelector *string + FieldSelector *string + + All *bool + AllNamespaces *bool + CascadingStrategy *string + Force *bool + GracePeriod *int + IgnoreNotFound *bool + Now *bool + Timeout *time.Duration + Wait *bool + Output *string + Raw *string +} + +func (f *DeleteFlags) ToOptions(dynamicClient dynamic.Interface, streams genericclioptions.IOStreams) (*DeleteOptions, error) { + options := &DeleteOptions{ + DynamicClient: dynamicClient, + IOStreams: streams, + } + + // add filename options + if f.FileNameFlags != nil { + options.FilenameOptions = f.FileNameFlags.ToOptions() + } + if f.LabelSelector != nil { + options.LabelSelector = *f.LabelSelector + } + if f.FieldSelector != nil { + options.FieldSelector = *f.FieldSelector + } + + // add output format + if f.Output != nil { + options.Output = *f.Output + } + + if f.All != nil { + options.DeleteAll = *f.All + } + if f.AllNamespaces != nil { + options.DeleteAllNamespaces = *f.AllNamespaces + } + if f.CascadingStrategy != nil { + var err error + options.CascadingStrategy, err = parseCascadingFlag(streams, *f.CascadingStrategy) + if err != nil { + return nil, err + } + } + if f.Force != nil { + options.ForceDeletion = *f.Force + } + if f.GracePeriod != nil { + options.GracePeriod = *f.GracePeriod + } + if f.IgnoreNotFound != nil { + options.IgnoreNotFound = *f.IgnoreNotFound + } + if f.Now != nil { + options.DeleteNow = *f.Now + } + if f.Timeout != nil { + options.Timeout = *f.Timeout + } + if f.Wait != nil { + options.WaitForDeletion = *f.Wait + } + if f.Raw != nil { + options.Raw = *f.Raw + } + + return options, nil +} + +func (f *DeleteFlags) AddFlags(cmd *cobra.Command) { + f.FileNameFlags.AddFlags(cmd.Flags()) + if f.LabelSelector != nil { + cmdutil.AddLabelSelectorFlagVar(cmd, f.LabelSelector) + } + if f.FieldSelector != nil { + cmd.Flags().StringVarP(f.FieldSelector, "field-selector", "", *f.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") + } + if f.All != nil { + cmd.Flags().BoolVar(f.All, "all", *f.All, "Delete all resources, in the namespace of the specified resource types.") + } + if f.AllNamespaces != nil { + cmd.Flags().BoolVarP(f.AllNamespaces, "all-namespaces", "A", *f.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.") + } + if f.Force != nil { + cmd.Flags().BoolVar(f.Force, "force", *f.Force, "If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation.") + } + if f.CascadingStrategy != nil { + cmd.Flags().StringVar( + f.CascadingStrategy, + "cascade", + *f.CascadingStrategy, + `Must be "background", "orphan", or "foreground". Selects the deletion cascading strategy for the dependents (e.g. Pods created by a ReplicationController). Defaults to background.`) + cmd.Flags().Lookup("cascade").NoOptDefVal = "background" + } + if f.Now != nil { + cmd.Flags().BoolVar(f.Now, "now", *f.Now, "If true, resources are signaled for immediate shutdown (same as --grace-period=1).") + } + if f.GracePeriod != nil { + cmd.Flags().IntVar(f.GracePeriod, "grace-period", *f.GracePeriod, "Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. 
Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion).") + } + if f.Timeout != nil { + cmd.Flags().DurationVar(f.Timeout, "timeout", *f.Timeout, "The length of time to wait before giving up on a delete, zero means determine a timeout from the size of the object") + } + if f.IgnoreNotFound != nil { + cmd.Flags().BoolVar(f.IgnoreNotFound, "ignore-not-found", *f.IgnoreNotFound, "Treat \"resource not found\" as a successful delete. Defaults to \"true\" when --all is specified.") + } + if f.Wait != nil { + cmd.Flags().BoolVar(f.Wait, "wait", *f.Wait, "If true, wait for resources to be gone before returning. This waits for finalizers.") + } + if f.Output != nil { + cmd.Flags().StringVarP(f.Output, "output", "o", *f.Output, "Output mode. Use \"-o name\" for shorter output (resource/name).") + } + if f.Raw != nil { + cmd.Flags().StringVar(f.Raw, "raw", *f.Raw, "Raw URI to DELETE to the server. Uses the transport specified by the kubeconfig file.") + } +} + +// NewDeleteCommandFlags provides default flags and values for use with the "delete" command +func NewDeleteCommandFlags(usage string) *DeleteFlags { + cascadingStrategy := "background" + gracePeriod := -1 + + // setup command defaults + all := false + allNamespaces := false + force := false + ignoreNotFound := false + now := false + output := "" + labelSelector := "" + fieldSelector := "" + timeout := time.Duration(0) + wait := true + raw := "" + + filenames := []string{} + recursive := false + kustomize := "" + + return &DeleteFlags{ + // Not using helpers.go since it provides function to add '-k' for FileNameOptions, but not FileNameFlags + FileNameFlags: &genericclioptions.FileNameFlags{Usage: usage, Filenames: &filenames, Kustomize: &kustomize, Recursive: &recursive}, + LabelSelector: &labelSelector, + FieldSelector: &fieldSelector, + + CascadingStrategy: &cascadingStrategy, + GracePeriod: &gracePeriod, + + All: &all, + AllNamespaces: &allNamespaces, + Force: &force, + IgnoreNotFound: &ignoreNotFound, + Now: &now, + Timeout: &timeout, + Wait: &wait, + Output: &output, + Raw: &raw, + } +} + +// NewDeleteFlags provides default flags and values for use in commands outside of "delete" +func NewDeleteFlags(usage string) *DeleteFlags { + cascadingStrategy := "background" + gracePeriod := -1 + + force := false + timeout := time.Duration(0) + wait := false + + filenames := []string{} + kustomize := "" + recursive := false + + return &DeleteFlags{ + FileNameFlags: &genericclioptions.FileNameFlags{Usage: usage, Filenames: &filenames, Kustomize: &kustomize, Recursive: &recursive}, + + CascadingStrategy: &cascadingStrategy, + GracePeriod: &gracePeriod, + + // add non-defaults + Force: &force, + Timeout: &timeout, + Wait: &wait, + } +} + +func parseCascadingFlag(streams genericclioptions.IOStreams, cascadingFlag string) (metav1.DeletionPropagation, error) { + boolValue, err := strconv.ParseBool(cascadingFlag) + // The flag is not a boolean + if err != nil { + switch cascadingFlag { + case "orphan": + return metav1.DeletePropagationOrphan, nil + case "foreground": + return metav1.DeletePropagationForeground, nil + case "background": + return metav1.DeletePropagationBackground, nil + default: + return metav1.DeletePropagationBackground, fmt.Errorf(`invalid cascade value (%v). 
Must be "background", "foreground", or "orphan"`, cascadingFlag) + } + } + // The flag was a boolean + if boolValue { + fmt.Fprintf(streams.ErrOut, "warning: --cascade=%v is deprecated (boolean value) and can be replaced with --cascade=%s.\n", cascadingFlag, "background") + return metav1.DeletePropagationBackground, nil + } + fmt.Fprintf(streams.ErrOut, "warning: --cascade=%v is deprecated (boolean value) and can be replaced with --cascade=%s.\n", cascadingFlag, "orphan") + return metav1.DeletePropagationOrphan, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/crlf.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/crlf.go new file mode 100644 index 000000000..524a81f3e --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/crlf.go @@ -0,0 +1,57 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crlf + +import ( + "bytes" + "io" +) + +type crlfWriter struct { + io.Writer +} + +// NewCRLFWriter implements a CR/LF line ending writer used for normalizing +// text for Windows platforms. +func NewCRLFWriter(w io.Writer) io.Writer { + return crlfWriter{w} +} + +func (w crlfWriter) Write(b []byte) (n int, err error) { + for i, written := 0, 0; ; { + next := bytes.Index(b[i:], []byte("\n")) + if next == -1 { + n, err := w.Writer.Write(b[i:]) + return written + n, err + } + next = next + i + n, err := w.Writer.Write(b[i:next]) + if err != nil { + return written + n, err + } + written += n + n, err = w.Writer.Write([]byte("\r\n")) + if err != nil { + if n > 1 { + n = 1 + } + return written + n, err + } + written++ + i = next + 1 + } +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go new file mode 100644 index 000000000..c9a47fc67 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go @@ -0,0 +1,928 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package editor + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + goruntime "runtime" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/spf13/cobra" + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor/crlf" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/slice" +) + +var SupportedSubresources = []string{"status"} + +// EditOptions contains all the options for running edit cli command. +type EditOptions struct { + resource.FilenameOptions + RecordFlags *genericclioptions.RecordFlags + + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) + + OutputPatch bool + WindowsLineEndings bool + + cmdutil.ValidateOptions + ValidationDirective string + FieldValidationVerifier *resource.QueryParamVerifier + + OriginalResult *resource.Result + + EditMode EditMode + + CmdNamespace string + ApplyAnnotation bool + ChangeCause string + + managedFields map[types.UID][]metav1.ManagedFieldsEntry + + genericclioptions.IOStreams + + Recorder genericclioptions.Recorder + f cmdutil.Factory + editPrinterOptions *editPrinterOptions + updatedResultGetter func(data []byte) *resource.Result + + FieldManager string + + Subresource string +} + +// NewEditOptions returns an initialized EditOptions instance +func NewEditOptions(editMode EditMode, ioStreams genericclioptions.IOStreams) *EditOptions { + return &EditOptions{ + RecordFlags: genericclioptions.NewRecordFlags(), + + EditMode: editMode, + + PrintFlags: genericclioptions.NewPrintFlags("edited").WithTypeSetter(scheme.Scheme), + + editPrinterOptions: &editPrinterOptions{ + // create new editor-specific PrintFlags, with all + // output flags disabled, except json / yaml + printFlags: (&genericclioptions.PrintFlags{ + JSONYamlPrintFlags: genericclioptions.NewJSONYamlPrintFlags(), + }).WithDefaultOutput("yaml"), + ext: ".yaml", + addHeader: true, + }, + + WindowsLineEndings: goruntime.GOOS == "windows", + + Recorder: genericclioptions.NoopRecorder{}, + + IOStreams: ioStreams, + } +} + +type editPrinterOptions struct { + printFlags *genericclioptions.PrintFlags + ext string + addHeader bool +} + +func (e *editPrinterOptions) Complete(fromPrintFlags *genericclioptions.PrintFlags) error { + if e.printFlags == nil { + return fmt.Errorf("missing PrintFlags in editor printer options") + } + + // bind output format from existing printflags + if fromPrintFlags != nil && len(*fromPrintFlags.OutputFormat) > 0 { + e.printFlags.OutputFormat = fromPrintFlags.OutputFormat + } + + // prevent a commented header at the top of the user's + // default editor if presenting contents as json. 
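+	// JSON has no comment syntax, so emitting the instructional header would
+	// produce an invalid document; the header is only added for YAML output.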
+ if *e.printFlags.OutputFormat == "json" { + e.addHeader = false + e.ext = ".json" + return nil + } + + // we default to yaml if check above is false, as only json or yaml are supported + e.addHeader = true + e.ext = ".yaml" + return nil +} + +func (e *editPrinterOptions) PrintObj(obj runtime.Object, out io.Writer) error { + p, err := e.printFlags.ToPrinter() + if err != nil { + return err + } + + return p.PrintObj(obj, out) +} + +// Complete completes all the required options +func (o *EditOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Command) error { + var err error + + o.RecordFlags.Complete(cmd) + o.Recorder, err = o.RecordFlags.ToRecorder() + if err != nil { + return err + } + + if o.EditMode != NormalEditMode && o.EditMode != EditBeforeCreateMode && o.EditMode != ApplyEditMode { + return fmt.Errorf("unsupported edit mode %q", o.EditMode) + } + + o.editPrinterOptions.Complete(o.PrintFlags) + + if o.OutputPatch && o.EditMode != NormalEditMode { + return fmt.Errorf("the edit mode doesn't support output the patch") + } + + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + b := f.NewBuilder(). + Unstructured() + if o.EditMode == NormalEditMode || o.EditMode == ApplyEditMode { + // when do normal edit or apply edit we need to always retrieve the latest resource from server + b = b.ResourceTypeOrNameArgs(true, args...).Latest() + } + r := b.NamespaceParam(cmdNamespace).DefaultNamespace(). + FilenameParam(enforceNamespace, &o.FilenameOptions). + Subresource(o.Subresource). + ContinueOnError(). + Flatten(). + Do() + err = r.Err() + if err != nil { + return err + } + o.OriginalResult = r + + o.updatedResultGetter = func(data []byte) *resource.Result { + // resource builder to read objects from edited data + return f.NewBuilder(). + Unstructured(). + Stream(bytes.NewReader(data), "edited-file"). + Subresource(o.Subresource). + ContinueOnError(). + Flatten(). + Do() + } + + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { + o.PrintFlags.NamePrintFlags.Operation = operation + return o.PrintFlags.ToPrinter() + } + + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + o.FieldValidationVerifier = resource.NewQueryParamVerifier(dynamicClient, f.OpenAPIGetter(), resource.QueryParamFieldValidation) + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + o.CmdNamespace = cmdNamespace + o.f = f + + return nil +} + +// Validate checks the EditOptions to see if there is sufficient information to run the command. +func (o *EditOptions) Validate() error { + if len(o.Subresource) > 0 && !slice.ContainsString(SupportedSubresources, o.Subresource, nil) { + return fmt.Errorf("invalid subresource value: %q. 
Must be one of %v", o.Subresource, SupportedSubresources) + } + return nil +} + +// Run performs the execution +func (o *EditOptions) Run() error { + edit := NewDefaultEditor(editorEnvs()) + // editFn is invoked for each edit session (once with a list for normal edit, once for each individual resource in a edit-on-create invocation) + editFn := func(infos []*resource.Info) error { + var ( + results = editResults{} + original = []byte{} + edited = []byte{} + file string + err error + ) + + containsError := false + // loop until we succeed or cancel editing + for { + // get the object we're going to serialize as input to the editor + var originalObj runtime.Object + switch len(infos) { + case 1: + originalObj = infos[0].Object + default: + l := &unstructured.UnstructuredList{ + Object: map[string]interface{}{ + "kind": "List", + "apiVersion": "v1", + "metadata": map[string]interface{}{}, + }, + } + for _, info := range infos { + l.Items = append(l.Items, *info.Object.(*unstructured.Unstructured)) + } + originalObj = l + } + + // generate the file to edit + buf := &bytes.Buffer{} + var w io.Writer = buf + if o.WindowsLineEndings { + w = crlf.NewCRLFWriter(w) + } + + if o.editPrinterOptions.addHeader { + results.header.writeTo(w, o.EditMode) + } + + if !containsError { + if err := o.extractManagedFields(originalObj); err != nil { + return preservedFile(err, results.file, o.ErrOut) + } + + if err := o.editPrinterOptions.PrintObj(originalObj, w); err != nil { + return preservedFile(err, results.file, o.ErrOut) + } + original = buf.Bytes() + } else { + // In case of an error, preserve the edited file. + // Remove the comments (header) from it since we already + // have included the latest header in the buffer above. + buf.Write(cmdutil.ManualStrip(edited)) + } + + // launch the editor + editedDiff := edited + edited, file, err = edit.LaunchTempFile(fmt.Sprintf("%s-edit-", filepath.Base(os.Args[0])), o.editPrinterOptions.ext, buf) + if err != nil { + return preservedFile(err, results.file, o.ErrOut) + } + + // If we're retrying the loop because of an error, and no change was made in the file, short-circuit + if containsError && bytes.Equal(cmdutil.StripComments(editedDiff), cmdutil.StripComments(edited)) { + return preservedFile(fmt.Errorf("%s", "Edit cancelled, no valid changes were saved."), file, o.ErrOut) + } + // cleanup any file from the previous pass + if len(results.file) > 0 { + os.Remove(results.file) + } + klog.V(4).Infof("User edited:\n%s", string(edited)) + + // Apply validation + schema, err := o.f.Validator(o.ValidationDirective, o.FieldValidationVerifier) + if err != nil { + return preservedFile(err, file, o.ErrOut) + } + err = schema.ValidateBytes(cmdutil.StripComments(edited)) + if err != nil { + results = editResults{ + file: file, + } + containsError = true + fmt.Fprintln(o.ErrOut, results.addError(apierrors.NewInvalid(corev1.SchemeGroupVersion.WithKind("").GroupKind(), + "", field.ErrorList{field.Invalid(nil, "The edited file failed validation", fmt.Sprintf("%v", err))}), infos[0])) + continue + } + + // Compare content without comments + if bytes.Equal(cmdutil.StripComments(original), cmdutil.StripComments(edited)) { + os.Remove(file) + fmt.Fprintln(o.ErrOut, "Edit cancelled, no changes made.") + return nil + } + + lines, err := hasLines(bytes.NewBuffer(edited)) + if err != nil { + return preservedFile(err, file, o.ErrOut) + } + if !lines { + os.Remove(file) + fmt.Fprintln(o.ErrOut, "Edit cancelled, saved file was empty.") + return nil + } + + results = editResults{ + file: 
file, + } + + // parse the edited file + updatedInfos, err := o.updatedResultGetter(edited).Infos() + if err != nil { + // syntax error + containsError = true + results.header.reasons = append(results.header.reasons, editReason{head: fmt.Sprintf("The edited file had a syntax error: %v", err)}) + continue + } + + // not a syntax error as it turns out... + containsError = false + updatedVisitor := resource.InfoListVisitor(updatedInfos) + + // we need to add back managedFields to both updated and original object + if err := o.restoreManagedFields(updatedInfos); err != nil { + return preservedFile(err, file, o.ErrOut) + } + if err := o.restoreManagedFields(infos); err != nil { + return preservedFile(err, file, o.ErrOut) + } + + // need to make sure the original namespace wasn't changed while editing + if err := updatedVisitor.Visit(resource.RequireNamespace(o.CmdNamespace)); err != nil { + return preservedFile(err, file, o.ErrOut) + } + + // iterate through all items to apply annotations + if err := o.visitAnnotation(updatedVisitor); err != nil { + return preservedFile(err, file, o.ErrOut) + } + + switch o.EditMode { + case NormalEditMode: + err = o.visitToPatch(infos, updatedVisitor, &results) + case ApplyEditMode: + err = o.visitToApplyEditPatch(infos, updatedVisitor) + case EditBeforeCreateMode: + err = o.visitToCreate(updatedVisitor) + default: + err = fmt.Errorf("unsupported edit mode %q", o.EditMode) + } + if err != nil { + return preservedFile(err, results.file, o.ErrOut) + } + + // Handle all possible errors + // + // 1. retryable: propose kubectl replace -f + // 2. notfound: indicate the location of the saved configuration of the deleted resource + // 3. invalid: retry those on the spot by looping ie. reloading the editor + if results.retryable > 0 { + fmt.Fprintf(o.ErrOut, "You can run `%s replace -f %s` to try this update again.\n", filepath.Base(os.Args[0]), file) + return cmdutil.ErrExit + } + if results.notfound > 0 { + fmt.Fprintf(o.ErrOut, "The edits you made on deleted resources have been saved to %q\n", file) + return cmdutil.ErrExit + } + + if len(results.edit) == 0 { + if results.notfound == 0 { + os.Remove(file) + } else { + fmt.Fprintf(o.Out, "The edits you made on deleted resources have been saved to %q\n", file) + } + return nil + } + + if len(results.header.reasons) > 0 { + containsError = true + } + } + } + + switch o.EditMode { + // If doing normal edit we cannot use Visit because we need to edit a list for convenience. Ref: #20519 + case NormalEditMode: + infos, err := o.OriginalResult.Infos() + if err != nil { + return err + } + if len(infos) == 0 { + return errors.New("edit cancelled, no objects found") + } + return editFn(infos) + case ApplyEditMode: + infos, err := o.OriginalResult.Infos() + if err != nil { + return err + } + var annotationInfos []*resource.Info + for i := range infos { + data, err := util.GetOriginalConfiguration(infos[i].Object) + if err != nil { + return err + } + if data == nil { + continue + } + + tempInfos, err := o.updatedResultGetter(data).Infos() + if err != nil { + return err + } + annotationInfos = append(annotationInfos, tempInfos[0]) + } + if len(annotationInfos) == 0 { + return errors.New("no last-applied-configuration annotation found on resources, to create the annotation, use command `kubectl apply set-last-applied --create-annotation`") + } + return editFn(annotationInfos) + // If doing an edit before created, we don't want a list and instead want the normal behavior as kubectl create. 
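+	// Each object returned by the builder is visited individually, so every
+	// resource gets its own temp file and editor session before it is created.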
+ case EditBeforeCreateMode: + return o.OriginalResult.Visit(func(info *resource.Info, err error) error { + return editFn([]*resource.Info{info}) + }) + default: + return fmt.Errorf("unsupported edit mode %q", o.EditMode) + } +} + +func (o *EditOptions) extractManagedFields(obj runtime.Object) error { + o.managedFields = make(map[types.UID][]metav1.ManagedFieldsEntry) + if meta.IsListType(obj) { + err := meta.EachListItem(obj, func(obj runtime.Object) error { + uid, mf, err := clearManagedFields(obj) + if err != nil { + return err + } + o.managedFields[uid] = mf + return nil + }) + return err + } + uid, mf, err := clearManagedFields(obj) + if err != nil { + return err + } + o.managedFields[uid] = mf + return nil +} + +func clearManagedFields(obj runtime.Object) (types.UID, []metav1.ManagedFieldsEntry, error) { + metaObjs, err := meta.Accessor(obj) + if err != nil { + return "", nil, err + } + mf := metaObjs.GetManagedFields() + metaObjs.SetManagedFields(nil) + return metaObjs.GetUID(), mf, nil +} + +func (o *EditOptions) restoreManagedFields(infos []*resource.Info) error { + for _, info := range infos { + metaObjs, err := meta.Accessor(info.Object) + if err != nil { + return err + } + mf := o.managedFields[metaObjs.GetUID()] + metaObjs.SetManagedFields(mf) + } + return nil +} + +func (o *EditOptions) visitToApplyEditPatch(originalInfos []*resource.Info, patchVisitor resource.Visitor) error { + err := patchVisitor.Visit(func(info *resource.Info, incomingErr error) error { + editObjUID, err := meta.NewAccessor().UID(info.Object) + if err != nil { + return err + } + + var originalInfo *resource.Info + for _, i := range originalInfos { + originalObjUID, err := meta.NewAccessor().UID(i.Object) + if err != nil { + return err + } + if editObjUID == originalObjUID { + originalInfo = i + break + } + } + if originalInfo == nil { + return fmt.Errorf("no original object found for %#v", info.Object) + } + + originalJS, err := encodeToJSON(originalInfo.Object.(runtime.Unstructured)) + if err != nil { + return err + } + + editedJS, err := encodeToJSON(info.Object.(runtime.Unstructured)) + if err != nil { + return err + } + + if reflect.DeepEqual(originalJS, editedJS) { + printer, err := o.ToPrinter("skipped") + if err != nil { + return err + } + return printer.PrintObj(info.Object, o.Out) + } + err = o.annotationPatch(info) + if err != nil { + return err + } + + printer, err := o.ToPrinter("edited") + if err != nil { + return err + } + return printer.PrintObj(info.Object, o.Out) + }) + return err +} + +func (o *EditOptions) annotationPatch(update *resource.Info) error { + patch, _, patchType, err := GetApplyPatch(update.Object.(runtime.Unstructured)) + if err != nil { + return err + } + mapping := update.ResourceMapping() + client, err := o.f.UnstructuredClientForMapping(mapping) + if err != nil { + return err + } + helper := resource.NewHelper(client, mapping). + WithFieldManager(o.FieldManager). + WithFieldValidation(o.ValidationDirective). 
+ WithSubresource(o.Subresource) + _, err = helper.Patch(o.CmdNamespace, update.Name, patchType, patch, nil) + if err != nil { + return err + } + return nil +} + +// GetApplyPatch is used to get and apply patches +func GetApplyPatch(obj runtime.Unstructured) ([]byte, []byte, types.PatchType, error) { + beforeJSON, err := encodeToJSON(obj) + if err != nil { + return nil, []byte(""), types.MergePatchType, err + } + objCopy := obj.DeepCopyObject() + accessor := meta.NewAccessor() + annotations, err := accessor.Annotations(objCopy) + if err != nil { + return nil, beforeJSON, types.MergePatchType, err + } + if annotations == nil { + annotations = map[string]string{} + } + annotations[corev1.LastAppliedConfigAnnotation] = string(beforeJSON) + accessor.SetAnnotations(objCopy, annotations) + afterJSON, err := encodeToJSON(objCopy.(runtime.Unstructured)) + if err != nil { + return nil, beforeJSON, types.MergePatchType, err + } + patch, err := jsonpatch.CreateMergePatch(beforeJSON, afterJSON) + return patch, beforeJSON, types.MergePatchType, err +} + +func encodeToJSON(obj runtime.Unstructured) ([]byte, error) { + serialization, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return nil, err + } + js, err := yaml.ToJSON(serialization) + if err != nil { + return nil, err + } + return js, nil +} + +func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor resource.Visitor, results *editResults) error { + err := patchVisitor.Visit(func(info *resource.Info, incomingErr error) error { + editObjUID, err := meta.NewAccessor().UID(info.Object) + if err != nil { + return err + } + + var originalInfo *resource.Info + for _, i := range originalInfos { + originalObjUID, err := meta.NewAccessor().UID(i.Object) + if err != nil { + return err + } + if editObjUID == originalObjUID { + originalInfo = i + break + } + } + if originalInfo == nil { + return fmt.Errorf("no original object found for %#v", info.Object) + } + + originalJS, err := encodeToJSON(originalInfo.Object.(runtime.Unstructured)) + if err != nil { + return err + } + + editedJS, err := encodeToJSON(info.Object.(runtime.Unstructured)) + if err != nil { + return err + } + + if reflect.DeepEqual(originalJS, editedJS) { + // no edit, so just skip it. 
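+			// The object is still printed, with the "skipped" operation, so the
+			// user gets feedback for every resource in the edit session.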
+ printer, err := o.ToPrinter("skipped") + if err != nil { + return err + } + return printer.PrintObj(info.Object, o.Out) + } + + preconditions := []mergepatch.PreconditionFunc{ + mergepatch.RequireKeyUnchanged("apiVersion"), + mergepatch.RequireKeyUnchanged("kind"), + mergepatch.RequireMetadataKeyUnchanged("name"), + mergepatch.RequireKeyUnchanged("managedFields"), + } + + // Create the versioned struct from the type defined in the mapping + // (which is the API version we'll be submitting the patch to) + versionedObject, err := scheme.Scheme.New(info.Mapping.GroupVersionKind) + var patchType types.PatchType + var patch []byte + switch { + case runtime.IsNotRegisteredError(err): + // fall back to generic JSON merge patch + patchType = types.MergePatchType + patch, err = jsonpatch.CreateMergePatch(originalJS, editedJS) + if err != nil { + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + return err + } + var patchMap map[string]interface{} + err = json.Unmarshal(patch, &patchMap) + if err != nil { + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + return err + } + for _, precondition := range preconditions { + if !precondition(patchMap) { + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + return fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") + } + } + case err != nil: + return err + default: + patchType = types.StrategicMergePatchType + patch, err = strategicpatch.CreateTwoWayMergePatch(originalJS, editedJS, versionedObject, preconditions...) + if err != nil { + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + if mergepatch.IsPreconditionFailed(err) { + return fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") + } + return err + } + } + + if o.OutputPatch { + fmt.Fprintf(o.Out, "Patch: %s\n", string(patch)) + } + + patched, err := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.FieldManager). + WithFieldValidation(o.ValidationDirective). + WithSubresource(o.Subresource). + Patch(info.Namespace, info.Name, patchType, patch, nil) + if err != nil { + fmt.Fprintln(o.ErrOut, results.addError(err, info)) + return nil + } + info.Refresh(patched, true) + printer, err := o.ToPrinter("edited") + if err != nil { + return err + } + return printer.PrintObj(info.Object, o.Out) + }) + return err +} + +func (o *EditOptions) visitToCreate(createVisitor resource.Visitor) error { + err := createVisitor.Visit(func(info *resource.Info, incomingErr error) error { + obj, err := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.FieldManager). + WithFieldValidation(o.ValidationDirective). 
+ Create(info.Namespace, true, info.Object) + if err != nil { + return err + } + info.Refresh(obj, true) + printer, err := o.ToPrinter("created") + if err != nil { + return err + } + return printer.PrintObj(info.Object, o.Out) + }) + return err +} + +func (o *EditOptions) visitAnnotation(annotationVisitor resource.Visitor) error { + // iterate through all items to apply annotations + err := annotationVisitor.Visit(func(info *resource.Info, incomingErr error) error { + // put configuration annotation in "updates" + if o.ApplyAnnotation { + if err := util.CreateOrUpdateAnnotation(true, info.Object, scheme.DefaultJSONEncoder()); err != nil { + return err + } + } + if err := o.Recorder.Record(info.Object); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } + + return nil + + }) + return err +} + +// EditMode can be either NormalEditMode, EditBeforeCreateMode or ApplyEditMode +type EditMode string + +const ( + // NormalEditMode is an edit mode + NormalEditMode EditMode = "normal_mode" + + // EditBeforeCreateMode is an edit mode + EditBeforeCreateMode EditMode = "edit_before_create_mode" + + // ApplyEditMode is an edit mode + ApplyEditMode EditMode = "edit_last_applied_mode" +) + +// editReason preserves a message about the reason this file must be edited again +type editReason struct { + head string + other []string +} + +// editHeader includes a list of reasons the edit must be retried +type editHeader struct { + reasons []editReason +} + +// writeTo outputs the current header information into a stream +func (h *editHeader) writeTo(w io.Writer, editMode EditMode) error { + if editMode == ApplyEditMode { + fmt.Fprint(w, `# Please edit the 'last-applied-configuration' annotations below. +# Lines beginning with a '#' will be ignored, and an empty file will abort the edit. +# +`) + } else { + fmt.Fprint(w, `# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. +# +`) + } + + for _, r := range h.reasons { + if len(r.other) > 0 { + fmt.Fprintf(w, "# %s:\n", hashOnLineBreak(r.head)) + } else { + fmt.Fprintf(w, "# %s\n", hashOnLineBreak(r.head)) + } + for _, o := range r.other { + fmt.Fprintf(w, "# * %s\n", hashOnLineBreak(o)) + } + fmt.Fprintln(w, "#") + } + return nil +} + +// editResults capture the result of an update +type editResults struct { + header editHeader + retryable int + notfound int + edit []*resource.Info + file string +} + +func (r *editResults) addError(err error, info *resource.Info) string { + resourceString := info.Mapping.Resource.Resource + if len(info.Mapping.Resource.Group) > 0 { + resourceString = resourceString + "." 
+ info.Mapping.Resource.Group + } + + switch { + case apierrors.IsInvalid(err): + r.edit = append(r.edit, info) + reason := editReason{ + head: fmt.Sprintf("%s %q was not valid", resourceString, info.Name), + } + if err, ok := err.(apierrors.APIStatus); ok { + if details := err.Status().Details; details != nil { + for _, cause := range details.Causes { + reason.other = append(reason.other, fmt.Sprintf("%s: %s", cause.Field, cause.Message)) + } + } + } + r.header.reasons = append(r.header.reasons, reason) + return fmt.Sprintf("error: %s %q is invalid", resourceString, info.Name) + case apierrors.IsNotFound(err): + r.notfound++ + return fmt.Sprintf("error: %s %q could not be found on the server", resourceString, info.Name) + default: + r.retryable++ + return fmt.Sprintf("error: %s %q could not be patched: %v", resourceString, info.Name, err) + } +} + +// preservedFile writes out a message about the provided file if it exists to the +// provided output stream when an error happens. Used to notify the user where +// their updates were preserved. +func preservedFile(err error, path string, out io.Writer) error { + if len(path) > 0 { + if _, err := os.Stat(path); !os.IsNotExist(err) { + fmt.Fprintf(out, "A copy of your changes has been stored to %q\n", path) + } + } + return err +} + +// hasLines returns true if any line in the provided stream is non empty - has non-whitespace +// characters, or the first non-whitespace character is a '#' indicating a comment. Returns +// any errors encountered reading the stream. +func hasLines(r io.Reader) (bool, error) { + // TODO: if any files we read have > 64KB lines, we'll need to switch to bytes.ReadLine + // TODO: probably going to be secrets + s := bufio.NewScanner(r) + for s.Scan() { + if line := strings.TrimSpace(s.Text()); len(line) > 0 && line[0] != '#' { + return true, nil + } + } + if err := s.Err(); err != nil && err != io.EOF { + return false, err + } + return false, nil +} + +// hashOnLineBreak returns a string built from the provided string by inserting any necessary '#' +// characters after '\n' characters, indicating a comment. +func hashOnLineBreak(s string) string { + r := "" + for i, ch := range s { + j := i + 1 + if j < len(s) && ch == '\n' && s[j] != '#' { + r += "\n# " + } else { + r += string(ch) + } + } + return r +} + +// editorEnvs returns an ordered list of env vars to check for editor preferences. +func editorEnvs() []string { + return []string{ + "KUBE_EDITOR", + "EDITOR", + } +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go new file mode 100644 index 000000000..1a31d6814 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go @@ -0,0 +1,168 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package editor + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "k8s.io/klog/v2" + + "k8s.io/kubectl/pkg/util/term" +) + +const ( + // sorry, blame Git + // TODO: on Windows rely on 'start' to launch the editor associated + // with the given file type. If we can't because of the need of + // blocking, use a script with 'ftype' and 'assoc' to detect it. + defaultEditor = "vi" + defaultShell = "/bin/bash" + windowsEditor = "notepad" + windowsShell = "cmd" +) + +// Editor holds the command-line args to fire up the editor +type Editor struct { + Args []string + Shell bool +} + +// NewDefaultEditor creates a struct Editor that uses the OS environment to +// locate the editor program, looking at EDITOR environment variable to find +// the proper command line. If the provided editor has no spaces, or no quotes, +// it is treated as a bare command to be loaded. Otherwise, the string will +// be passed to the user's shell for execution. +func NewDefaultEditor(envs []string) Editor { + args, shell := defaultEnvEditor(envs) + return Editor{ + Args: args, + Shell: shell, + } +} + +func defaultEnvShell() []string { + shell := os.Getenv("SHELL") + if len(shell) == 0 { + shell = platformize(defaultShell, windowsShell) + } + flag := "-c" + if shell == windowsShell { + flag = "/C" + } + return []string{shell, flag} +} + +func defaultEnvEditor(envs []string) ([]string, bool) { + var editor string + for _, env := range envs { + if len(env) > 0 { + editor = os.Getenv(env) + } + if len(editor) > 0 { + break + } + } + if len(editor) == 0 { + editor = platformize(defaultEditor, windowsEditor) + } + if !strings.Contains(editor, " ") { + return []string{editor}, false + } + if !strings.ContainsAny(editor, "\"'\\") { + return strings.Split(editor, " "), false + } + // rather than parse the shell arguments ourselves, punt to the shell + shell := defaultEnvShell() + return append(shell, editor), true +} + +func (e Editor) args(path string) []string { + args := make([]string, len(e.Args)) + copy(args, e.Args) + if e.Shell { + last := args[len(args)-1] + args[len(args)-1] = fmt.Sprintf("%s %q", last, path) + } else { + args = append(args, path) + } + return args +} + +// Launch opens the described or returns an error. The TTY will be protected, and +// SIGQUIT, SIGTERM, and SIGINT will all be trapped. +func (e Editor) Launch(path string) error { + if len(e.Args) == 0 { + return fmt.Errorf("no editor defined, can't open %s", path) + } + abs, err := filepath.Abs(path) + if err != nil { + return err + } + args := e.args(abs) + cmd := exec.Command(args[0], args[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + klog.V(5).Infof("Opening file with editor %v", args) + if err := (term.TTY{In: os.Stdin, TryDev: true}).Safe(cmd.Run); err != nil { + if err, ok := err.(*exec.Error); ok { + if err.Err == exec.ErrNotFound { + return fmt.Errorf("unable to launch the editor %q", strings.Join(e.Args, " ")) + } + } + return fmt.Errorf("there was a problem with the editor %q", strings.Join(e.Args, " ")) + } + return nil +} + +// LaunchTempFile reads the provided stream into a temporary file in the given directory +// and file prefix, and then invokes Launch with the path of that file. It will return +// the contents of the file after launch, any errors that occur, and the path of the +// temporary file so the caller can clean it up as needed. 
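+// The file is created in the default OS temp directory; its name combines the
+// given prefix, a random component, and the given suffix.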
+func (e Editor) LaunchTempFile(prefix, suffix string, r io.Reader) ([]byte, string, error) { + f, err := os.CreateTemp("", prefix+"*"+suffix) + if err != nil { + return nil, "", err + } + defer f.Close() + path := f.Name() + if _, err := io.Copy(f, r); err != nil { + os.Remove(path) + return nil, path, err + } + // This file descriptor needs to close so the next process (Launch) can claim it. + f.Close() + if err := e.Launch(path); err != nil { + return nil, path, err + } + bytes, err := ioutil.ReadFile(path) + return bytes, path, err +} + +func platformize(linux, windows string) string { + if runtime.GOOS == "windows" { + return windows + } + return linux +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go b/vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go new file mode 100644 index 000000000..83ca3fc91 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/wait/wait.go @@ -0,0 +1,631 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "errors" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/spf13/cobra" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/client-go/util/jsonpath" + cmdget "k8s.io/kubectl/pkg/cmd/get" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + waitLong = templates.LongDesc(i18n.T(` + Experimental: Wait for a specific condition on one or many resources. + + The command takes multiple resources and waits until the specified condition + is seen in the Status field of every given resource. + + Alternatively, the command can wait for the given set of resources to be deleted + by providing the "delete" keyword as the value to the --for flag. + + A successful message will be printed to stdout indicating when the specified + condition has been met. You can use -o option to change to output destination.`)) + + waitExample = templates.Examples(i18n.T(` + # Wait for the pod "busybox1" to contain the status condition of type "Ready" + kubectl wait --for=condition=Ready pod/busybox1 + + # The default value of status condition is true; you can wait for other targets after an equal delimiter (compared after Unicode simple case folding, which is a more general form of case-insensitivity): + kubectl wait --for=condition=Ready=false pod/busybox1 + + # Wait for the pod "busybox1" to contain the status phase to be "Running". 
+ kubectl wait --for=jsonpath='{.status.phase}'=Running pod/busybox1 + + # Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command + kubectl delete pod/busybox1 + kubectl wait --for=delete pod/busybox1 --timeout=60s`)) +) + +// errNoMatchingResources is returned when there is no resources matching a query. +var errNoMatchingResources = errors.New("no matching resources found") + +// WaitFlags directly reflect the information that CLI is gathering via flags. They will be converted to Options, which +// reflect the runtime requirements for the command. This structure reduces the transformation to wiring and makes +// the logic itself easy to unit test +type WaitFlags struct { + RESTClientGetter genericclioptions.RESTClientGetter + PrintFlags *genericclioptions.PrintFlags + ResourceBuilderFlags *genericclioptions.ResourceBuilderFlags + + Timeout time.Duration + ForCondition string + + genericclioptions.IOStreams +} + +// NewWaitFlags returns a default WaitFlags +func NewWaitFlags(restClientGetter genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *WaitFlags { + return &WaitFlags{ + RESTClientGetter: restClientGetter, + PrintFlags: genericclioptions.NewPrintFlags("condition met"), + ResourceBuilderFlags: genericclioptions.NewResourceBuilderFlags(). + WithLabelSelector(""). + WithFieldSelector(""). + WithAll(false). + WithAllNamespaces(false). + WithLocal(false). + WithLatest(), + + Timeout: 30 * time.Second, + + IOStreams: streams, + } +} + +// NewCmdWait returns a cobra command for waiting +func NewCmdWait(restClientGetter genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command { + flags := NewWaitFlags(restClientGetter, streams) + + cmd := &cobra.Command{ + Use: "wait ([-f FILENAME] | resource.group/resource.name | resource.group [(-l label | --all)]) [--for=delete|--for condition=available|--for=jsonpath='{}'=value]", + Short: i18n.T("Experimental: Wait for a specific condition on one or many resources"), + Long: waitLong, + Example: waitExample, + + DisableFlagsInUseLine: true, + Run: func(cmd *cobra.Command, args []string) { + o, err := flags.ToOptions(args) + cmdutil.CheckErr(err) + cmdutil.CheckErr(o.RunWait()) + }, + SuggestFor: []string{"list", "ps"}, + } + + flags.AddFlags(cmd) + + return cmd +} + +// AddFlags registers flags for a cli +func (flags *WaitFlags) AddFlags(cmd *cobra.Command) { + flags.PrintFlags.AddFlags(cmd) + flags.ResourceBuilderFlags.AddFlags(cmd.Flags()) + + cmd.Flags().DurationVar(&flags.Timeout, "timeout", flags.Timeout, "The length of time to wait before giving up. Zero means check once and don't wait, negative means wait for a week.") + cmd.Flags().StringVar(&flags.ForCondition, "for", flags.ForCondition, "The condition to wait on: [delete|condition=condition-name[=condition-value]|jsonpath='{JSONPath expression}'=JSONPath Condition]. The default condition-value is true. 
Condition values are compared after Unicode simple case folding, which is a more general form of case-insensitivity.") +} + +// ToOptions converts from CLI inputs to runtime inputs +func (flags *WaitFlags) ToOptions(args []string) (*WaitOptions, error) { + printer, err := flags.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + builder := flags.ResourceBuilderFlags.ToBuilder(flags.RESTClientGetter, args) + clientConfig, err := flags.RESTClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + conditionFn, err := conditionFuncFor(flags.ForCondition, flags.ErrOut) + if err != nil { + return nil, err + } + + effectiveTimeout := flags.Timeout + if effectiveTimeout < 0 { + effectiveTimeout = 168 * time.Hour + } + + o := &WaitOptions{ + ResourceFinder: builder, + DynamicClient: dynamicClient, + Timeout: effectiveTimeout, + ForCondition: flags.ForCondition, + + Printer: printer, + ConditionFn: conditionFn, + IOStreams: flags.IOStreams, + } + + return o, nil +} + +func conditionFuncFor(condition string, errOut io.Writer) (ConditionFunc, error) { + if strings.ToLower(condition) == "delete" { + return IsDeleted, nil + } + if strings.HasPrefix(condition, "condition=") { + conditionName := condition[len("condition="):] + conditionValue := "true" + if equalsIndex := strings.Index(conditionName, "="); equalsIndex != -1 { + conditionValue = conditionName[equalsIndex+1:] + conditionName = conditionName[0:equalsIndex] + } + + return ConditionalWait{ + conditionName: conditionName, + conditionStatus: conditionValue, + errOut: errOut, + }.IsConditionMet, nil + } + if strings.HasPrefix(condition, "jsonpath=") { + splitStr := strings.Split(condition, "=") + if len(splitStr) != 3 { + return nil, fmt.Errorf("jsonpath wait format must be --for=jsonpath='{.status.readyReplicas}'=3") + } + jsonPathExp, jsonPathCond, err := processJSONPathInput(splitStr[1], splitStr[2]) + if err != nil { + return nil, err + } + j, err := newJSONPathParser(jsonPathExp) + if err != nil { + return nil, err + } + return JSONPathWait{ + jsonPathCondition: jsonPathCond, + jsonPathParser: j, + errOut: errOut, + }.IsJSONPathConditionMet, nil + } + + return nil, fmt.Errorf("unrecognized condition: %q", condition) +} + +// newJSONPathParser will create a new JSONPath parser based on the jsonPathExpression +func newJSONPathParser(jsonPathExpression string) (*jsonpath.JSONPath, error) { + j := jsonpath.New("wait") + if jsonPathExpression == "" { + return nil, errors.New("jsonpath expression cannot be empty") + } + if err := j.Parse(jsonPathExpression); err != nil { + return nil, err + } + return j, nil +} + +// processJSONPathInput will parses the user's JSONPath input and process the string +func processJSONPathInput(jsonPathExpression, jsonPathCond string) (string, string, error) { + relaxedJSONPathExp, err := cmdget.RelaxedJSONPathExpression(jsonPathExpression) + if err != nil { + return "", "", err + } + if jsonPathCond == "" { + return "", "", errors.New("jsonpath wait condition cannot be empty") + } + jsonPathCond = strings.Trim(jsonPathCond, `'"`) + + return relaxedJSONPathExp, jsonPathCond, nil +} + +// ResourceLocation holds the location of a resource +type ResourceLocation struct { + GroupResource schema.GroupResource + Namespace string + Name string +} + +// UIDMap maps ResourceLocation with UID +type UIDMap map[ResourceLocation]types.UID + +// WaitOptions is a set of options that allows you to wait. 
This is the object reflects the runtime needs of a wait +// command, making the logic itself easy to unit test with our existing mocks. +type WaitOptions struct { + ResourceFinder genericclioptions.ResourceFinder + // UIDMap maps a resource location to a UID. It is optional, but ConditionFuncs may choose to use it to make the result + // more reliable. For instance, delete can look for UID consistency during delegated calls. + UIDMap UIDMap + DynamicClient dynamic.Interface + Timeout time.Duration + ForCondition string + + Printer printers.ResourcePrinter + ConditionFn ConditionFunc + genericclioptions.IOStreams +} + +// ConditionFunc is the interface for providing condition checks +type ConditionFunc func(info *resource.Info, o *WaitOptions) (finalObject runtime.Object, done bool, err error) + +// RunWait runs the waiting logic +func (o *WaitOptions) RunWait() error { + visitCount := 0 + visitFunc := func(info *resource.Info, err error) error { + if err != nil { + return err + } + + visitCount++ + finalObject, success, err := o.ConditionFn(info, o) + if success { + o.Printer.PrintObj(finalObject, o.Out) + return nil + } + if err == nil { + return fmt.Errorf("%v unsatisified for unknown reason", finalObject) + } + return err + } + visitor := o.ResourceFinder.Do() + isForDelete := strings.ToLower(o.ForCondition) == "delete" + if visitor, ok := visitor.(*resource.Result); ok && isForDelete { + visitor.IgnoreErrors(apierrors.IsNotFound) + } + + err := visitor.Visit(visitFunc) + if err != nil { + return err + } + if visitCount == 0 && !isForDelete { + return errNoMatchingResources + } + return err +} + +// IsDeleted is a condition func for waiting for something to be deleted +func IsDeleted(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { + endTime := time.Now().Add(o.Timeout) + for { + if len(info.Name) == 0 { + return info.Object, false, fmt.Errorf("resource name must be provided") + } + + nameSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String() + + // List with a name field selector to get the current resourceVersion to watch from (not the object's resourceVersion) + gottenObjList, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), metav1.ListOptions{FieldSelector: nameSelector}) + if apierrors.IsNotFound(err) { + return info.Object, true, nil + } + if err != nil { + // TODO this could do something slightly fancier if we wish + return info.Object, false, err + } + if len(gottenObjList.Items) != 1 { + return info.Object, true, nil + } + gottenObj := &gottenObjList.Items[0] + resourceLocation := ResourceLocation{ + GroupResource: info.Mapping.Resource.GroupResource(), + Namespace: gottenObj.GetNamespace(), + Name: gottenObj.GetName(), + } + if uid, ok := o.UIDMap[resourceLocation]; ok { + if gottenObj.GetUID() != uid { + return gottenObj, true, nil + } + } + + watchOptions := metav1.ListOptions{} + watchOptions.FieldSelector = nameSelector + watchOptions.ResourceVersion = gottenObjList.GetResourceVersion() + objWatch, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), watchOptions) + if err != nil { + return gottenObj, false, err + } + + timeout := endTime.Sub(time.Now()) + errWaitTimeoutWithName := extendErrWaitTimeout(wait.ErrWaitTimeout, info) + if timeout < 0 { + // we're out of time + return gottenObj, false, errWaitTimeoutWithName + } + + ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout) + watchEvent, err 
:= watchtools.UntilWithoutRetry(ctx, objWatch, Wait{errOut: o.ErrOut}.IsDeleted) + cancel() + switch { + case err == nil: + return watchEvent.Object, true, nil + case err == watchtools.ErrWatchClosed: + continue + case err == wait.ErrWaitTimeout: + if watchEvent != nil { + return watchEvent.Object, false, errWaitTimeoutWithName + } + return gottenObj, false, errWaitTimeoutWithName + default: + return gottenObj, false, err + } + } +} + +// Wait has helper methods for handling watches, including error handling. +type Wait struct { + errOut io.Writer +} + +// IsDeleted returns true if the object is deleted. It prints any errors it encounters. +func (w Wait) IsDeleted(event watch.Event) (bool, error) { + switch event.Type { + case watch.Error: + // keep waiting in the event we see an error - we expect the watch to be closed by + // the server if the error is unrecoverable. + err := apierrors.FromObject(event.Object) + fmt.Fprintf(w.errOut, "error: An error occurred while waiting for the object to be deleted: %v", err) + return false, nil + case watch.Deleted: + return true, nil + default: + return false, nil + } +} + +type isCondMetFunc func(event watch.Event) (bool, error) +type checkCondFunc func(obj *unstructured.Unstructured) (bool, error) + +// getObjAndCheckCondition will make a List query to the API server to get the object and check if the condition is met using check function. +// If the condition is not met, it will make a Watch query to the server and pass in the condMet function +func getObjAndCheckCondition(info *resource.Info, o *WaitOptions, condMet isCondMetFunc, check checkCondFunc) (runtime.Object, bool, error) { + endTime := time.Now().Add(o.Timeout) + for { + if len(info.Name) == 0 { + return info.Object, false, fmt.Errorf("resource name must be provided") + } + + nameSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String() + + var gottenObj *unstructured.Unstructured + // List with a name field selector to get the current resourceVersion to watch from (not the object's resourceVersion) + gottenObjList, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), metav1.ListOptions{FieldSelector: nameSelector}) + + resourceVersion := "" + switch { + case err != nil: + return info.Object, false, err + case len(gottenObjList.Items) != 1: + resourceVersion = gottenObjList.GetResourceVersion() + default: + gottenObj = &gottenObjList.Items[0] + conditionMet, err := check(gottenObj) + if conditionMet { + return gottenObj, true, nil + } + if err != nil { + return gottenObj, false, err + } + resourceVersion = gottenObjList.GetResourceVersion() + } + + watchOptions := metav1.ListOptions{} + watchOptions.FieldSelector = nameSelector + watchOptions.ResourceVersion = resourceVersion + objWatch, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), watchOptions) + if err != nil { + return gottenObj, false, err + } + + timeout := endTime.Sub(time.Now()) + errWaitTimeoutWithName := extendErrWaitTimeout(wait.ErrWaitTimeout, info) + if timeout < 0 { + // we're out of time + return gottenObj, false, errWaitTimeoutWithName + } + + ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout) + watchEvent, err := watchtools.UntilWithoutRetry(ctx, objWatch, watchtools.ConditionFunc(condMet)) + cancel() + switch { + case err == nil: + return watchEvent.Object, true, nil + case err == watchtools.ErrWatchClosed: + continue + case err == wait.ErrWaitTimeout: + if 
watchEvent != nil { + return watchEvent.Object, false, errWaitTimeoutWithName + } + return gottenObj, false, errWaitTimeoutWithName + default: + return gottenObj, false, err + } + } +} + +// ConditionalWait hold information to check an API status condition +type ConditionalWait struct { + conditionName string + conditionStatus string + // errOut is written to if an error occurs + errOut io.Writer +} + +// IsConditionMet is a conditionfunc for waiting on an API condition to be met +func (w ConditionalWait) IsConditionMet(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { + return getObjAndCheckCondition(info, o, w.isConditionMet, w.checkCondition) +} + +func (w ConditionalWait) checkCondition(obj *unstructured.Unstructured) (bool, error) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil { + return false, err + } + if !found { + return false, nil + } + for _, conditionUncast := range conditions { + condition := conditionUncast.(map[string]interface{}) + name, found, err := unstructured.NestedString(condition, "type") + if !found || err != nil || !strings.EqualFold(name, w.conditionName) { + continue + } + status, found, err := unstructured.NestedString(condition, "status") + if !found || err != nil { + continue + } + generation, found, _ := unstructured.NestedInt64(obj.Object, "metadata", "generation") + if found { + observedGeneration, found := getObservedGeneration(obj, condition) + if found && observedGeneration < generation { + return false, nil + } + } + return strings.EqualFold(status, w.conditionStatus), nil + } + + return false, nil +} + +func (w ConditionalWait) isConditionMet(event watch.Event) (bool, error) { + if event.Type == watch.Error { + // keep waiting in the event we see an error - we expect the watch to be closed by + // the server + err := apierrors.FromObject(event.Object) + fmt.Fprintf(w.errOut, "error: An error occurred while waiting for the condition to be satisfied: %v", err) + return false, nil + } + if event.Type == watch.Deleted { + // this will chain back out, result in another get and an return false back up the chain + return false, nil + } + obj := event.Object.(*unstructured.Unstructured) + return w.checkCondition(obj) +} + +func extendErrWaitTimeout(err error, info *resource.Info) error { + return fmt.Errorf("%s on %s/%s", err.Error(), info.Mapping.Resource.Resource, info.Name) +} + +func getObservedGeneration(obj *unstructured.Unstructured, condition map[string]interface{}) (int64, bool) { + conditionObservedGeneration, found, _ := unstructured.NestedInt64(condition, "observedGeneration") + if found { + return conditionObservedGeneration, true + } + statusObservedGeneration, found, _ := unstructured.NestedInt64(obj.Object, "status", "observedGeneration") + return statusObservedGeneration, found +} + +// JSONPathWait holds a JSONPath Parser which has the ability +// to check for the JSONPath condition and compare with the API server provided JSON output. 
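+// For example, --for=jsonpath='{.status.phase}'=Running produces a JSONPathWait
+// whose parser evaluates .status.phase and whose expected condition value is
+// "Running".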
+type JSONPathWait struct { + jsonPathCondition string + jsonPathParser *jsonpath.JSONPath + // errOut is written to if an error occurs + errOut io.Writer +} + +// IsJSONPathConditionMet fulfills the requirements of the interface ConditionFunc which provides condition check +func (j JSONPathWait) IsJSONPathConditionMet(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { + return getObjAndCheckCondition(info, o, j.isJSONPathConditionMet, j.checkCondition) +} + +// isJSONPathConditionMet is a helper function of IsJSONPathConditionMet +// which check the watch event and check if a JSONPathWait condition is met +func (j JSONPathWait) isJSONPathConditionMet(event watch.Event) (bool, error) { + if event.Type == watch.Error { + // keep waiting in the event we see an error - we expect the watch to be closed by + // the server + err := apierrors.FromObject(event.Object) + fmt.Fprintf(j.errOut, "error: An error occurred while waiting for the condition to be satisfied: %v", err) + return false, nil + } + if event.Type == watch.Deleted { + // this will chain back out, result in another get and an return false back up the chain + return false, nil + } + // event runtime Object can be safely asserted to Unstructed + // because we are working with dynamic client + obj := event.Object.(*unstructured.Unstructured) + return j.checkCondition(obj) +} + +// checkCondition uses JSONPath parser to parse the JSON received from the API server +// and check if it matches the desired condition +func (j JSONPathWait) checkCondition(obj *unstructured.Unstructured) (bool, error) { + queryObj := obj.UnstructuredContent() + parseResults, err := j.jsonPathParser.FindResults(queryObj) + if err != nil { + return false, err + } + if err := verifyParsedJSONPath(parseResults); err != nil { + return false, err + } + isConditionMet, err := compareResults(parseResults[0][0], j.jsonPathCondition) + if err != nil { + return false, err + } + return isConditionMet, nil +} + +// verifyParsedJSONPath verifies the JSON received from the API server is valid. +// It will only accept a single JSON +func verifyParsedJSONPath(results [][]reflect.Value) error { + if len(results) == 0 { + return errors.New("given jsonpath expression does not match any value") + } + if len(results) > 1 { + return errors.New("given jsonpath expression matches more than one list") + } + if len(results[0]) > 1 { + return errors.New("given jsonpath expression matches more than one value") + } + return nil +} + +// compareResults will compare the reflect.Value from the result parsed by the +// JSONPath parser with the expected value given by the value +// +// Since this is coming from an unstructured this can only ever be a primitive, +// map[string]interface{}, or []interface{}. +// We do not support the last two and rely on fmt to handle conversion to string +// and compare the result with user input +func compareResults(r reflect.Value, expectedVal string) (bool, error) { + switch r.Interface().(type) { + case map[string]interface{}, []interface{}: + return false, errors.New("jsonpath leads to a nested object or list which is not supported") + } + s := fmt.Sprintf("%v", r.Interface()) + return strings.TrimSpace(s) == strings.TrimSpace(expectedVal), nil +} diff --git a/vendor/k8s.io/kubectl/pkg/util/apply.go b/vendor/k8s.io/kubectl/pkg/util/apply.go new file mode 100644 index 000000000..77ea59384 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/apply.go @@ -0,0 +1,146 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +var metadataAccessor = meta.NewAccessor() + +// GetOriginalConfiguration retrieves the original configuration of the object +// from the annotation, or nil if no annotation was found. +func GetOriginalConfiguration(obj runtime.Object) ([]byte, error) { + annots, err := metadataAccessor.Annotations(obj) + if err != nil { + return nil, err + } + + if annots == nil { + return nil, nil + } + + original, ok := annots[v1.LastAppliedConfigAnnotation] + if !ok { + return nil, nil + } + + return []byte(original), nil +} + +// SetOriginalConfiguration sets the original configuration of the object +// as the annotation on the object for later use in computing a three way patch. +func setOriginalConfiguration(obj runtime.Object, original []byte) error { + if len(original) < 1 { + return nil + } + + annots, err := metadataAccessor.Annotations(obj) + if err != nil { + return err + } + + if annots == nil { + annots = map[string]string{} + } + + annots[v1.LastAppliedConfigAnnotation] = string(original) + return metadataAccessor.SetAnnotations(obj, annots) +} + +// GetModifiedConfiguration retrieves the modified configuration of the object. +// If annotate is true, it embeds the result as an annotation in the modified +// configuration. If an object was read from the command input, it will use that +// version of the object. Otherwise, it will use the version from the server. +func GetModifiedConfiguration(obj runtime.Object, annotate bool, codec runtime.Encoder) ([]byte, error) { + // First serialize the object without the annotation to prevent recursion, + // then add that serialization to it as the annotation and serialize it again. + var modified []byte + + // Otherwise, use the server side version of the object. + // Get the current annotations from the object. + annots, err := metadataAccessor.Annotations(obj) + if err != nil { + return nil, err + } + + if annots == nil { + annots = map[string]string{} + } + + original := annots[v1.LastAppliedConfigAnnotation] + delete(annots, v1.LastAppliedConfigAnnotation) + if err := metadataAccessor.SetAnnotations(obj, annots); err != nil { + return nil, err + } + + modified, err = runtime.Encode(codec, obj) + if err != nil { + return nil, err + } + + if annotate { + annots[v1.LastAppliedConfigAnnotation] = string(modified) + if err := metadataAccessor.SetAnnotations(obj, annots); err != nil { + return nil, err + } + + modified, err = runtime.Encode(codec, obj) + if err != nil { + return nil, err + } + } + + // Restore the object to its original condition. + annots[v1.LastAppliedConfigAnnotation] = original + if err := metadataAccessor.SetAnnotations(obj, annots); err != nil { + return nil, err + } + + return modified, nil +} + +// updateApplyAnnotation calls CreateApplyAnnotation if the last applied +// configuration annotation is already present. Otherwise, it does nothing. 
+func updateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error { + if original, err := GetOriginalConfiguration(obj); err != nil || len(original) <= 0 { + return err + } + return CreateApplyAnnotation(obj, codec) +} + +// CreateApplyAnnotation gets the modified configuration of the object, +// without embedding it again, and then sets it on the object as the annotation. +func CreateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error { + modified, err := GetModifiedConfiguration(obj, false, codec) + if err != nil { + return err + } + return setOriginalConfiguration(obj, modified) +} + +// CreateOrUpdateAnnotation creates the annotation used by +// kubectl apply only when createAnnotation is true +// Otherwise, only update the annotation when it already exists +func CreateOrUpdateAnnotation(createAnnotation bool, obj runtime.Object, codec runtime.Encoder) error { + if createAnnotation { + return CreateApplyAnnotation(obj, codec) + } + return updateApplyAnnotation(obj, codec) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/pod_port.go b/vendor/k8s.io/kubectl/pkg/util/pod_port.go new file mode 100644 index 000000000..6d78501a8 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/pod_port.go @@ -0,0 +1,36 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "k8s.io/api/core/v1" +) + +// LookupContainerPortNumberByName find containerPort number by its named port name +func LookupContainerPortNumberByName(pod v1.Pod, name string) (int32, error) { + for _, ctr := range pod.Spec.Containers { + for _, ctrportspec := range ctr.Ports { + if ctrportspec.Name == name { + return ctrportspec.ContainerPort, nil + } + } + } + + return int32(-1), fmt.Errorf("Pod '%s' does not have a named port '%s'", pod.Name, name) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/prune/prune.go b/vendor/k8s.io/kubectl/pkg/util/prune/prune.go new file mode 100644 index 000000000..0d49153fe --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/prune/prune.go @@ -0,0 +1,105 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package prune + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type Resource struct { + group string + version string + kind string + namespaced bool +} + +func (pr Resource) String() string { + return fmt.Sprintf("%v/%v, Kind=%v, Namespaced=%v", pr.group, pr.version, pr.kind, pr.namespaced) +} + +func GetRESTMappings(mapper meta.RESTMapper, pruneResources []Resource) (namespaced, nonNamespaced []*meta.RESTMapping, err error) { + if len(pruneResources) == 0 { + // default allowlist + pruneResources = []Resource{ + {"", "v1", "ConfigMap", true}, + {"", "v1", "Endpoints", true}, + {"", "v1", "Namespace", false}, + {"", "v1", "PersistentVolumeClaim", true}, + {"", "v1", "PersistentVolume", false}, + {"", "v1", "Pod", true}, + {"", "v1", "ReplicationController", true}, + {"", "v1", "Secret", true}, + {"", "v1", "Service", true}, + {"batch", "v1", "Job", true}, + {"batch", "v1", "CronJob", true}, + {"networking.k8s.io", "v1", "Ingress", true}, + {"apps", "v1", "DaemonSet", true}, + {"apps", "v1", "Deployment", true}, + {"apps", "v1", "ReplicaSet", true}, + {"apps", "v1", "StatefulSet", true}, + } + } + + for _, resource := range pruneResources { + addedMapping, err := mapper.RESTMapping(schema.GroupKind{Group: resource.group, Kind: resource.kind}, resource.version) + if err != nil { + return nil, nil, fmt.Errorf("invalid resource %v: %v", resource, err) + } + if resource.namespaced { + namespaced = append(namespaced, addedMapping) + } else { + nonNamespaced = append(nonNamespaced, addedMapping) + } + } + + return namespaced, nonNamespaced, nil +} + +func ParseResources(mapper meta.RESTMapper, gvks []string) ([]Resource, error) { + pruneResources := []Resource{} + for _, groupVersionKind := range gvks { + gvk := strings.Split(groupVersionKind, "/") + if len(gvk) != 3 { + return nil, fmt.Errorf("invalid GroupVersionKind format: %v, please follow ", groupVersionKind) + } + + if gvk[0] == "core" { + gvk[0] = "" + } + mapping, err := mapper.RESTMapping(schema.GroupKind{Group: gvk[0], Kind: gvk[2]}, gvk[1]) + if err != nil { + return pruneResources, err + } + var namespaced bool + namespaceScope := mapping.Scope.Name() + switch namespaceScope { + case meta.RESTScopeNameNamespace: + namespaced = true + case meta.RESTScopeNameRoot: + namespaced = false + default: + return pruneResources, fmt.Errorf("Unknown namespace scope: %q", namespaceScope) + } + + pruneResources = append(pruneResources, Resource{gvk[0], gvk[1], gvk[2], namespaced}) + } + return pruneResources, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/util/service_port.go b/vendor/k8s.io/kubectl/pkg/util/service_port.go new file mode 100644 index 000000000..bc56ab7d6 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/service_port.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// LookupContainerPortNumberByServicePort implements +// the handling of resolving container named port, as well as ignoring targetPort when clusterIP=None +// It returns an error when a named port can't find a match (with -1 returned), or when the service does not +// declare such port (with the input port number returned). +func LookupContainerPortNumberByServicePort(svc v1.Service, pod v1.Pod, port int32) (int32, error) { + for _, svcportspec := range svc.Spec.Ports { + if svcportspec.Port != port { + continue + } + if svc.Spec.ClusterIP == v1.ClusterIPNone { + return port, nil + } + if svcportspec.TargetPort.Type == intstr.Int { + if svcportspec.TargetPort.IntValue() == 0 { + // targetPort is omitted, and the IntValue() would be zero + return svcportspec.Port, nil + } + return int32(svcportspec.TargetPort.IntValue()), nil + } + return LookupContainerPortNumberByName(pod, svcportspec.TargetPort.String()) + } + return port, fmt.Errorf("Service %s does not have a service port %d", svc.Name, port) +} + +// LookupServicePortNumberByName find service port number by its named port name +func LookupServicePortNumberByName(svc v1.Service, name string) (int32, error) { + for _, svcportspec := range svc.Spec.Ports { + if svcportspec.Name == name { + return svcportspec.Port, nil + } + } + + return int32(-1), fmt.Errorf("Service '%s' does not have a named port '%s'", svc.Name, name) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/umask.go b/vendor/k8s.io/kubectl/pkg/util/umask.go new file mode 100644 index 000000000..3f0c4e83e --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/umask.go @@ -0,0 +1,29 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "golang.org/x/sys/unix" +) + +// Umask is a wrapper for `unix.Umask()` on non-Windows platforms +func Umask(mask int) (old int, err error) { + return unix.Umask(mask), nil +} diff --git a/vendor/k8s.io/kubectl/pkg/util/umask_windows.go b/vendor/k8s.io/kubectl/pkg/util/umask_windows.go new file mode 100644 index 000000000..67f6efb97 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/umask_windows.go @@ -0,0 +1,29 @@ +//go:build windows +// +build windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "errors" +) + +// Umask returns an error on Windows +func Umask(mask int) (int, error) { + return 0, errors.New("platform and architecture is not supported") +} diff --git a/vendor/k8s.io/kubectl/pkg/util/util.go b/vendor/k8s.io/kubectl/pkg/util/util.go new file mode 100644 index 000000000..af704b091 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/util.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "crypto/md5" + "errors" + "fmt" + "path" + "path/filepath" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. +func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) { + if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { + return metav1.Time{Time: t}, nil + } + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return metav1.Time{}, err + } + return metav1.Time{Time: t}, nil +} + +// HashObject returns the hash of a Object hash by a Codec +func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { + data, err := runtime.Encode(codec, obj) + if err != nil { + return "", err + } + return fmt.Sprintf("%x", md5.Sum(data)), nil +} + +// ParseFileSource parses the source given. +// +// Acceptable formats include: +// 1. source-path: the basename will become the key name +// 2. source-name=source-path: the source-name will become the key name and +// source-path is the path to the key file. +// +// Key names cannot include '='. +func ParseFileSource(source string) (keyName, filePath string, err error) { + numSeparators := strings.Count(source, "=") + switch { + case numSeparators == 0: + return path.Base(filepath.ToSlash(source)), source, nil + case numSeparators == 1 && strings.HasPrefix(source, "="): + return "", "", fmt.Errorf("key name for file path %v missing", strings.TrimPrefix(source, "=")) + case numSeparators == 1 && strings.HasSuffix(source, "="): + return "", "", fmt.Errorf("file path for key name %v missing", strings.TrimSuffix(source, "=")) + case numSeparators > 1: + return "", "", errors.New("key names or file paths cannot contain '='") + default: + components := strings.Split(source, "=") + return components[0], components[1], nil + } +} + +// ParseLiteralSource parses the source key=val pair into its component pieces. +// This functionality is distinguished from strings.SplitN(source, "=", 2) since +// it returns an error in the case of empty keys, values, or a missing equals sign. 
+func ParseLiteralSource(source string) (keyName, value string, err error) { + // leading equal is invalid + if strings.Index(source, "=") == 0 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + // split after the first equal (so values can have the = character) + items := strings.SplitN(source, "=", 2) + if len(items) != 2 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + + return items[0], items[1], nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0c49be90b..bc3349a65 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -202,6 +202,9 @@ github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 ## explicit github.com/inconshreveable/mousetrap +# github.com/jonboulle/clockwork v0.2.2 +## explicit; go 1.13 +github.com/jonboulle/clockwork # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 github.com/josharian/intern @@ -837,6 +840,7 @@ k8s.io/apimachinery/pkg/util/httpstream k8s.io/apimachinery/pkg/util/httpstream/spdy k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json +k8s.io/apimachinery/pkg/util/jsonmergepatch k8s.io/apimachinery/pkg/util/managedfields k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming @@ -1408,14 +1412,20 @@ k8s.io/kube-openapi/pkg/validation/spec ## explicit; go 1.16 k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/apiresources +k8s.io/kubectl/pkg/cmd/apply +k8s.io/kubectl/pkg/cmd/delete k8s.io/kubectl/pkg/cmd/exec k8s.io/kubectl/pkg/cmd/get k8s.io/kubectl/pkg/cmd/util +k8s.io/kubectl/pkg/cmd/util/editor +k8s.io/kubectl/pkg/cmd/util/editor/crlf k8s.io/kubectl/pkg/cmd/util/podcmd +k8s.io/kubectl/pkg/cmd/wait k8s.io/kubectl/pkg/describe k8s.io/kubectl/pkg/polymorphichelpers k8s.io/kubectl/pkg/rawhttp k8s.io/kubectl/pkg/scheme +k8s.io/kubectl/pkg/util k8s.io/kubectl/pkg/util/certificate k8s.io/kubectl/pkg/util/completion k8s.io/kubectl/pkg/util/deployment @@ -1426,6 +1436,7 @@ k8s.io/kubectl/pkg/util/interrupt k8s.io/kubectl/pkg/util/openapi k8s.io/kubectl/pkg/util/openapi/validation k8s.io/kubectl/pkg/util/podutils +k8s.io/kubectl/pkg/util/prune k8s.io/kubectl/pkg/util/qos k8s.io/kubectl/pkg/util/rbac k8s.io/kubectl/pkg/util/resource
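
The vendored k8s.io/kubectl/pkg/util package added above exposes CreateOrUpdateAnnotation, the helper kubectl-style apply uses to record the kubectl.kubernetes.io/last-applied-configuration annotation before sending an object to the server. A minimal sketch of how that helper can be exercised on its own, assuming a hypothetical ConfigMap and the scheme.DefaultJSONEncoder() codec (both chosen here purely for illustration, not taken from this patch):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubectl/pkg/scheme"
	"k8s.io/kubectl/pkg/util"
)

func main() {
	// A hypothetical object standing in for whatever `apply -f` would read;
	// it only needs to satisfy runtime.Object.
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}

	// With createAnnotation=true the helper encodes the object (without the
	// annotation itself) and stores that encoding under
	// kubectl.kubernetes.io/last-applied-configuration, as on first create.
	if err := util.CreateOrUpdateAnnotation(true, cm, scheme.DefaultJSONEncoder()); err != nil {
		fmt.Println("failed to set last-applied-configuration:", err)
		return
	}
	fmt.Println(cm.Annotations[corev1.LastAppliedConfigAnnotation])
}

Passing false instead only refreshes the annotation when it is already present on the object, matching the updateApplyAnnotation path in the vendored code above.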