From a52bfca0b80a994100ae0a76d8e9473b1156a41f Mon Sep 17 00:00:00 2001 From: lonelyCZ <531187475@qq.com> Date: Fri, 22 Apr 2022 13:52:06 +0800 Subject: [PATCH] Add logs function to karmadactl Signed-off-by: lonelyCZ <531187475@qq.com> --- go.mod | 1 + go.sum | 1 + pkg/karmadactl/karmadactl.go | 1 + pkg/karmadactl/logs.go | 473 ++ pkg/util/lifted/logs.go | 70 + pkg/util/lifted/logs_test.go | 111 + vendor/github.com/fatih/camelcase/.travis.yml | 3 + vendor/github.com/fatih/camelcase/LICENSE.md | 20 + vendor/github.com/fatih/camelcase/README.md | 58 + .../github.com/fatih/camelcase/camelcase.go | 90 + .../client-go/util/certificate/csr/csr.go | 365 ++ .../k8s.io/kubectl/pkg/apps/kind_visitor.go | 75 + .../kubectl/pkg/cmd/util/podcmd/podcmd.go | 104 + .../k8s.io/kubectl/pkg/describe/describe.go | 5553 +++++++++++++++++ .../k8s.io/kubectl/pkg/describe/interface.go | 72 + .../attachablepodforobject.go | 54 + .../pkg/polymorphichelpers/canbeexposed.go | 44 + .../kubectl/pkg/polymorphichelpers/helpers.go | 191 + .../kubectl/pkg/polymorphichelpers/history.go | 410 ++ .../pkg/polymorphichelpers/historyviewer.go | 37 + .../pkg/polymorphichelpers/interface.go | 114 + .../pkg/polymorphichelpers/logsforobject.go | 210 + .../mapbasedselectorforobject.go | 160 + .../pkg/polymorphichelpers/objectpauser.go | 65 + .../pkg/polymorphichelpers/objectrestarter.go | 119 + .../pkg/polymorphichelpers/objectresumer.go | 64 + .../pkg/polymorphichelpers/portsforobject.go | 78 + .../polymorphichelpers/protocolsforobject.go | 89 + .../pkg/polymorphichelpers/rollback.go | 500 ++ .../pkg/polymorphichelpers/rollbacker.go | 37 + .../pkg/polymorphichelpers/rollout_status.go | 152 + .../pkg/polymorphichelpers/statusviewer.go | 26 + .../pkg/polymorphichelpers/updatepodspec.go | 90 + .../pkg/util/certificate/certificate.go | 38 + .../kubectl/pkg/util/deployment/deployment.go | 257 + .../pkg/util/event/sorted_event_list.go | 36 + .../kubectl/pkg/util/fieldpath/fieldpath.go | 111 + 
.../kubectl/pkg/util/podutils/podutils.go | 188 + vendor/k8s.io/kubectl/pkg/util/qos/qos.go | 98 + vendor/k8s.io/kubectl/pkg/util/rbac/rbac.go | 135 + .../kubectl/pkg/util/resource/resource.go | 172 + vendor/k8s.io/kubectl/pkg/util/slice/slice.go | 38 + .../kubectl/pkg/util/storage/storage.go | 110 + vendor/modules.txt | 18 + 44 files changed, 10638 insertions(+) create mode 100644 pkg/karmadactl/logs.go create mode 100644 pkg/util/lifted/logs.go create mode 100644 pkg/util/lifted/logs_test.go create mode 100644 vendor/github.com/fatih/camelcase/.travis.yml create mode 100644 vendor/github.com/fatih/camelcase/LICENSE.md create mode 100644 vendor/github.com/fatih/camelcase/README.md create mode 100644 vendor/github.com/fatih/camelcase/camelcase.go create mode 100644 vendor/k8s.io/client-go/util/certificate/csr/csr.go create mode 100644 vendor/k8s.io/kubectl/pkg/apps/kind_visitor.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/podcmd/podcmd.go create mode 100644 vendor/k8s.io/kubectl/pkg/describe/describe.go create mode 100644 vendor/k8s.io/kubectl/pkg/describe/interface.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/attachablepodforobject.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/canbeexposed.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/history.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/historyviewer.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/interface.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/mapbasedselectorforobject.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectpauser.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectrestarter.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectresumer.go 
create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/portsforobject.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/protocolsforobject.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollbacker.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/statusviewer.go create mode 100644 vendor/k8s.io/kubectl/pkg/polymorphichelpers/updatepodspec.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/certificate/certificate.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/deployment/deployment.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/event/sorted_event_list.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/fieldpath/fieldpath.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/qos/qos.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/rbac/rbac.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/resource/resource.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/slice/slice.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/storage/storage.go diff --git a/go.mod b/go.mod index 28267c166..3cb6fc19a 100644 --- a/go.mod +++ b/go.mod @@ -61,6 +61,7 @@ require ( github.com/emicklei/go-restful v2.9.5+incompatible // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect + github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/fvbommel/sortorder v1.0.1 // indirect diff --git a/go.sum b/go.sum index 9544909eb..953056f1b 100644 --- a/go.sum +++ b/go.sum @@ -241,6 +241,7 @@ github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJ 
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= diff --git a/pkg/karmadactl/karmadactl.go b/pkg/karmadactl/karmadactl.go index ac805f1ab..53446bee5 100644 --- a/pkg/karmadactl/karmadactl.go +++ b/pkg/karmadactl/karmadactl.go @@ -56,6 +56,7 @@ func NewKarmadaCtlCommand(out io.Writer, cmdUse, parentCommand string) *cobra.Co rootCmd.AddCommand(NewCmdGet(out, karmadaConfig, parentCommand)) rootCmd.AddCommand(NewCmdTaint(out, karmadaConfig, parentCommand)) rootCmd.AddCommand(NewCmdPromote(out, karmadaConfig, parentCommand)) + rootCmd.AddCommand(NewCmdLogs(out, karmadaConfig, parentCommand)) rootCmd.AddCommand(cmdinit.NewCmdInit(out, parentCommand)) return rootCmd diff --git a/pkg/karmadactl/logs.go b/pkg/karmadactl/logs.go new file mode 100644 index 000000000..ff0693e10 --- /dev/null +++ b/pkg/karmadactl/logs.go @@ -0,0 +1,473 @@ +package karmadactl + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "regexp" + "sync" + "time" + + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/rest" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/polymorphichelpers" + + karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" + 
"github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/util/gclient" + "github.com/karmada-io/karmada/pkg/util/lifted" +) + +const ( + defaultPodLogsTimeout = 20 * time.Second + logsUsageStr = "logs [-f] [-p] (POD | TYPE/NAME) [-c CONTAINER] (-C CLUSTER)" +) + +var ( + selectorTail int64 = 10 + logsUsageErrStr = fmt.Sprintf("expected '%s'.\nPOD or TYPE/NAME is a required argument for the logs command", logsUsageStr) +) + +// NewCmdLogs new logs command. +func NewCmdLogs(out io.Writer, karmadaConfig KarmadaConfig, parentCommand string) *cobra.Command { + ioStreams := genericclioptions.IOStreams{In: getIn, Out: getOut, ErrOut: getErr} + o := NewCommandLogsOptions(ioStreams, false) + cmd := &cobra.Command{ + Use: logsUsageStr, + Short: "Print the logs for a container in a pod in a cluster", + SilenceUsage: true, + Example: getLogsExample(parentCommand), + RunE: func(cmd *cobra.Command, args []string) error { + if err := o.Complete(karmadaConfig, cmd, args); err != nil { + return err + } + if err := o.Validate(); err != nil { + return err + } + if err := o.Run(); err != nil { + return err + } + return nil + }, + } + + o.GlobalCommandOptions.AddFlags(cmd.Flags()) + o.AddFlags(cmd) + + return cmd +} + +// getLogsExample logs examples by cmd type +func getLogsExample(parentCommand string) string { + example := ` +# Return snapshot logs from pod nginx with only one container in cluster(member1)` + "\n" + + fmt.Sprintf("%s logs nginx -C=member1", parentCommand) + ` + +# Return snapshot logs from pod nginx with multi containers in cluster(member1)` + "\n" + + fmt.Sprintf("%s get logs nginx --all-containers=true -C=member1", parentCommand) + ` + +# Return snapshot logs from all containers in pods defined by label app=nginx in cluster(member1)` + "\n" + + fmt.Sprintf("%s get logs -l app=nginx --all-containers=true -C=member1", parentCommand) + ` + +# Return snapshot of previous terminated ruby container logs from pod web-1 in 
cluster(member1)` + "\n" + + fmt.Sprintf("%s get logs -p -c ruby web-1 -C=member1", parentCommand) + ` + +# Begin streaming the logs of the ruby container in pod web-1 in cluster(member1)` + "\n" + + fmt.Sprintf("%s logs -f -c ruby web-1 -C=member1", parentCommand) + ` + +# Begin streaming the logs from all containers in pods defined by label app=nginx in cluster(member1) ` + "\n" + + fmt.Sprintf("%s logs -f -l app=nginx --all-containers=true -C=member1", parentCommand) + ` + +# Display only the most recent 20 lines of output in pod nginx in cluster(member1) ` + "\n" + + fmt.Sprintf("%s logs --tail=20 nginx -C=member1", parentCommand) + ` + +# Show all logs from pod nginx written in the last hour in cluster(member1) ` + "\n" + + fmt.Sprintf("%s --since=1h nginx -C=member1", parentCommand) + return example +} + +// CommandLogsOptions contains the input to the logs command. +type CommandLogsOptions struct { + // global flags + options.GlobalCommandOptions + + Cluster string + Namespace string + ResourceArg string + AllContainers bool + Options runtime.Object + Resources []string + + ConsumeRequestFn func(rest.ResponseWrapper, io.Writer) error + + // PodLogOptions + SinceTime string + Since time.Duration + Follow bool + Previous bool + Timestamps bool + IgnoreLogErrors bool + LimitBytes int64 + Tail int64 + Container string + InsecureSkipTLSVerifyBackend bool + + // whether or not a container name was given via --container + ContainerNameSpecified bool + Selector string + MaxFollowConcurrency int + Prefix bool + + Object runtime.Object + GetPodTimeout time.Duration + RESTClientGetter genericclioptions.RESTClientGetter + LogsForObject polymorphichelpers.LogsForObjectFunc + + genericclioptions.IOStreams + + TailSpecified bool + + containerNameFromRefSpecRegexp *regexp.Regexp + + f cmdutil.Factory +} + +// NewCommandLogsOptions returns a LogsOptions. 
+func NewCommandLogsOptions(streams genericclioptions.IOStreams, allContainers bool) *CommandLogsOptions { + return &CommandLogsOptions{ + IOStreams: streams, + AllContainers: allContainers, + Tail: -1, + MaxFollowConcurrency: 5, + + containerNameFromRefSpecRegexp: regexp.MustCompile(`spec\.(?:initContainers|containers|ephemeralContainers){(.+)}`), + } +} + +// AddFlags adds flags to the specified FlagSet. +func (o *CommandLogsOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().BoolVar(&o.AllContainers, "all-containers", o.AllContainers, "Get all containers' logs in the pod(s).") + cmd.Flags().BoolVarP(&o.Follow, "follow", "f", o.Follow, "Specify if the logs should be streamed.") + cmd.Flags().BoolVar(&o.Timestamps, "timestamps", o.Timestamps, "Include timestamps on each line in the log output") + cmd.Flags().Int64Var(&o.LimitBytes, "limit-bytes", o.LimitBytes, "Maximum bytes of logs to return. Defaults to no limit.") + cmd.Flags().BoolVarP(&o.Previous, "previous", "p", o.Previous, "If true, print the logs for the previous instance of the container in a pod if it exists.") + cmd.Flags().Int64Var(&o.Tail, "tail", o.Tail, "Lines of recent log file to display. Defaults to -1 with no selector, showing all log lines otherwise 10, if a selector is provided.") + cmd.Flags().BoolVar(&o.IgnoreLogErrors, "ignore-errors", o.IgnoreLogErrors, "If watching / following pod logs, allow for any errors that occur to be non-fatal") + cmd.Flags().StringVar(&o.SinceTime, "since-time", o.SinceTime, "Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.") + cmd.Flags().DurationVar(&o.Since, "since", o.Since, "Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. 
Only one of since-time / since may be used.") + cmd.Flags().StringVarP(&o.Container, "container", "c", o.Container, "Print the logs of this container") + cmd.Flags().BoolVar(&o.InsecureSkipTLSVerifyBackend, "insecure-skip-tls-verify-backend", o.InsecureSkipTLSVerifyBackend, + "Skip verifying the identity of the kubelet that logs are requested from. In theory, an attacker could provide invalid log content back. You might want to use this if your kubelet serving certificates have expired.") + cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodLogsTimeout) + cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on.") + cmd.Flags().IntVar(&o.MaxFollowConcurrency, "max-log-requests", o.MaxFollowConcurrency, "Specify maximum number of concurrent logs to follow when using by a selector. Defaults to 5.") + cmd.Flags().BoolVar(&o.Prefix, "prefix", o.Prefix, "Prefix each log line with the log source (pod name and container name)") + + cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", "default", "-n=namespace or -n namespace") + cmd.Flags().StringVarP(&o.Cluster, "cluster", "C", "", "-C=member1") +} + +// Complete ensures that options are valid and marshals them if necessary +func (o *CommandLogsOptions) Complete(karmadaConfig KarmadaConfig, cmd *cobra.Command, args []string) error { + o.ContainerNameSpecified = cmd.Flag("container").Changed + o.TailSpecified = cmd.Flag("tail").Changed + o.Resources = args + + switch len(args) { + case 0: + if len(o.Selector) == 0 { + return cmdutil.UsageErrorf(cmd, "%s", logsUsageErrStr) + } + case 1: + o.ResourceArg = args[0] + if len(o.Selector) != 0 { + return cmdutil.UsageErrorf(cmd, "only a selector (-l) or a POD name is allowed") + } + case 2: + o.ResourceArg = args[0] + o.Container = args[1] + default: + return cmdutil.UsageErrorf(cmd, "%s", logsUsageErrStr) + } + var err error + + o.ConsumeRequestFn = lifted.DefaultConsumeRequest + + o.GetPodTimeout, err = 
cmdutil.GetPodRunningTimeoutFlag(cmd) + if err != nil { + return err + } + + o.Options, err = o.toLogOptions() + if err != nil { + return err + } + + if len(o.Cluster) == 0 { + return fmt.Errorf("must specify a cluster") + } + + karmadaRestConfig, err := karmadaConfig.GetRestConfig(o.KarmadaContext, o.KubeConfig) + if err != nil { + return fmt.Errorf("failed to get control plane rest config. context: %s, kube-config: %s, error: %v", + o.KarmadaContext, o.KubeConfig, err) + } + + clusterInfo, err := o.getClusterInfo(karmadaRestConfig, o.Cluster) + if err != nil { + return err + } + + o.f = getFactory(o.Cluster, clusterInfo) + + o.RESTClientGetter = o.f + o.LogsForObject = polymorphichelpers.LogsForObjectFn + + if err := o.completeObj(); err != nil { + return err + } + + return nil +} + +// Validate checks to the LogsOptions to see if there is sufficient information run the command +func (o *CommandLogsOptions) Validate() error { + if len(o.SinceTime) > 0 && o.Since != 0 { + return fmt.Errorf("at most one of `sinceTime` or `sinceSeconds` may be specified") + } + + logsOptions, ok := o.Options.(*corev1.PodLogOptions) + if !ok { + return errors.New("unexpected logs options object") + } + if o.AllContainers && len(logsOptions.Container) > 0 { + return fmt.Errorf("--all-containers=true should not be specified with container name %s", logsOptions.Container) + } + + if o.ContainerNameSpecified && len(o.Resources) == 2 { + return fmt.Errorf("only one of -c or an inline [CONTAINER] arg is allowed") + } + + if o.LimitBytes < 0 { + return fmt.Errorf("--limit-bytes must be greater than 0") + } + + if logsOptions.SinceSeconds != nil && *logsOptions.SinceSeconds < int64(0) { + return fmt.Errorf("--since must be greater than 0") + } + + if logsOptions.TailLines != nil && *logsOptions.TailLines < -1 { + return fmt.Errorf("--tail must be greater than or equal to -1") + } + + return nil +} + +// Run retrieves a pod log +func (o *CommandLogsOptions) Run() error { + requests, err := 
o.LogsForObject(o.RESTClientGetter, o.Object, o.Options, o.GetPodTimeout, o.AllContainers) + if err != nil { + return err + } + + if o.Follow && len(requests) > 1 { + if len(requests) > o.MaxFollowConcurrency { + return fmt.Errorf( + "you are attempting to follow %d log streams, but maximum allowed concurrency is %d, use --max-log-requests to increase the limit", + len(requests), o.MaxFollowConcurrency, + ) + } + + return o.parallelConsumeRequest(requests) + } + + return o.sequentialConsumeRequest(requests) +} + +func (o *CommandLogsOptions) parallelConsumeRequest(requests map[corev1.ObjectReference]rest.ResponseWrapper) error { + reader, writer := io.Pipe() + wg := &sync.WaitGroup{} + wg.Add(len(requests)) + for objRef, request := range requests { + go func(objRef corev1.ObjectReference, request rest.ResponseWrapper) { + defer wg.Done() + out := o.addPrefixIfNeeded(objRef, writer) + if err := o.ConsumeRequestFn(request, out); err != nil { + if !o.IgnoreLogErrors { + _ = writer.CloseWithError(err) + + // It's important to return here to propagate the error via the pipe + return + } + + fmt.Fprintf(writer, "error: %v\n", err) + } + }(objRef, request) + } + + go func() { + wg.Wait() + writer.Close() + }() + + _, err := io.Copy(o.Out, reader) + return err +} + +func (o *CommandLogsOptions) completeObj() error { + if o.Object == nil { + builder := o.f.NewBuilder(). + WithScheme(gclient.NewSchema(), gclient.NewSchema().PrioritizedVersionsAllGroups()...). + NamespaceParam(o.Namespace).DefaultNamespace(). 
+ SingleResourceType() + if o.ResourceArg != "" { + builder.ResourceNames("pods", o.ResourceArg) + } + if o.Selector != "" { + builder.ResourceTypes("pods").LabelSelectorParam(o.Selector) + } + infos, err := builder.Do().Infos() + if err != nil { + return err + } + if o.Selector == "" && len(infos) != 1 { + return errors.New("expected a resource") + } + o.Object = infos[0].Object + if o.Selector != "" && len(o.Object.(*corev1.PodList).Items) == 0 { + fmt.Fprintf(o.ErrOut, "No resources found in %s namespace.\n", o.Namespace) + } + } + + return nil +} + +func (o *CommandLogsOptions) toLogOptions() (*corev1.PodLogOptions, error) { + logOptions := &corev1.PodLogOptions{ + Container: o.Container, + Follow: o.Follow, + Previous: o.Previous, + Timestamps: o.Timestamps, + InsecureSkipTLSVerifyBackend: o.InsecureSkipTLSVerifyBackend, + } + + if len(o.SinceTime) > 0 { + t, err := lifted.ParseRFC3339(o.SinceTime, metav1.Now) + if err != nil { + return nil, err + } + + logOptions.SinceTime = &t + } + + if o.LimitBytes != 0 { + logOptions.LimitBytes = &o.LimitBytes + } + + if o.Since != 0 { + // round up to the nearest second + sec := int64(o.Since.Round(time.Second)) + logOptions.SinceSeconds = &sec + } + + if len(o.Selector) > 0 && o.Tail == -1 && !o.TailSpecified { + logOptions.TailLines = &selectorTail + } else if o.Tail != -1 { + logOptions.TailLines = &o.Tail + } + + return logOptions, nil +} + +func (o *CommandLogsOptions) sequentialConsumeRequest(requests map[corev1.ObjectReference]rest.ResponseWrapper) error { + for objRef, request := range requests { + out := o.addPrefixIfNeeded(objRef, o.Out) + if err := o.ConsumeRequestFn(request, out); err != nil { + if !o.IgnoreLogErrors { + return err + } + + fmt.Fprintf(o.Out, "error: %v\n", err) + } + } + + return nil +} + +func (o *CommandLogsOptions) addPrefixIfNeeded(ref corev1.ObjectReference, writer io.Writer) io.Writer { + if !o.Prefix || ref.FieldPath == "" || ref.Name == "" { + return writer + } + + // We rely on 
ref.FieldPath to contain a reference to a container + // including a container name (not an index) so we can get a container name + // without making an extra API request. + var containerName string + containerNameMatches := o.containerNameFromRefSpecRegexp.FindStringSubmatch(ref.FieldPath) + if len(containerNameMatches) == 2 { + containerName = containerNameMatches[1] + } + + prefix := fmt.Sprintf("[pod/%s/%s] ", ref.Name, containerName) + return &prefixingWriter{ + prefix: []byte(prefix), + writer: writer, + } +} + +// getClusterInfo get information of cluster +func (o *CommandLogsOptions) getClusterInfo(karmadaRestConfig *rest.Config, name string) (map[string]*ClusterInfo, error) { + clusterClient := karmadaclientset.NewForConfigOrDie(karmadaRestConfig).ClusterV1alpha1().Clusters() + + // check if the cluster exist in karmada control plane + _, err := clusterClient.Get(context.TODO(), o.Cluster, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + clusterInfos := make(map[string]*ClusterInfo) + clusterInfos[name] = &ClusterInfo{} + + clusterInfos[name].APIEndpoint = karmadaRestConfig.Host + fmt.Sprintf(proxyURL, name) + clusterInfos[name].KubeConfig = o.KubeConfig + clusterInfos[name].Context = o.KarmadaContext + if clusterInfos[name].KubeConfig == "" { + env := os.Getenv("KUBECONFIG") + if env != "" { + clusterInfos[name].KubeConfig = env + } else { + clusterInfos[name].KubeConfig = defaultKubeConfig + } + } + + return clusterInfos, nil +} + +type prefixingWriter struct { + prefix []byte + writer io.Writer +} + +func (pw *prefixingWriter) Write(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Perform an "atomic" write of a prefix and p to make sure that it doesn't interleave + // sub-line when used concurrently with io.PipeWrite. 
+ n, err := pw.writer.Write(append(pw.prefix, p...)) + if n > len(p) { + // To comply with the io.Writer interface requirements we must + // return a number of bytes written from p (0 <= n <= len(p)), + // so we are ignoring the length of the prefix here. + return len(p), err + } + return n, err +} diff --git a/pkg/util/lifted/logs.go b/pkg/util/lifted/logs.go new file mode 100644 index 000000000..aae84d4bd --- /dev/null +++ b/pkg/util/lifted/logs.go @@ -0,0 +1,70 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lifted + +import ( + "bufio" + "context" + "io" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" +) + +// DefaultConsumeRequest reads the data from request and writes into +// the out writer. It buffers data from requests until the newline or io.EOF +// occurs in the data, so it doesn't interleave logs sub-line +// when running concurrently. +// +// A successful read returns err == nil, not err == io.EOF. +// Because the function is defined to read from request until io.EOF, it does +// not treat an io.EOF as an error to be reported. 
+func DefaultConsumeRequest(request rest.ResponseWrapper, out io.Writer) error {
+	readCloser, err := request.Stream(context.TODO())
+	if err != nil {
+		return err
+	}
+	defer readCloser.Close()
+
+	r := bufio.NewReader(readCloser)
+	for {
+		bytes, err := r.ReadBytes('\n')
+		if _, err := out.Write(bytes); err != nil {
+			return err
+		}
+
+		if err != nil {
+			if err != io.EOF {
+				return err
+			}
+			return nil
+		}
+	}
+}
+
+// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format.
+// The nowFn parameter is not referenced by the implementation; it is kept for
+// signature compatibility with the upstream kubectl helper this is lifted from.
+func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) {
+	if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil {
+		return metav1.Time{Time: t}, nil
+	}
+	t, err := time.Parse(time.RFC3339, s)
+	if err != nil {
+		return metav1.Time{}, err
+	}
+	return metav1.Time{Time: t}, nil
+}
diff --git a/pkg/util/lifted/logs_test.go b/pkg/util/lifted/logs_test.go
new file mode 100644
index 000000000..71956fe53
--- /dev/null
+++ b/pkg/util/lifted/logs_test.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package lifted + +import ( + "bytes" + "context" + "errors" + "io" + "io/ioutil" + "strings" + "testing" + "testing/iotest" + + restclient "k8s.io/client-go/rest" +) + +func TestDefaultConsumeRequest(t *testing.T) { + tests := []struct { + name string + request restclient.ResponseWrapper + expectedErr string + expectedOut string + }{ + { + name: "error from request stream", + request: &responseWrapperMock{ + err: errors.New("err from the stream"), + }, + expectedErr: "err from the stream", + }, + { + name: "error while reading", + request: &responseWrapperMock{ + data: iotest.TimeoutReader(strings.NewReader("Some data")), + }, + expectedErr: iotest.ErrTimeout.Error(), + expectedOut: "Some data", + }, + { + name: "read with empty string", + request: &responseWrapperMock{ + data: strings.NewReader(""), + }, + expectedOut: "", + }, + { + name: "read without new lines", + request: &responseWrapperMock{ + data: strings.NewReader("some string without a new line"), + }, + expectedOut: "some string without a new line", + }, + { + name: "read with newlines in the middle", + request: &responseWrapperMock{ + data: strings.NewReader("foo\nbar"), + }, + expectedOut: "foo\nbar", + }, + { + name: "read with newline at the end", + request: &responseWrapperMock{ + data: strings.NewReader("foo\n"), + }, + expectedOut: "foo\n", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + buf := &bytes.Buffer{} + err := DefaultConsumeRequest(test.request, buf) + + if err != nil && !strings.Contains(err.Error(), test.expectedErr) { + t.Errorf("%s: expected to find:\n\t%s\nfound:\n\t%s\n", test.name, test.expectedErr, err.Error()) + } + + if buf.String() != test.expectedOut { + t.Errorf("%s: did not get expected log content. 
Got: %s", test.name, buf.String()) + } + }) + } +} + +type responseWrapperMock struct { + data io.Reader + err error +} + +func (r *responseWrapperMock) DoRaw(context.Context) ([]byte, error) { + data, _ := ioutil.ReadAll(r.data) + return data, r.err +} + +func (r *responseWrapperMock) Stream(context.Context) (io.ReadCloser, error) { + return ioutil.NopCloser(r.data), r.err +} diff --git a/vendor/github.com/fatih/camelcase/.travis.yml b/vendor/github.com/fatih/camelcase/.travis.yml new file mode 100644 index 000000000..3489e3871 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: 1.x + diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md new file mode 100644 index 000000000..aa4a536ca --- /dev/null +++ b/vendor/github.com/fatih/camelcase/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md new file mode 100644 index 000000000..105a6ae33 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/README.md @@ -0,0 +1,58 @@ +# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase) + +CamelCase is a Golang (Go) package to split the words of a camelcase type +string into a slice of words. It can be used to convert a camelcase word (lower +or upper case) into any type of word. + +## Splitting rules: + +1. If string is not valid UTF-8, return it without splitting as + single item array. +2. Assign all unicode characters into one of 4 sets: lower case + letters, upper case letters, numbers, and all other characters. +3. Iterate through characters of string, introducing splits + between adjacent characters that belong to different sets. +4. Iterate through array of split strings, and if a given string + is upper case: + * if subsequent string is lower case: + * move last character of upper case string to beginning of + lower case string + +## Install + +```bash +go get github.com/fatih/camelcase +``` + +## Usage and examples + +```go +splitted := camelcase.Split("GolangPackage") + +fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package" +``` + +Both lower camel case and upper camel case are supported. 
For more info please +check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase) + +Below are some example cases: + +``` +"" => [] +"lowercase" => ["lowercase"] +"Class" => ["Class"] +"MyClass" => ["My", "Class"] +"MyC" => ["My", "C"] +"HTML" => ["HTML"] +"PDFLoader" => ["PDF", "Loader"] +"AString" => ["A", "String"] +"SimpleXMLParser" => ["Simple", "XML", "Parser"] +"vimRPCPlugin" => ["vim", "RPC", "Plugin"] +"GL11Version" => ["GL", "11", "Version"] +"99Bottles" => ["99", "Bottles"] +"May5" => ["May", "5"] +"BFG9000" => ["BFG", "9000"] +"BöseÜberraschung" => ["Böse", "Überraschung"] +"Two spaces" => ["Two", " ", "spaces"] +"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] +``` diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go new file mode 100644 index 000000000..02160c9a4 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/camelcase.go @@ -0,0 +1,90 @@ +// Package camelcase is a micro package to split the words of a camelcase type +// string into a slice of words. +package camelcase + +import ( + "unicode" + "unicode/utf8" +) + +// Split splits the camelcase word and returns a list of words. It also +// supports digits. Both lower camel case and upper camel case are supported. 
+// For more info please check: http://en.wikipedia.org/wiki/CamelCase +// +// Examples +// +// "" => [""] +// "lowercase" => ["lowercase"] +// "Class" => ["Class"] +// "MyClass" => ["My", "Class"] +// "MyC" => ["My", "C"] +// "HTML" => ["HTML"] +// "PDFLoader" => ["PDF", "Loader"] +// "AString" => ["A", "String"] +// "SimpleXMLParser" => ["Simple", "XML", "Parser"] +// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] +// "GL11Version" => ["GL", "11", "Version"] +// "99Bottles" => ["99", "Bottles"] +// "May5" => ["May", "5"] +// "BFG9000" => ["BFG", "9000"] +// "BöseÜberraschung" => ["Böse", "Überraschung"] +// "Two spaces" => ["Two", " ", "spaces"] +// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] +// +// Splitting rules +// +// 1) If string is not valid UTF-8, return it without splitting as +// single item array. +// 2) Assign all unicode characters into one of 4 sets: lower case +// letters, upper case letters, numbers, and all other characters. +// 3) Iterate through characters of string, introducing splits +// between adjacent characters that belong to different sets. +// 4) Iterate through array of split strings, and if a given string +// is upper case: +// if subsequent string is lower case: +// move last character of upper case string to beginning of +// lower case string +func Split(src string) (entries []string) { + // don't split invalid utf8 + if !utf8.ValidString(src) { + return []string{src} + } + entries = []string{} + var runes [][]rune + lastClass := 0 + class := 0 + // split into fields based on class of unicode character + for _, r := range src { + switch true { + case unicode.IsLower(r): + class = 1 + case unicode.IsUpper(r): + class = 2 + case unicode.IsDigit(r): + class = 3 + default: + class = 4 + } + if class == lastClass { + runes[len(runes)-1] = append(runes[len(runes)-1], r) + } else { + runes = append(runes, []rune{r}) + } + lastClass = class + } + // handle upper case -> lower case sequences, e.g. 
+ // "PDFL", "oader" -> "PDF", "Loader" + for i := 0; i < len(runes)-1; i++ { + if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) { + runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...) + runes[i] = runes[i][:len(runes[i])-1] + } + } + // construct []string from results + for _, s := range runes { + if len(s) > 0 { + entries = append(entries, string(s)) + } + } + return +} diff --git a/vendor/k8s.io/client-go/util/certificate/csr/csr.go b/vendor/k8s.io/client-go/util/certificate/csr/csr.go new file mode 100644 index 000000000..0017007a2 --- /dev/null +++ b/vendor/k8s.io/client-go/util/certificate/csr/csr.go @@ -0,0 +1,365 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package csr + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "fmt" + "reflect" + "time" + + certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + certutil "k8s.io/client-go/util/cert" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" +) + +// RequestCertificate will either use an existing (if this process has run +// before but not to completion) or create a certificate signing request using the +// PEM encoded CSR and send it to API server. An optional requestedDuration may be passed +// to set the spec.expirationSeconds field on the CSR to control the lifetime of the issued +// certificate. This is not guaranteed as the signer may choose to ignore the request. +func RequestCertificate(client clientset.Interface, csrData []byte, name, signerName string, requestedDuration *time.Duration, usages []certificatesv1.KeyUsage, privateKey interface{}) (reqName string, reqUID types.UID, err error) { + csr := &certificatesv1.CertificateSigningRequest{ + // Username, UID, Groups will be injected by API server. 
+ TypeMeta: metav1.TypeMeta{Kind: "CertificateSigningRequest"}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + Request: csrData, + Usages: usages, + SignerName: signerName, + }, + } + if len(csr.Name) == 0 { + csr.GenerateName = "csr-" + } + if requestedDuration != nil { + csr.Spec.ExpirationSeconds = DurationToExpirationSeconds(*requestedDuration) + } + + reqName, reqUID, err = create(client, csr) + switch { + case err == nil: + return reqName, reqUID, err + + case errors.IsAlreadyExists(err) && len(name) > 0: + klog.Infof("csr for this node already exists, reusing") + req, err := get(client, name) + if err != nil { + return "", "", formatError("cannot retrieve certificate signing request: %v", err) + } + if err := ensureCompatible(req, csr, privateKey); err != nil { + return "", "", fmt.Errorf("retrieved csr is not compatible: %v", err) + } + klog.Infof("csr for this node is still valid") + return req.Name, req.UID, nil + + default: + return "", "", formatError("cannot create certificate signing request: %v", err) + } +} + +func DurationToExpirationSeconds(duration time.Duration) *int32 { + return pointer.Int32(int32(duration / time.Second)) +} + +func ExpirationSecondsToDuration(expirationSeconds int32) time.Duration { + return time.Duration(expirationSeconds) * time.Second +} + +func get(client clientset.Interface, name string) (*certificatesv1.CertificateSigningRequest, error) { + v1req, v1err := client.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{}) + if v1err == nil || !apierrors.IsNotFound(v1err) { + return v1req, v1err + } + + v1beta1req, v1beta1err := client.CertificatesV1beta1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{}) + if v1beta1err != nil { + return nil, v1beta1err + } + + v1req = &certificatesv1.CertificateSigningRequest{ + ObjectMeta: v1beta1req.ObjectMeta, + Spec: certificatesv1.CertificateSigningRequestSpec{ 
+ Request: v1beta1req.Spec.Request, + }, + } + if v1beta1req.Spec.SignerName != nil { + v1req.Spec.SignerName = *v1beta1req.Spec.SignerName + } + for _, usage := range v1beta1req.Spec.Usages { + v1req.Spec.Usages = append(v1req.Spec.Usages, certificatesv1.KeyUsage(usage)) + } + return v1req, nil +} + +func create(client clientset.Interface, csr *certificatesv1.CertificateSigningRequest) (reqName string, reqUID types.UID, err error) { + // only attempt a create via v1 if we specified signerName and usages and are not using the legacy unknown signerName + if len(csr.Spec.Usages) > 0 && len(csr.Spec.SignerName) > 0 && csr.Spec.SignerName != "kubernetes.io/legacy-unknown" { + v1req, v1err := client.CertificatesV1().CertificateSigningRequests().Create(context.TODO(), csr, metav1.CreateOptions{}) + switch { + case v1err != nil && apierrors.IsNotFound(v1err): + // v1 CSR API was not found, continue to try v1beta1 + + case v1err != nil: + // other creation error + return "", "", v1err + + default: + // success + return v1req.Name, v1req.UID, v1err + } + } + + // convert relevant bits to v1beta1 + v1beta1csr := &certificatesv1beta1.CertificateSigningRequest{ + ObjectMeta: csr.ObjectMeta, + Spec: certificatesv1beta1.CertificateSigningRequestSpec{ + SignerName: &csr.Spec.SignerName, + Request: csr.Spec.Request, + }, + } + for _, usage := range csr.Spec.Usages { + v1beta1csr.Spec.Usages = append(v1beta1csr.Spec.Usages, certificatesv1beta1.KeyUsage(usage)) + } + + // create v1beta1 + v1beta1req, v1beta1err := client.CertificatesV1beta1().CertificateSigningRequests().Create(context.TODO(), v1beta1csr, metav1.CreateOptions{}) + if v1beta1err != nil { + return "", "", v1beta1err + } + return v1beta1req.Name, v1beta1req.UID, nil +} + +// WaitForCertificate waits for a certificate to be issued until timeout, or returns an error. 
+func WaitForCertificate(ctx context.Context, client clientset.Interface, reqName string, reqUID types.UID) (certData []byte, err error) { + fieldSelector := fields.OneTermEqualSelector("metadata.name", reqName).String() + + var lw *cache.ListWatch + var obj runtime.Object + for { + // see if the v1 API is available + if _, err := client.CertificatesV1().CertificateSigningRequests().List(ctx, metav1.ListOptions{FieldSelector: fieldSelector}); err == nil { + // watch v1 objects + obj = &certificatesv1.CertificateSigningRequest{} + lw = &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return client.CertificatesV1().CertificateSigningRequests().List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return client.CertificatesV1().CertificateSigningRequests().Watch(ctx, options) + }, + } + break + } else { + klog.V(2).Infof("error fetching v1 certificate signing request: %v", err) + } + + // return if we've timed out + if err := ctx.Err(); err != nil { + return nil, wait.ErrWaitTimeout + } + + // see if the v1beta1 API is available + if _, err := client.CertificatesV1beta1().CertificateSigningRequests().List(ctx, metav1.ListOptions{FieldSelector: fieldSelector}); err == nil { + // watch v1beta1 objects + obj = &certificatesv1beta1.CertificateSigningRequest{} + lw = &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return client.CertificatesV1beta1().CertificateSigningRequests().List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return client.CertificatesV1beta1().CertificateSigningRequests().Watch(ctx, options) + }, + } + break + } else { + klog.V(2).Infof("error fetching v1beta1 certificate signing request: %v", err) + } + + // return if 
we've timed out + if err := ctx.Err(); err != nil { + return nil, wait.ErrWaitTimeout + } + + // wait and try again + time.Sleep(time.Second) + } + + var issuedCertificate []byte + _, err = watchtools.UntilWithSync( + ctx, + lw, + obj, + nil, + func(event watch.Event) (bool, error) { + switch event.Type { + case watch.Modified, watch.Added: + case watch.Deleted: + return false, fmt.Errorf("csr %q was deleted", reqName) + default: + return false, nil + } + + switch csr := event.Object.(type) { + case *certificatesv1.CertificateSigningRequest: + if csr.UID != reqUID { + return false, fmt.Errorf("csr %q changed UIDs", csr.Name) + } + approved := false + for _, c := range csr.Status.Conditions { + if c.Type == certificatesv1.CertificateDenied { + return false, fmt.Errorf("certificate signing request is denied, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == certificatesv1.CertificateFailed { + return false, fmt.Errorf("certificate signing request failed, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == certificatesv1.CertificateApproved { + approved = true + } + } + if approved { + if len(csr.Status.Certificate) > 0 { + klog.V(2).Infof("certificate signing request %s is issued", csr.Name) + issuedCertificate = csr.Status.Certificate + return true, nil + } + klog.V(2).Infof("certificate signing request %s is approved, waiting to be issued", csr.Name) + } + + case *certificatesv1beta1.CertificateSigningRequest: + if csr.UID != reqUID { + return false, fmt.Errorf("csr %q changed UIDs", csr.Name) + } + approved := false + for _, c := range csr.Status.Conditions { + if c.Type == certificatesv1beta1.CertificateDenied { + return false, fmt.Errorf("certificate signing request is denied, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == certificatesv1beta1.CertificateFailed { + return false, fmt.Errorf("certificate signing request failed, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == 
certificatesv1beta1.CertificateApproved { + approved = true + } + } + if approved { + if len(csr.Status.Certificate) > 0 { + klog.V(2).Infof("certificate signing request %s is issued", csr.Name) + issuedCertificate = csr.Status.Certificate + return true, nil + } + klog.V(2).Infof("certificate signing request %s is approved, waiting to be issued", csr.Name) + } + + default: + return false, fmt.Errorf("unexpected type received: %T", event.Object) + } + + return false, nil + }, + ) + if err == wait.ErrWaitTimeout { + return nil, wait.ErrWaitTimeout + } + if err != nil { + return nil, formatError("cannot watch on the certificate signing request: %v", err) + } + + return issuedCertificate, nil +} + +// ensureCompatible ensures that a CSR object is compatible with an original CSR +func ensureCompatible(new, orig *certificatesv1.CertificateSigningRequest, privateKey interface{}) error { + newCSR, err := parseCSR(new.Spec.Request) + if err != nil { + return fmt.Errorf("unable to parse new csr: %v", err) + } + origCSR, err := parseCSR(orig.Spec.Request) + if err != nil { + return fmt.Errorf("unable to parse original csr: %v", err) + } + if !reflect.DeepEqual(newCSR.Subject, origCSR.Subject) { + return fmt.Errorf("csr subjects differ: new: %#v, orig: %#v", newCSR.Subject, origCSR.Subject) + } + if len(new.Spec.SignerName) > 0 && len(orig.Spec.SignerName) > 0 && new.Spec.SignerName != orig.Spec.SignerName { + return fmt.Errorf("csr signerNames differ: new %q, orig: %q", new.Spec.SignerName, orig.Spec.SignerName) + } + signer, ok := privateKey.(crypto.Signer) + if !ok { + return fmt.Errorf("privateKey is not a signer") + } + newCSR.PublicKey = signer.Public() + if err := newCSR.CheckSignature(); err != nil { + return fmt.Errorf("error validating signature new CSR against old key: %v", err) + } + if len(new.Status.Certificate) > 0 { + certs, err := certutil.ParseCertsPEM(new.Status.Certificate) + if err != nil { + return fmt.Errorf("error parsing signed certificate for CSR: 
%v", err) + } + now := time.Now() + for _, cert := range certs { + if now.After(cert.NotAfter) { + return fmt.Errorf("one of the certificates for the CSR has expired: %s", cert.NotAfter) + } + } + } + return nil +} + +// formatError preserves the type of an API message but alters the message. Expects +// a single argument format string, and returns the wrapped error. +func formatError(format string, err error) error { + if s, ok := err.(errors.APIStatus); ok { + se := &errors.StatusError{ErrStatus: s.Status()} + se.ErrStatus.Message = fmt.Sprintf(format, se.ErrStatus.Message) + return se + } + return fmt.Errorf(format, err) +} + +// parseCSR extracts the CSR from the API object and decodes it. +func parseCSR(pemData []byte) (*x509.CertificateRequest, error) { + // extract PEM from request object + block, _ := pem.Decode(pemData) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, fmt.Errorf("PEM block type must be CERTIFICATE REQUEST") + } + return x509.ParseCertificateRequest(block.Bytes) +} diff --git a/vendor/k8s.io/kubectl/pkg/apps/kind_visitor.go b/vendor/k8s.io/kubectl/pkg/apps/kind_visitor.go new file mode 100644 index 000000000..931c63b18 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/apps/kind_visitor.go @@ -0,0 +1,75 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apps + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// KindVisitor is used with GroupKindElement to call a particular function depending on the +// Kind of a schema.GroupKind +type KindVisitor interface { + VisitDaemonSet(kind GroupKindElement) + VisitDeployment(kind GroupKindElement) + VisitJob(kind GroupKindElement) + VisitPod(kind GroupKindElement) + VisitReplicaSet(kind GroupKindElement) + VisitReplicationController(kind GroupKindElement) + VisitStatefulSet(kind GroupKindElement) + VisitCronJob(kind GroupKindElement) +} + +// GroupKindElement defines a Kubernetes API group elem +type GroupKindElement schema.GroupKind + +// Accept calls the Visit method on visitor that corresponds to elem's Kind +func (elem GroupKindElement) Accept(visitor KindVisitor) error { + switch { + case elem.GroupMatch("apps", "extensions") && elem.Kind == "DaemonSet": + visitor.VisitDaemonSet(elem) + case elem.GroupMatch("apps", "extensions") && elem.Kind == "Deployment": + visitor.VisitDeployment(elem) + case elem.GroupMatch("batch") && elem.Kind == "Job": + visitor.VisitJob(elem) + case elem.GroupMatch("", "core") && elem.Kind == "Pod": + visitor.VisitPod(elem) + case elem.GroupMatch("apps", "extensions") && elem.Kind == "ReplicaSet": + visitor.VisitReplicaSet(elem) + case elem.GroupMatch("", "core") && elem.Kind == "ReplicationController": + visitor.VisitReplicationController(elem) + case elem.GroupMatch("apps") && elem.Kind == "StatefulSet": + visitor.VisitStatefulSet(elem) + case elem.GroupMatch("batch") && elem.Kind == "CronJob": + visitor.VisitCronJob(elem) + default: + return fmt.Errorf("no visitor method exists for %v", elem) + } + return nil +} + +// GroupMatch returns true if and only if elem's group matches one +// of the group arguments +func (elem GroupKindElement) GroupMatch(groups ...string) bool { + for _, g := range groups { + if elem.Group == g { + return true + } + } + return false +} diff --git 
a/vendor/k8s.io/kubectl/pkg/cmd/util/podcmd/podcmd.go b/vendor/k8s.io/kubectl/pkg/cmd/util/podcmd/podcmd.go new file mode 100644 index 000000000..1c4644193 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/podcmd/podcmd.go @@ -0,0 +1,104 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podcmd + +import ( + "fmt" + "io" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +// DefaultContainerAnnotationName is an annotation name that can be used to preselect the interesting container +// from a pod when running kubectl. +const DefaultContainerAnnotationName = "kubectl.kubernetes.io/default-container" + +// FindContainerByName selects the named container from the spec of +// the provided pod or return nil if no such container exists. 
+func FindContainerByName(pod *v1.Pod, name string) (*v1.Container, string) { + for i := range pod.Spec.Containers { + if pod.Spec.Containers[i].Name == name { + return &pod.Spec.Containers[i], fmt.Sprintf("spec.containers{%s}", name) + } + } + for i := range pod.Spec.InitContainers { + if pod.Spec.InitContainers[i].Name == name { + return &pod.Spec.InitContainers[i], fmt.Sprintf("spec.initContainers{%s}", name) + } + } + for i := range pod.Spec.EphemeralContainers { + if pod.Spec.EphemeralContainers[i].Name == name { + return (*v1.Container)(&pod.Spec.EphemeralContainers[i].EphemeralContainerCommon), fmt.Sprintf("spec.ephemeralContainers{%s}", name) + } + } + return nil, "" +} + +// FindOrDefaultContainerByName defaults a container for a pod to the first container if any +// exists, or returns an error. It will print a message to the user indicating a default was +// selected if there was more than one container. +func FindOrDefaultContainerByName(pod *v1.Pod, name string, quiet bool, warn io.Writer) (*v1.Container, error) { + var container *v1.Container + + if len(name) > 0 { + container, _ = FindContainerByName(pod, name) + if container == nil { + return nil, fmt.Errorf("container %s not found in pod %s", name, pod.Name) + } + return container, nil + } + + // this should never happen, but just in case + if len(pod.Spec.Containers) == 0 { + return nil, fmt.Errorf("pod %s/%s does not have any containers", pod.Namespace, pod.Name) + } + + // read the default container the annotation as per + // https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/2227-kubectl-default-container + if name := pod.Annotations[DefaultContainerAnnotationName]; len(name) > 0 { + if container, _ = FindContainerByName(pod, name); container != nil { + klog.V(4).Infof("Defaulting container name from annotation %s", container.Name) + return container, nil + } + klog.V(4).Infof("Default container name from annotation %s was not found in the pod", name) + } + + // pick the first 
container as per existing behavior + container = &pod.Spec.Containers[0] + if !quiet && (len(pod.Spec.Containers) > 1 || len(pod.Spec.InitContainers) > 0 || len(pod.Spec.EphemeralContainers) > 0) { + fmt.Fprintf(warn, "Defaulted container %q out of: %s\n", container.Name, allContainerNames(pod)) + } + + klog.V(4).Infof("Defaulting container name to %s", container.Name) + return &pod.Spec.Containers[0], nil +} + +func allContainerNames(pod *v1.Pod) string { + var containers []string + for _, container := range pod.Spec.Containers { + containers = append(containers, container.Name) + } + for _, container := range pod.Spec.EphemeralContainers { + containers = append(containers, fmt.Sprintf("%s (ephem)", container.Name)) + } + for _, container := range pod.Spec.InitContainers { + containers = append(containers, fmt.Sprintf("%s (init)", container.Name)) + } + return strings.Join(containers, ", ") +} diff --git a/vendor/k8s.io/kubectl/pkg/describe/describe.go b/vendor/k8s.io/kubectl/pkg/describe/describe.go new file mode 100644 index 000000000..558835cd2 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/describe/describe.go @@ -0,0 +1,5553 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package describe + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "io" + "net" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" + "unicode" + + "github.com/fatih/camelcase" + + appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + schedulingv1 "k8s.io/api/scheduling/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/duration" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" + runtimeresource "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + clientset "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/reference" + utilcsr "k8s.io/client-go/util/certificate/csr" + "k8s.io/klog/v2" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/certificate" + deploymentutil "k8s.io/kubectl/pkg/util/deployment" + "k8s.io/kubectl/pkg/util/event" + 
"k8s.io/kubectl/pkg/util/fieldpath" + "k8s.io/kubectl/pkg/util/qos" + "k8s.io/kubectl/pkg/util/rbac" + resourcehelper "k8s.io/kubectl/pkg/util/resource" + "k8s.io/kubectl/pkg/util/slice" + storageutil "k8s.io/kubectl/pkg/util/storage" +) + +// Each level has 2 spaces for PrefixWriter +const ( + LEVEL_0 = iota + LEVEL_1 + LEVEL_2 + LEVEL_3 + LEVEL_4 +) + +var ( + // globally skipped annotations + skipAnnotations = sets.NewString(corev1.LastAppliedConfigAnnotation) + + // DescriberFn gives a way to easily override the function for unit testing if needed + DescriberFn DescriberFunc = Describer +) + +// Describer returns a Describer for displaying the specified RESTMapping type or an error. +func Describer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (ResourceDescriber, error) { + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + // try to get a describer + if describer, ok := DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok { + return describer, nil + } + // if this is a kind we don't have a describer for yet, go generic if possible + if genericDescriber, ok := GenericDescriberFor(mapping, clientConfig); ok { + return genericDescriber, nil + } + // otherwise return an unregistered error + return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String()) +} + +// PrefixWriter can write text at various indentation levels. +type PrefixWriter interface { + // Write writes text with the specified indentation level. + Write(level int, format string, a ...interface{}) + // WriteLine writes an entire line with no indentation level. + WriteLine(a ...interface{}) + // Flush forces indentation to be reset. + Flush() +} + +// prefixWriter implements PrefixWriter +type prefixWriter struct { + out io.Writer +} + +var _ PrefixWriter = &prefixWriter{} + +// NewPrefixWriter creates a new PrefixWriter. 
+func NewPrefixWriter(out io.Writer) PrefixWriter { + return &prefixWriter{out: out} +} + +func (pw *prefixWriter) Write(level int, format string, a ...interface{}) { + levelSpace := " " + prefix := "" + for i := 0; i < level; i++ { + prefix += levelSpace + } + fmt.Fprintf(pw.out, prefix+format, a...) +} + +func (pw *prefixWriter) WriteLine(a ...interface{}) { + fmt.Fprintln(pw.out, a...) +} + +func (pw *prefixWriter) Flush() { + if f, ok := pw.out.(flusher); ok { + f.Flush() + } +} + +// nestedPrefixWriter implements PrefixWriter by increasing the level +// before passing text on to some other writer. +type nestedPrefixWriter struct { + PrefixWriter + indent int +} + +var _ PrefixWriter = &prefixWriter{} + +// NewPrefixWriter creates a new PrefixWriter. +func NewNestedPrefixWriter(out PrefixWriter, indent int) PrefixWriter { + return &nestedPrefixWriter{PrefixWriter: out, indent: indent} +} + +func (npw *nestedPrefixWriter) Write(level int, format string, a ...interface{}) { + npw.PrefixWriter.Write(level+npw.indent, format, a...) 
+} + +func (npw *nestedPrefixWriter) WriteLine(a ...interface{}) { + npw.PrefixWriter.Write(npw.indent, "%s", fmt.Sprintln(a...)) +} + +func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]ResourceDescriber, error) { + c, err := clientset.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + + m := map[schema.GroupKind]ResourceDescriber{ + {Group: corev1.GroupName, Kind: "Pod"}: &PodDescriber{c}, + {Group: corev1.GroupName, Kind: "ReplicationController"}: &ReplicationControllerDescriber{c}, + {Group: corev1.GroupName, Kind: "Secret"}: &SecretDescriber{c}, + {Group: corev1.GroupName, Kind: "Service"}: &ServiceDescriber{c}, + {Group: corev1.GroupName, Kind: "ServiceAccount"}: &ServiceAccountDescriber{c}, + {Group: corev1.GroupName, Kind: "Node"}: &NodeDescriber{c}, + {Group: corev1.GroupName, Kind: "LimitRange"}: &LimitRangeDescriber{c}, + {Group: corev1.GroupName, Kind: "ResourceQuota"}: &ResourceQuotaDescriber{c}, + {Group: corev1.GroupName, Kind: "PersistentVolume"}: &PersistentVolumeDescriber{c}, + {Group: corev1.GroupName, Kind: "PersistentVolumeClaim"}: &PersistentVolumeClaimDescriber{c}, + {Group: corev1.GroupName, Kind: "Namespace"}: &NamespaceDescriber{c}, + {Group: corev1.GroupName, Kind: "Endpoints"}: &EndpointsDescriber{c}, + {Group: corev1.GroupName, Kind: "ConfigMap"}: &ConfigMapDescriber{c}, + {Group: corev1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c}, + {Group: discoveryv1beta1.GroupName, Kind: "EndpointSlice"}: &EndpointSliceDescriber{c}, + {Group: discoveryv1.GroupName, Kind: "EndpointSlice"}: &EndpointSliceDescriber{c}, + {Group: policyv1beta1.GroupName, Kind: "PodSecurityPolicy"}: &PodSecurityPolicyDescriber{c}, + {Group: autoscalingv2beta2.GroupName, Kind: "HorizontalPodAutoscaler"}: &HorizontalPodAutoscalerDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, + {Group: networkingv1beta1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, + {Group: 
networkingv1beta1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, + {Group: networkingv1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, + {Group: networkingv1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, + {Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c}, + {Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, + {Group: batchv1beta1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, + {Group: appsv1.GroupName, Kind: "StatefulSet"}: &StatefulSetDescriber{c}, + {Group: appsv1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c}, + {Group: appsv1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c}, + {Group: appsv1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c}, + {Group: certificatesv1beta1.GroupName, Kind: "CertificateSigningRequest"}: &CertificateSigningRequestDescriber{c}, + {Group: storagev1.GroupName, Kind: "StorageClass"}: &StorageClassDescriber{c}, + {Group: storagev1.GroupName, Kind: "CSINode"}: &CSINodeDescriber{c}, + {Group: policyv1beta1.GroupName, Kind: "PodDisruptionBudget"}: &PodDisruptionBudgetDescriber{c}, + {Group: policyv1.GroupName, Kind: "PodDisruptionBudget"}: &PodDisruptionBudgetDescriber{c}, + {Group: rbacv1.GroupName, Kind: "Role"}: &RoleDescriber{c}, + {Group: rbacv1.GroupName, Kind: "ClusterRole"}: &ClusterRoleDescriber{c}, + {Group: rbacv1.GroupName, Kind: "RoleBinding"}: &RoleBindingDescriber{c}, + {Group: rbacv1.GroupName, Kind: "ClusterRoleBinding"}: &ClusterRoleBindingDescriber{c}, + {Group: networkingv1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c}, + {Group: schedulingv1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c}, + } + + return m, nil +} + +// DescriberFor returns the default describe functions for each of the standard +// Kubernetes types. 
+func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (ResourceDescriber, bool) { + describers, err := describerMap(clientConfig) + if err != nil { + klog.V(1).Info(err) + return nil, false + } + + f, ok := describers[kind] + return f, ok +} + +// GenericDescriberFor returns a generic describer for the specified mapping +// that uses only information available from runtime.Unstructured +func GenericDescriberFor(mapping *meta.RESTMapping, clientConfig *rest.Config) (ResourceDescriber, bool) { + // used to fetch the resource + dynamicClient, err := dynamic.NewForConfig(clientConfig) + if err != nil { + return nil, false + } + + // used to get events for the resource + clientSet, err := clientset.NewForConfig(clientConfig) + if err != nil { + return nil, false + } + eventsClient := clientSet.CoreV1() + + return &genericDescriber{mapping, dynamicClient, eventsClient}, true +} + +type genericDescriber struct { + mapping *meta.RESTMapping + dynamic dynamic.Interface + events corev1client.EventsGetter +} + +func (g *genericDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) { + obj, err := g.dynamic.Resource(g.mapping.Resource).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(g.events, obj, describerSettings.ChunkSize) + } + + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", obj.GetName()) + w.Write(LEVEL_0, "Namespace:\t%s\n", obj.GetNamespace()) + printLabelsMultiline(w, "Labels", obj.GetLabels()) + printAnnotationsMultiline(w, "Annotations", obj.GetAnnotations()) + printUnstructuredContent(w, LEVEL_0, obj.UnstructuredContent(), "", ".metadata.name", ".metadata.namespace", ".metadata.labels", ".metadata.annotations") + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} 
+ +func printUnstructuredContent(w PrefixWriter, level int, content map[string]interface{}, skipPrefix string, skip ...string) { + fields := []string{} + for field := range content { + fields = append(fields, field) + } + sort.Strings(fields) + + for _, field := range fields { + value := content[field] + switch typedValue := value.(type) { + case map[string]interface{}: + skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field) + if slice.ContainsString(skip, skipExpr, nil) { + continue + } + w.Write(level, "%s:\n", smartLabelFor(field)) + printUnstructuredContent(w, level+1, typedValue, skipExpr, skip...) + + case []interface{}: + skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field) + if slice.ContainsString(skip, skipExpr, nil) { + continue + } + w.Write(level, "%s:\n", smartLabelFor(field)) + for _, child := range typedValue { + switch typedChild := child.(type) { + case map[string]interface{}: + printUnstructuredContent(w, level+1, typedChild, skipExpr, skip...) + default: + w.Write(level+1, "%v\n", typedChild) + } + } + + default: + skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field) + if slice.ContainsString(skip, skipExpr, nil) { + continue + } + w.Write(level, "%s:\t%v\n", smartLabelFor(field), typedValue) + } + } +} + +func smartLabelFor(field string) string { + // skip creating smart label if field name contains + // special characters other than '-' + if strings.IndexFunc(field, func(r rune) bool { + return !unicode.IsLetter(r) && r != '-' + }) != -1 { + return field + } + + commonAcronyms := []string{"API", "URL", "UID", "OSB", "GUID"} + parts := camelcase.Split(field) + result := make([]string, 0, len(parts)) + for _, part := range parts { + if part == "_" { + continue + } + + if slice.ContainsString(commonAcronyms, strings.ToUpper(part), nil) { + part = strings.ToUpper(part) + } else { + part = strings.Title(part) + } + result = append(result, part) + } + + return strings.Join(result, " ") +} + +// DefaultObjectDescriber can describe the default Kubernetes 
objects. +var DefaultObjectDescriber ObjectDescriber + +func init() { + d := &Describers{} + err := d.Add( + describeLimitRange, + describeQuota, + describePod, + describeService, + describeReplicationController, + describeDaemonSet, + describeNode, + describeNamespace, + ) + if err != nil { + klog.Fatalf("Cannot register describers: %v", err) + } + DefaultObjectDescriber = d +} + +// NamespaceDescriber generates information about a namespace +type NamespaceDescriber struct { + clientset.Interface +} + +func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + ns, err := d.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + resourceQuotaList := &corev1.ResourceQuotaList{} + err = runtimeresource.FollowContinue(&metav1.ListOptions{Limit: describerSettings.ChunkSize}, + func(options metav1.ListOptions) (runtime.Object, error) { + newList, err := d.CoreV1().ResourceQuotas(name).List(context.TODO(), options) + if err != nil { + return nil, runtimeresource.EnhanceListError(err, options, corev1.ResourceQuotas.String()) + } + resourceQuotaList.Items = append(resourceQuotaList.Items, newList.Items...) + return newList, nil + }) + if err != nil { + if apierrors.IsNotFound(err) { + // Server does not support resource quotas. + // Not an error, will not show resource quotas information. + resourceQuotaList = nil + } else { + return "", err + } + } + + limitRangeList := &corev1.LimitRangeList{} + err = runtimeresource.FollowContinue(&metav1.ListOptions{Limit: describerSettings.ChunkSize}, + func(options metav1.ListOptions) (runtime.Object, error) { + newList, err := d.CoreV1().LimitRanges(name).List(context.TODO(), options) + if err != nil { + return nil, runtimeresource.EnhanceListError(err, options, "limitranges") + } + limitRangeList.Items = append(limitRangeList.Items, newList.Items...) 
+ return newList, nil + }) + if err != nil { + if apierrors.IsNotFound(err) { + // Server does not support limit ranges. + // Not an error, will not show limit ranges information. + limitRangeList = nil + } else { + return "", err + } + } + return describeNamespace(ns, resourceQuotaList, limitRangeList) +} + +func describeNamespace(namespace *corev1.Namespace, resourceQuotaList *corev1.ResourceQuotaList, limitRangeList *corev1.LimitRangeList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", namespace.Name) + printLabelsMultiline(w, "Labels", namespace.Labels) + printAnnotationsMultiline(w, "Annotations", namespace.Annotations) + w.Write(LEVEL_0, "Status:\t%s\n", string(namespace.Status.Phase)) + + if len(namespace.Status.Conditions) > 0 { + w.Write(LEVEL_0, "Conditions:\n") + w.Write(LEVEL_1, "Type\tStatus\tLastTransitionTime\tReason\tMessage\n") + w.Write(LEVEL_1, "----\t------\t------------------\t------\t-------\n") + for _, c := range namespace.Status.Conditions { + w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n", + c.Type, + c.Status, + c.LastTransitionTime.Time.Format(time.RFC1123Z), + c.Reason, + c.Message) + } + } + + if resourceQuotaList != nil { + w.Write(LEVEL_0, "\n") + DescribeResourceQuotas(resourceQuotaList, w) + } + + if limitRangeList != nil { + w.Write(LEVEL_0, "\n") + DescribeLimitRanges(limitRangeList, w) + } + + return nil + }) +} + +func describeLimitRangeSpec(spec corev1.LimitRangeSpec, prefix string, w PrefixWriter) { + for i := range spec.Limits { + item := spec.Limits[i] + maxResources := item.Max + minResources := item.Min + defaultLimitResources := item.Default + defaultRequestResources := item.DefaultRequest + ratio := item.MaxLimitRequestRatio + + set := map[corev1.ResourceName]bool{} + for k := range maxResources { + set[k] = true + } + for k := range minResources { + set[k] = true + } + for k := range defaultLimitResources { + set[k] = true + } + for k := range 
defaultRequestResources { + set[k] = true + } + for k := range ratio { + set[k] = true + } + + for k := range set { + // if no value is set, we output - + maxValue := "-" + minValue := "-" + defaultLimitValue := "-" + defaultRequestValue := "-" + ratioValue := "-" + + maxQuantity, maxQuantityFound := maxResources[k] + if maxQuantityFound { + maxValue = maxQuantity.String() + } + + minQuantity, minQuantityFound := minResources[k] + if minQuantityFound { + minValue = minQuantity.String() + } + + defaultLimitQuantity, defaultLimitQuantityFound := defaultLimitResources[k] + if defaultLimitQuantityFound { + defaultLimitValue = defaultLimitQuantity.String() + } + + defaultRequestQuantity, defaultRequestQuantityFound := defaultRequestResources[k] + if defaultRequestQuantityFound { + defaultRequestValue = defaultRequestQuantity.String() + } + + ratioQuantity, ratioQuantityFound := ratio[k] + if ratioQuantityFound { + ratioValue = ratioQuantity.String() + } + + msg := "%s%s\t%v\t%v\t%v\t%v\t%v\t%v\n" + w.Write(LEVEL_0, msg, prefix, item.Type, k, minValue, maxValue, defaultRequestValue, defaultLimitValue, ratioValue) + } + } +} + +// DescribeLimitRanges merges a set of limit range items into a single tabular description +func DescribeLimitRanges(limitRanges *corev1.LimitRangeList, w PrefixWriter) { + if len(limitRanges.Items) == 0 { + w.Write(LEVEL_0, "No LimitRange resource.\n") + return + } + w.Write(LEVEL_0, "Resource Limits\n Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") + w.Write(LEVEL_0, " ----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") + for _, limitRange := range limitRanges.Items { + describeLimitRangeSpec(limitRange.Spec, " ", w) + } +} + +// DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas +func DescribeResourceQuotas(quotas *corev1.ResourceQuotaList, w PrefixWriter) { + if len(quotas.Items) == 0 { + w.Write(LEVEL_0, "No resource 
quota.\n") + return + } + sort.Sort(SortableResourceQuotas(quotas.Items)) + + w.Write(LEVEL_0, "Resource Quotas\n") + for _, q := range quotas.Items { + w.Write(LEVEL_1, "Name:\t%s\n", q.Name) + if len(q.Spec.Scopes) > 0 { + scopes := make([]string, 0, len(q.Spec.Scopes)) + for _, scope := range q.Spec.Scopes { + scopes = append(scopes, string(scope)) + } + sort.Strings(scopes) + w.Write(LEVEL_1, "Scopes:\t%s\n", strings.Join(scopes, ", ")) + for _, scope := range scopes { + helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope)) + if len(helpText) > 0 { + w.Write(LEVEL_1, "* %s\n", helpText) + } + } + } + + w.Write(LEVEL_1, "Resource\tUsed\tHard\n") + w.Write(LEVEL_1, "--------\t---\t---\n") + + resources := make([]corev1.ResourceName, 0, len(q.Status.Hard)) + for resource := range q.Status.Hard { + resources = append(resources, resource) + } + sort.Sort(SortableResourceNames(resources)) + + for _, resource := range resources { + hardQuantity := q.Status.Hard[resource] + usedQuantity := q.Status.Used[resource] + w.Write(LEVEL_1, "%s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String()) + } + } +} + +// LimitRangeDescriber generates information about a limit range +type LimitRangeDescriber struct { + clientset.Interface +} + +func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + lr := d.CoreV1().LimitRanges(namespace) + + limitRange, err := lr.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return describeLimitRange(limitRange) +} + +func describeLimitRange(limitRange *corev1.LimitRange) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", limitRange.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", limitRange.Namespace) + w.Write(LEVEL_0, "Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") + w.Write(LEVEL_0, 
"----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") + describeLimitRangeSpec(limitRange.Spec, "", w) + return nil + }) +} + +// ResourceQuotaDescriber generates information about a resource quota +type ResourceQuotaDescriber struct { + clientset.Interface +} + +func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + rq := d.CoreV1().ResourceQuotas(namespace) + + resourceQuota, err := rq.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + return describeQuota(resourceQuota) +} + +func helpTextForResourceQuotaScope(scope corev1.ResourceQuotaScope) string { + switch scope { + case corev1.ResourceQuotaScopeTerminating: + return "Matches all pods that have an active deadline. These pods have a limited lifespan on a node before being actively terminated by the system." + case corev1.ResourceQuotaScopeNotTerminating: + return "Matches all pods that do not have an active deadline. These pods usually include long running pods whose container command is not expected to terminate." + case corev1.ResourceQuotaScopeBestEffort: + return "Matches all pods that do not have resource requirements set. These pods have a best effort quality of service." + case corev1.ResourceQuotaScopeNotBestEffort: + return "Matches all pods that have at least one resource requirement set. These pods have a burstable or guaranteed quality of service." 
+ default: + return "" + } +} +func describeQuota(resourceQuota *corev1.ResourceQuota) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", resourceQuota.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", resourceQuota.Namespace) + if len(resourceQuota.Spec.Scopes) > 0 { + scopes := make([]string, 0, len(resourceQuota.Spec.Scopes)) + for _, scope := range resourceQuota.Spec.Scopes { + scopes = append(scopes, string(scope)) + } + sort.Strings(scopes) + w.Write(LEVEL_0, "Scopes:\t%s\n", strings.Join(scopes, ", ")) + for _, scope := range scopes { + helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope)) + if len(helpText) > 0 { + w.Write(LEVEL_0, " * %s\n", helpText) + } + } + } + w.Write(LEVEL_0, "Resource\tUsed\tHard\n") + w.Write(LEVEL_0, "--------\t----\t----\n") + + resources := make([]corev1.ResourceName, 0, len(resourceQuota.Status.Hard)) + for resource := range resourceQuota.Status.Hard { + resources = append(resources, resource) + } + sort.Sort(SortableResourceNames(resources)) + + msg := "%v\t%v\t%v\n" + for i := range resources { + resourceName := resources[i] + hardQuantity := resourceQuota.Status.Hard[resourceName] + usedQuantity := resourceQuota.Status.Used[resourceName] + if hardQuantity.Format != usedQuantity.Format { + usedQuantity = *resource.NewQuantity(usedQuantity.Value(), hardQuantity.Format) + } + w.Write(LEVEL_0, msg, resourceName, usedQuantity.String(), hardQuantity.String()) + } + return nil + }) +} + +// PodDescriber generates information about a pod and the replication controllers that +// create it. 
+type PodDescriber struct { + clientset.Interface +} + +func (d *PodDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + pod, err := d.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + if describerSettings.ShowEvents { + eventsInterface := d.CoreV1().Events(namespace) + selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) + initialOpts := metav1.ListOptions{ + FieldSelector: selector.String(), + Limit: describerSettings.ChunkSize, + } + events := &corev1.EventList{} + err2 := runtimeresource.FollowContinue(&initialOpts, + func(options metav1.ListOptions) (runtime.Object, error) { + newList, err := eventsInterface.List(context.TODO(), options) + if err != nil { + return nil, runtimeresource.EnhanceListError(err, options, "events") + } + events.Items = append(events.Items, newList.Items...) + return newList, nil + }) + + if err2 == nil && len(events.Items) > 0 { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Pod '%v': error '%v', but found events.\n", name, err) + DescribeEvents(events, w) + return nil + }) + } + } + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + if ref, err := reference.GetReference(scheme.Scheme, pod); err != nil { + klog.Errorf("Unable to construct reference to '%#v': %v", pod, err) + } else { + ref.Kind = "" + if _, isMirrorPod := pod.Annotations[corev1.MirrorPodAnnotationKey]; isMirrorPod { + ref.UID = types.UID(pod.Annotations[corev1.MirrorPodAnnotationKey]) + } + events, _ = searchEvents(d.CoreV1(), ref, describerSettings.ChunkSize) + } + } + + return describePod(pod, events) +} + +func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", pod.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", pod.Namespace) + if 
pod.Spec.Priority != nil { + w.Write(LEVEL_0, "Priority:\t%d\n", *pod.Spec.Priority) + } + if len(pod.Spec.PriorityClassName) > 0 { + w.Write(LEVEL_0, "Priority Class Name:\t%s\n", stringOrNone(pod.Spec.PriorityClassName)) + } + if pod.Spec.NodeName == "" { + w.Write(LEVEL_0, "Node:\t\n") + } else { + w.Write(LEVEL_0, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP) + } + if pod.Status.StartTime != nil { + w.Write(LEVEL_0, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z)) + } + printLabelsMultiline(w, "Labels", pod.Labels) + printAnnotationsMultiline(w, "Annotations", pod.Annotations) + if pod.DeletionTimestamp != nil { + w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pod.DeletionTimestamp)) + w.Write(LEVEL_0, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds) + } else { + w.Write(LEVEL_0, "Status:\t%s\n", string(pod.Status.Phase)) + } + if len(pod.Status.Reason) > 0 { + w.Write(LEVEL_0, "Reason:\t%s\n", pod.Status.Reason) + } + if len(pod.Status.Message) > 0 { + w.Write(LEVEL_0, "Message:\t%s\n", pod.Status.Message) + } + // remove when .IP field is depreciated + w.Write(LEVEL_0, "IP:\t%s\n", pod.Status.PodIP) + describePodIPs(pod, w, "") + if controlledBy := printController(pod); len(controlledBy) > 0 { + w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) + } + if len(pod.Status.NominatedNodeName) > 0 { + w.Write(LEVEL_0, "NominatedNodeName:\t%s\n", pod.Status.NominatedNodeName) + } + + if len(pod.Spec.InitContainers) > 0 { + describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), w, "") + } + describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), w, "") + if len(pod.Spec.EphemeralContainers) > 0 { + var ec []corev1.Container + for i := range pod.Spec.EphemeralContainers { + ec = append(ec, corev1.Container(pod.Spec.EphemeralContainers[i].EphemeralContainerCommon)) + 
} + describeContainers("Ephemeral Containers", ec, pod.Status.EphemeralContainerStatuses, EnvValueRetriever(pod), w, "") + } + if len(pod.Spec.ReadinessGates) > 0 { + w.Write(LEVEL_0, "Readiness Gates:\n Type\tStatus\n") + for _, g := range pod.Spec.ReadinessGates { + status := "" + for _, c := range pod.Status.Conditions { + if c.Type == g.ConditionType { + status = fmt.Sprintf("%v", c.Status) + break + } + } + w.Write(LEVEL_1, "%v \t%v \n", + g.ConditionType, + status) + } + } + if len(pod.Status.Conditions) > 0 { + w.Write(LEVEL_0, "Conditions:\n Type\tStatus\n") + for _, c := range pod.Status.Conditions { + w.Write(LEVEL_1, "%v \t%v \n", + c.Type, + c.Status) + } + } + describeVolumes(pod.Spec.Volumes, w, "") + if pod.Status.QOSClass != "" { + w.Write(LEVEL_0, "QoS Class:\t%s\n", pod.Status.QOSClass) + } else { + w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod)) + } + printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector) + printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations) + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func printController(controllee metav1.Object) string { + if controllerRef := metav1.GetControllerOf(controllee); controllerRef != nil { + return fmt.Sprintf("%s/%s", controllerRef.Kind, controllerRef.Name) + } + return "" +} + +func describePodIPs(pod *corev1.Pod, w PrefixWriter, space string) { + if len(pod.Status.PodIPs) == 0 { + w.Write(LEVEL_0, "%sIPs:\t\n", space) + return + } + w.Write(LEVEL_0, "%sIPs:\n", space) + for _, ipInfo := range pod.Status.PodIPs { + w.Write(LEVEL_1, "IP:\t%s\n", ipInfo.IP) + } +} + +func describeVolumes(volumes []corev1.Volume, w PrefixWriter, space string) { + if len(volumes) == 0 { + w.Write(LEVEL_0, "%sVolumes:\t\n", space) + return + } + + w.Write(LEVEL_0, "%sVolumes:\n", space) + for _, volume := range volumes { + nameIndent := "" + if len(space) > 0 { + nameIndent = " " + } + w.Write(LEVEL_1, "%s%v:\n", nameIndent, volume.Name) + switch { + 
case volume.VolumeSource.HostPath != nil: + printHostPathVolumeSource(volume.VolumeSource.HostPath, w) + case volume.VolumeSource.EmptyDir != nil: + printEmptyDirVolumeSource(volume.VolumeSource.EmptyDir, w) + case volume.VolumeSource.GCEPersistentDisk != nil: + printGCEPersistentDiskVolumeSource(volume.VolumeSource.GCEPersistentDisk, w) + case volume.VolumeSource.AWSElasticBlockStore != nil: + printAWSElasticBlockStoreVolumeSource(volume.VolumeSource.AWSElasticBlockStore, w) + case volume.VolumeSource.GitRepo != nil: + printGitRepoVolumeSource(volume.VolumeSource.GitRepo, w) + case volume.VolumeSource.Secret != nil: + printSecretVolumeSource(volume.VolumeSource.Secret, w) + case volume.VolumeSource.ConfigMap != nil: + printConfigMapVolumeSource(volume.VolumeSource.ConfigMap, w) + case volume.VolumeSource.NFS != nil: + printNFSVolumeSource(volume.VolumeSource.NFS, w) + case volume.VolumeSource.ISCSI != nil: + printISCSIVolumeSource(volume.VolumeSource.ISCSI, w) + case volume.VolumeSource.Glusterfs != nil: + printGlusterfsVolumeSource(volume.VolumeSource.Glusterfs, w) + case volume.VolumeSource.PersistentVolumeClaim != nil: + printPersistentVolumeClaimVolumeSource(volume.VolumeSource.PersistentVolumeClaim, w) + case volume.VolumeSource.Ephemeral != nil: + printEphemeralVolumeSource(volume.VolumeSource.Ephemeral, w) + case volume.VolumeSource.RBD != nil: + printRBDVolumeSource(volume.VolumeSource.RBD, w) + case volume.VolumeSource.Quobyte != nil: + printQuobyteVolumeSource(volume.VolumeSource.Quobyte, w) + case volume.VolumeSource.DownwardAPI != nil: + printDownwardAPIVolumeSource(volume.VolumeSource.DownwardAPI, w) + case volume.VolumeSource.AzureDisk != nil: + printAzureDiskVolumeSource(volume.VolumeSource.AzureDisk, w) + case volume.VolumeSource.VsphereVolume != nil: + printVsphereVolumeSource(volume.VolumeSource.VsphereVolume, w) + case volume.VolumeSource.Cinder != nil: + printCinderVolumeSource(volume.VolumeSource.Cinder, w) + case 
volume.VolumeSource.PhotonPersistentDisk != nil: + printPhotonPersistentDiskVolumeSource(volume.VolumeSource.PhotonPersistentDisk, w) + case volume.VolumeSource.PortworxVolume != nil: + printPortworxVolumeSource(volume.VolumeSource.PortworxVolume, w) + case volume.VolumeSource.ScaleIO != nil: + printScaleIOVolumeSource(volume.VolumeSource.ScaleIO, w) + case volume.VolumeSource.CephFS != nil: + printCephFSVolumeSource(volume.VolumeSource.CephFS, w) + case volume.VolumeSource.StorageOS != nil: + printStorageOSVolumeSource(volume.VolumeSource.StorageOS, w) + case volume.VolumeSource.FC != nil: + printFCVolumeSource(volume.VolumeSource.FC, w) + case volume.VolumeSource.AzureFile != nil: + printAzureFileVolumeSource(volume.VolumeSource.AzureFile, w) + case volume.VolumeSource.FlexVolume != nil: + printFlexVolumeSource(volume.VolumeSource.FlexVolume, w) + case volume.VolumeSource.Flocker != nil: + printFlockerVolumeSource(volume.VolumeSource.Flocker, w) + case volume.VolumeSource.Projected != nil: + printProjectedVolumeSource(volume.VolumeSource.Projected, w) + case volume.VolumeSource.CSI != nil: + printCSIVolumeSource(volume.VolumeSource.CSI, w) + default: + w.Write(LEVEL_1, "\n") + } + } +} + +func printHostPathVolumeSource(hostPath *corev1.HostPathVolumeSource, w PrefixWriter) { + hostPathType := "" + if hostPath.Type != nil { + hostPathType = string(*hostPath.Type) + } + w.Write(LEVEL_2, "Type:\tHostPath (bare host directory volume)\n"+ + " Path:\t%v\n"+ + " HostPathType:\t%v\n", + hostPath.Path, hostPathType) +} + +func printEmptyDirVolumeSource(emptyDir *corev1.EmptyDirVolumeSource, w PrefixWriter) { + var sizeLimit string + if emptyDir.SizeLimit != nil && emptyDir.SizeLimit.Cmp(resource.Quantity{}) > 0 { + sizeLimit = fmt.Sprintf("%v", emptyDir.SizeLimit) + } else { + sizeLimit = "" + } + w.Write(LEVEL_2, "Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n"+ + " Medium:\t%v\n"+ + " SizeLimit:\t%v\n", + emptyDir.Medium, sizeLimit) +} + +func 
printGCEPersistentDiskVolumeSource(gce *corev1.GCEPersistentDiskVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+ + " PDName:\t%v\n"+ + " FSType:\t%v\n"+ + " Partition:\t%v\n"+ + " ReadOnly:\t%v\n", + gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly) +} + +func printAWSElasticBlockStoreVolumeSource(aws *corev1.AWSElasticBlockStoreVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+ + " VolumeID:\t%v\n"+ + " FSType:\t%v\n"+ + " Partition:\t%v\n"+ + " ReadOnly:\t%v\n", + aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly) +} + +func printGitRepoVolumeSource(git *corev1.GitRepoVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+ + " Repository:\t%v\n"+ + " Revision:\t%v\n", + git.Repository, git.Revision) +} + +func printSecretVolumeSource(secret *corev1.SecretVolumeSource, w PrefixWriter) { + optional := secret.Optional != nil && *secret.Optional + w.Write(LEVEL_2, "Type:\tSecret (a volume populated by a Secret)\n"+ + " SecretName:\t%v\n"+ + " Optional:\t%v\n", + secret.SecretName, optional) +} + +func printConfigMapVolumeSource(configMap *corev1.ConfigMapVolumeSource, w PrefixWriter) { + optional := configMap.Optional != nil && *configMap.Optional + w.Write(LEVEL_2, "Type:\tConfigMap (a volume populated by a ConfigMap)\n"+ + " Name:\t%v\n"+ + " Optional:\t%v\n", + configMap.Name, optional) +} + +func printProjectedVolumeSource(projected *corev1.ProjectedVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tProjected (a volume that contains injected data from multiple sources)\n") + for _, source := range projected.Sources { + if source.Secret != nil { + w.Write(LEVEL_2, "SecretName:\t%v\n"+ + " SecretOptionalName:\t%v\n", + source.Secret.Name, source.Secret.Optional) + } else if source.DownwardAPI != nil { + w.Write(LEVEL_2, 
"DownwardAPI:\ttrue\n") + } else if source.ConfigMap != nil { + w.Write(LEVEL_2, "ConfigMapName:\t%v\n"+ + " ConfigMapOptional:\t%v\n", + source.ConfigMap.Name, source.ConfigMap.Optional) + } else if source.ServiceAccountToken != nil { + w.Write(LEVEL_2, "TokenExpirationSeconds:\t%d\n", + *source.ServiceAccountToken.ExpirationSeconds) + } + } +} + +func printNFSVolumeSource(nfs *corev1.NFSVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+ + " Server:\t%v\n"+ + " Path:\t%v\n"+ + " ReadOnly:\t%v\n", + nfs.Server, nfs.Path, nfs.ReadOnly) +} + +func printQuobyteVolumeSource(quobyte *corev1.QuobyteVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tQuobyte (a Quobyte mount on the host that shares a pod's lifetime)\n"+ + " Registry:\t%v\n"+ + " Volume:\t%v\n"+ + " ReadOnly:\t%v\n", + quobyte.Registry, quobyte.Volume, quobyte.ReadOnly) +} + +func printPortworxVolumeSource(pwxVolume *corev1.PortworxVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tPortworxVolume (a Portworx Volume resource)\n"+ + " VolumeID:\t%v\n", + pwxVolume.VolumeID) +} + +func printISCSIVolumeSource(iscsi *corev1.ISCSIVolumeSource, w PrefixWriter) { + initiator := "" + if iscsi.InitiatorName != nil { + initiator = *iscsi.InitiatorName + } + w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+ + " TargetPortal:\t%v\n"+ + " IQN:\t%v\n"+ + " Lun:\t%v\n"+ + " ISCSIInterface\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n"+ + " Portals:\t%v\n"+ + " DiscoveryCHAPAuth:\t%v\n"+ + " SessionCHAPAuth:\t%v\n"+ + " SecretRef:\t%v\n"+ + " InitiatorName:\t%v\n", + iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiator) +} + +func printISCSIPersistentVolumeSource(iscsi *corev1.ISCSIPersistentVolumeSource, w PrefixWriter) { + 
initiatorName := "" + if iscsi.InitiatorName != nil { + initiatorName = *iscsi.InitiatorName + } + w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+ + " TargetPortal:\t%v\n"+ + " IQN:\t%v\n"+ + " Lun:\t%v\n"+ + " ISCSIInterface\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n"+ + " Portals:\t%v\n"+ + " DiscoveryCHAPAuth:\t%v\n"+ + " SessionCHAPAuth:\t%v\n"+ + " SecretRef:\t%v\n"+ + " InitiatorName:\t%v\n", + iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiatorName) +} + +func printGlusterfsVolumeSource(glusterfs *corev1.GlusterfsVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ + " EndpointsName:\t%v\n"+ + " Path:\t%v\n"+ + " ReadOnly:\t%v\n", + glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly) +} + +func printGlusterfsPersistentVolumeSource(glusterfs *corev1.GlusterfsPersistentVolumeSource, w PrefixWriter) { + endpointsNamespace := "" + if glusterfs.EndpointsNamespace != nil { + endpointsNamespace = *glusterfs.EndpointsNamespace + } + w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ + " EndpointsName:\t%v\n"+ + " EndpointsNamespace:\t%v\n"+ + " Path:\t%v\n"+ + " ReadOnly:\t%v\n", + glusterfs.EndpointsName, endpointsNamespace, glusterfs.Path, glusterfs.ReadOnly) +} + +func printPersistentVolumeClaimVolumeSource(claim *corev1.PersistentVolumeClaimVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n"+ + " ClaimName:\t%v\n"+ + " ReadOnly:\t%v\n", + claim.ClaimName, claim.ReadOnly) +} + +func printEphemeralVolumeSource(ephemeral *corev1.EphemeralVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tEphemeralVolume (an 
inline specification for a volume that gets created and deleted with the pod)\n")
	if ephemeral.VolumeClaimTemplate != nil {
		printPersistentVolumeClaim(NewNestedPrefixWriter(w, LEVEL_2),
			&corev1.PersistentVolumeClaim{
				ObjectMeta: ephemeral.VolumeClaimTemplate.ObjectMeta,
				Spec:       ephemeral.VolumeClaimTemplate.Spec,
			}, false /* not a full PVC */)
	}
}

// printRBDVolumeSource describes an RBD (Rados Block Device) volume source.
func printRBDVolumeSource(rbd *corev1.RBDVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+
		"    CephMonitors:\t%v\n"+
		"    RBDImage:\t%v\n"+
		"    FSType:\t%v\n"+
		"    RBDPool:\t%v\n"+
		"    RadosUser:\t%v\n"+
		"    Keyring:\t%v\n"+
		"    SecretRef:\t%v\n"+
		"    ReadOnly:\t%v\n",
		rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly)
}

// printRBDPersistentVolumeSource describes the persistent-volume flavour of
// an RBD source (SecretRef carries a namespace here).
func printRBDPersistentVolumeSource(rbd *corev1.RBDPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+
		"    CephMonitors:\t%v\n"+
		"    RBDImage:\t%v\n"+
		"    FSType:\t%v\n"+
		"    RBDPool:\t%v\n"+
		"    RadosUser:\t%v\n"+
		"    Keyring:\t%v\n"+
		"    SecretRef:\t%v\n"+
		"    ReadOnly:\t%v\n",
		rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly)
}

// printDownwardAPIVolumeSource lists each downward-API item as
// "source -> mount path".
func printDownwardAPIVolumeSource(d *corev1.DownwardAPIVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tDownwardAPI (a volume populated by information about the pod)\n    Items:\n")
	for _, mapping := range d.Items {
		if mapping.FieldRef != nil {
			w.Write(LEVEL_3, "%v -> %v\n", mapping.FieldRef.FieldPath, mapping.Path)
		}
		if mapping.ResourceFieldRef != nil {
			w.Write(LEVEL_3, "%v -> %v\n", mapping.ResourceFieldRef.Resource, mapping.Path)
		}
	}
}

// printAzureDiskVolumeSource describes an AzureDisk volume source.
//
// BUG FIX: Kind, FSType, CachingMode and ReadOnly are optional *pointer*
// fields in the API; the original dereferenced them unconditionally
// (*d.Kind, *d.FSType, *d.CachingMode, *d.ReadOnly) and would panic on any
// object where defaulting has not filled them in (e.g. hand-built specs or
// embedded templates). Dereference defensively instead.
func printAzureDiskVolumeSource(d *corev1.AzureDiskVolumeSource, w PrefixWriter) {
	kind := ""
	if d.Kind != nil {
		kind = string(*d.Kind)
	}
	fsType := ""
	if d.FSType != nil {
		fsType = *d.FSType
	}
	cachingMode := ""
	if d.CachingMode != nil {
		cachingMode = string(*d.CachingMode)
	}
	readOnly := false
	if d.ReadOnly != nil {
		readOnly = *d.ReadOnly
	}
	w.Write(LEVEL_2, "Type:\tAzureDisk (an Azure Data Disk mount on the host and bind mount to the pod)\n"+
		"    DiskName:\t%v\n"+
		"    DiskURI:\t%v\n"+
		"    Kind: \t%v\n"+
		"    FSType:\t%v\n"+
		"    CachingMode:\t%v\n"+
		"    ReadOnly:\t%v\n",
		d.DiskName, d.DataDiskURI, kind, fsType, cachingMode, readOnly)
}

// printVsphereVolumeSource describes a vSphere virtual disk source.
func printVsphereVolumeSource(vsphere *corev1.VsphereVirtualDiskVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n"+
		"    VolumePath:\t%v\n"+
		"    FSType:\t%v\n"+
		"    StoragePolicyName:\t%v\n",
		vsphere.VolumePath, vsphere.FSType, vsphere.StoragePolicyName)
}

// printPhotonPersistentDiskVolumeSource describes a Photon persistent disk.
func printPhotonPersistentDiskVolumeSource(photon *corev1.PhotonPersistentDiskVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tPhotonPersistentDisk (a Persistent Disk resource in photon platform)\n"+
		"    PdID:\t%v\n"+
		"    FSType:\t%v\n",
		photon.PdID, photon.FSType)
}

// printCinderVolumeSource describes an OpenStack Cinder volume source.
func printCinderVolumeSource(cinder *corev1.CinderVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
		"    VolumeID:\t%v\n"+
		"    FSType:\t%v\n"+
		"    ReadOnly:\t%v\n"+
		"    SecretRef:\t%v\n",
		cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}

// printCinderPersistentVolumeSource describes the persistent-volume flavour
// of a Cinder source.
func printCinderPersistentVolumeSource(cinder *corev1.CinderPersistentVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
		"    VolumeID:\t%v\n"+
		"    FSType:\t%v\n"+
		"    ReadOnly:\t%v\n"+
		"    SecretRef:\t%v\n",
		cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}

// printScaleIOVolumeSource describes a ScaleIO volume source.
func printScaleIOVolumeSource(sio *corev1.ScaleIOVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+
		"    Gateway:\t%v\n"+
		"    System:\t%v\n"+
		"    Protection Domain:\t%v\n"+
		"    Storage Pool:\t%v\n"+
		"    Storage Mode:\t%v\n"+
		"    VolumeName:\t%v\n"+
		"    FSType:\t%v\n"+
		"    ReadOnly:\t%v\n",
		sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, sio.FSType, sio.ReadOnly)
}
+func printScaleIOPersistentVolumeSource(sio *corev1.ScaleIOPersistentVolumeSource, w PrefixWriter) { + var secretNS, secretName string + if sio.SecretRef != nil { + secretName = sio.SecretRef.Name + secretNS = sio.SecretRef.Namespace + } + w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+ + " Gateway:\t%v\n"+ + " System:\t%v\n"+ + " Protection Domain:\t%v\n"+ + " Storage Pool:\t%v\n"+ + " Storage Mode:\t%v\n"+ + " VolumeName:\t%v\n"+ + " SecretName:\t%v\n"+ + " SecretNamespace:\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n", + sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, secretName, secretNS, sio.FSType, sio.ReadOnly) +} + +func printLocalVolumeSource(ls *corev1.LocalVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tLocalVolume (a persistent volume backed by local storage on a node)\n"+ + " Path:\t%v\n", + ls.Path) +} + +func printCephFSVolumeSource(cephfs *corev1.CephFSVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+ + " Monitors:\t%v\n"+ + " Path:\t%v\n"+ + " User:\t%v\n"+ + " SecretFile:\t%v\n"+ + " SecretRef:\t%v\n"+ + " ReadOnly:\t%v\n", + cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly) +} + +func printCephFSPersistentVolumeSource(cephfs *corev1.CephFSPersistentVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+ + " Monitors:\t%v\n"+ + " Path:\t%v\n"+ + " User:\t%v\n"+ + " SecretFile:\t%v\n"+ + " SecretRef:\t%v\n"+ + " ReadOnly:\t%v\n", + cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly) +} + +func printStorageOSVolumeSource(storageos *corev1.StorageOSVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+ + " VolumeName:\t%v\n"+ + " 
VolumeNamespace:\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n", + storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly) +} + +func printStorageOSPersistentVolumeSource(storageos *corev1.StorageOSPersistentVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+ + " VolumeName:\t%v\n"+ + " VolumeNamespace:\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n", + storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly) +} + +func printFCVolumeSource(fc *corev1.FCVolumeSource, w PrefixWriter) { + lun := "" + if fc.Lun != nil { + lun = strconv.Itoa(int(*fc.Lun)) + } + w.Write(LEVEL_2, "Type:\tFC (a Fibre Channel disk)\n"+ + " TargetWWNs:\t%v\n"+ + " LUN:\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n", + strings.Join(fc.TargetWWNs, ", "), lun, fc.FSType, fc.ReadOnly) +} + +func printAzureFileVolumeSource(azureFile *corev1.AzureFileVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+ + " SecretName:\t%v\n"+ + " ShareName:\t%v\n"+ + " ReadOnly:\t%v\n", + azureFile.SecretName, azureFile.ShareName, azureFile.ReadOnly) +} + +func printAzureFilePersistentVolumeSource(azureFile *corev1.AzureFilePersistentVolumeSource, w PrefixWriter) { + ns := "" + if azureFile.SecretNamespace != nil { + ns = *azureFile.SecretNamespace + } + w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+ + " SecretName:\t%v\n"+ + " SecretNamespace:\t%v\n"+ + " ShareName:\t%v\n"+ + " ReadOnly:\t%v\n", + azureFile.SecretName, ns, azureFile.ShareName, azureFile.ReadOnly) +} + +func printFlexPersistentVolumeSource(flex *corev1.FlexPersistentVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ + " Driver:\t%v\n"+ + " FSType:\t%v\n"+ + " 
SecretRef:\t%v\n"+ + " ReadOnly:\t%v\n"+ + " Options:\t%v\n", + flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) +} + +func printFlexVolumeSource(flex *corev1.FlexVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ + " Driver:\t%v\n"+ + " FSType:\t%v\n"+ + " SecretRef:\t%v\n"+ + " ReadOnly:\t%v\n"+ + " Options:\t%v\n", + flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) +} + +func printFlockerVolumeSource(flocker *corev1.FlockerVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tFlocker (a Flocker volume mounted by the Flocker agent)\n"+ + " DatasetName:\t%v\n"+ + " DatasetUUID:\t%v\n", + flocker.DatasetName, flocker.DatasetUUID) +} + +func printCSIVolumeSource(csi *corev1.CSIVolumeSource, w PrefixWriter) { + var readOnly bool + var fsType string + if csi.ReadOnly != nil && *csi.ReadOnly { + readOnly = true + } + if csi.FSType != nil { + fsType = *csi.FSType + } + w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+ + " Driver:\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n", + csi.Driver, fsType, readOnly) + printCSIPersistentVolumeAttributesMultiline(w, "VolumeAttributes", csi.VolumeAttributes) +} + +func printCSIPersistentVolumeSource(csi *corev1.CSIPersistentVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+ + " Driver:\t%v\n"+ + " FSType:\t%v\n"+ + " VolumeHandle:\t%v\n"+ + " ReadOnly:\t%v\n", + csi.Driver, csi.FSType, csi.VolumeHandle, csi.ReadOnly) + printCSIPersistentVolumeAttributesMultiline(w, "VolumeAttributes", csi.VolumeAttributes) +} + +func printCSIPersistentVolumeAttributesMultiline(w PrefixWriter, title string, annotations map[string]string) { + printCSIPersistentVolumeAttributesMultilineIndent(w, "", title, "\t", annotations, sets.NewString()) +} + +func 
printCSIPersistentVolumeAttributesMultilineIndent(w PrefixWriter, initialIndent, title, innerIndent string, attributes map[string]string, skip sets.String) {
	// Prints a CSI volume-attribute map as sorted "key=value" lines,
	// omitting keys present in `skip` and truncating overlong lines.
	w.Write(LEVEL_2, "%s%s:%s", initialIndent, title, innerIndent)

	if len(attributes) == 0 {
		// NOTE(review): the extracted patch shows WriteLine(""); upstream
		// kubectl prints "<none>" — the angle-bracket text was stripped
		// during extraction. Restored.
		w.WriteLine("<none>")
		return
	}

	// Collect the printable keys so they can be emitted in sorted order.
	keys := make([]string, 0, len(attributes))
	for key := range attributes {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	// BUG FIX: the original re-tested len(attributes) here, which can never
	// be zero at this point (the first guard already returned). Test the
	// skip-filtered key set instead, so a map consisting solely of skipped
	// keys still terminates the output line.
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)

	for i, key := range keys {
		if i != 0 {
			// Re-emit the indents so continuation lines align under the title.
			w.Write(LEVEL_2, initialIndent)
			w.Write(LEVEL_2, innerIndent)
		}
		line := fmt.Sprintf("%s=%s", key, attributes[key])
		if len(line) > maxAnnotationLen {
			w.Write(LEVEL_2, "%s...\n", line[:maxAnnotationLen])
		} else {
			w.Write(LEVEL_2, "%s\n", line)
		}
	}
}

// PersistentVolumeDescriber generates information about a persistent volume.
type PersistentVolumeDescriber struct {
	clientset.Interface
}

// Describe fetches the named PersistentVolume (cluster-scoped; the namespace
// argument is ignored) and renders it, optionally with its events.
func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
	c := d.CoreV1().PersistentVolumes()

	pv, err := c.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	var events *corev1.EventList
	if describerSettings.ShowEvents {
		// Best-effort: describe output is still useful without events.
		events, _ = searchEvents(d.CoreV1(), pv, describerSettings.ChunkSize)
	}

	return describePersistentVolume(pv, events)
}

// printVolumeNodeAffinity renders spec.nodeAffinity of a PersistentVolume.
func printVolumeNodeAffinity(w PrefixWriter, affinity *corev1.VolumeNodeAffinity) {
	w.Write(LEVEL_0, "Node Affinity:\t")
	if affinity == nil || affinity.Required == nil {
		// NOTE(review): upstream prints "<none>" here and below; restored
		// (stripped during extraction).
		w.WriteLine("<none>")
		return
	}
	w.WriteLine("")

	if affinity.Required != nil {
		w.Write(LEVEL_1, "Required Terms:\t")
		if len(affinity.Required.NodeSelectorTerms) == 0 {
			w.WriteLine("<none>")
		} else {
			w.WriteLine("")
			for i, term := range affinity.Required.NodeSelectorTerms {
				printNodeSelectorTermsMultilineWithIndent(w, LEVEL_2, fmt.Sprintf("Term %v", i), "\t", term.MatchExpressions)
			}
		}
	}
}

//
printLabelsMultiline prints multiple labels with a user-defined alignment. +func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.NodeSelectorRequirement) { + w.Write(indentLevel, "%s:%s", title, innerIndent) + + if len(reqs) == 0 { + w.WriteLine("") + return + } + + for i, req := range reqs { + if i != 0 { + w.Write(indentLevel, "%s", innerIndent) + } + exprStr := fmt.Sprintf("%s %s", req.Key, strings.ToLower(string(req.Operator))) + if len(req.Values) > 0 { + exprStr = fmt.Sprintf("%s [%s]", exprStr, strings.Join(req.Values, ", ")) + } + w.Write(LEVEL_0, "%s\n", exprStr) + } +} + +func describePersistentVolume(pv *corev1.PersistentVolume, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", pv.Name) + printLabelsMultiline(w, "Labels", pv.ObjectMeta.Labels) + printAnnotationsMultiline(w, "Annotations", pv.ObjectMeta.Annotations) + w.Write(LEVEL_0, "Finalizers:\t%v\n", pv.ObjectMeta.Finalizers) + w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClass(pv)) + if pv.ObjectMeta.DeletionTimestamp != nil { + w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pv.ObjectMeta.DeletionTimestamp)) + } else { + w.Write(LEVEL_0, "Status:\t%v\n", pv.Status.Phase) + } + if pv.Spec.ClaimRef != nil { + w.Write(LEVEL_0, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name) + } else { + w.Write(LEVEL_0, "Claim:\t%s\n", "") + } + w.Write(LEVEL_0, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy) + w.Write(LEVEL_0, "Access Modes:\t%s\n", storageutil.GetAccessModesAsString(pv.Spec.AccessModes)) + if pv.Spec.VolumeMode != nil { + w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pv.Spec.VolumeMode) + } + storage := pv.Spec.Capacity[corev1.ResourceStorage] + w.Write(LEVEL_0, "Capacity:\t%s\n", storage.String()) + printVolumeNodeAffinity(w, 
pv.Spec.NodeAffinity) + w.Write(LEVEL_0, "Message:\t%s\n", pv.Status.Message) + w.Write(LEVEL_0, "Source:\n") + + switch { + case pv.Spec.HostPath != nil: + printHostPathVolumeSource(pv.Spec.HostPath, w) + case pv.Spec.GCEPersistentDisk != nil: + printGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, w) + case pv.Spec.AWSElasticBlockStore != nil: + printAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, w) + case pv.Spec.NFS != nil: + printNFSVolumeSource(pv.Spec.NFS, w) + case pv.Spec.ISCSI != nil: + printISCSIPersistentVolumeSource(pv.Spec.ISCSI, w) + case pv.Spec.Glusterfs != nil: + printGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, w) + case pv.Spec.RBD != nil: + printRBDPersistentVolumeSource(pv.Spec.RBD, w) + case pv.Spec.Quobyte != nil: + printQuobyteVolumeSource(pv.Spec.Quobyte, w) + case pv.Spec.VsphereVolume != nil: + printVsphereVolumeSource(pv.Spec.VsphereVolume, w) + case pv.Spec.Cinder != nil: + printCinderPersistentVolumeSource(pv.Spec.Cinder, w) + case pv.Spec.AzureDisk != nil: + printAzureDiskVolumeSource(pv.Spec.AzureDisk, w) + case pv.Spec.PhotonPersistentDisk != nil: + printPhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, w) + case pv.Spec.PortworxVolume != nil: + printPortworxVolumeSource(pv.Spec.PortworxVolume, w) + case pv.Spec.ScaleIO != nil: + printScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, w) + case pv.Spec.Local != nil: + printLocalVolumeSource(pv.Spec.Local, w) + case pv.Spec.CephFS != nil: + printCephFSPersistentVolumeSource(pv.Spec.CephFS, w) + case pv.Spec.StorageOS != nil: + printStorageOSPersistentVolumeSource(pv.Spec.StorageOS, w) + case pv.Spec.FC != nil: + printFCVolumeSource(pv.Spec.FC, w) + case pv.Spec.AzureFile != nil: + printAzureFilePersistentVolumeSource(pv.Spec.AzureFile, w) + case pv.Spec.FlexVolume != nil: + printFlexPersistentVolumeSource(pv.Spec.FlexVolume, w) + case pv.Spec.Flocker != nil: + printFlockerVolumeSource(pv.Spec.Flocker, w) + case pv.Spec.CSI != nil: + 
printCSIPersistentVolumeSource(pv.Spec.CSI, w) + default: + w.Write(LEVEL_1, "\n") + } + + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +type PersistentVolumeClaimDescriber struct { + clientset.Interface +} + +func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.CoreV1().PersistentVolumeClaims(namespace) + + pvc, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + pc := d.CoreV1().Pods(namespace) + + pods, err := getPodsForPVC(pc, pvc.Name, describerSettings) + if err != nil { + return "", err + } + + events, _ := searchEvents(d.CoreV1(), pvc, describerSettings.ChunkSize) + + return describePersistentVolumeClaim(pvc, events, pods) +} + +func getPodsForPVC(c corev1client.PodInterface, pvcName string, settings DescriberSettings) ([]corev1.Pod, error) { + nsPods, err := getPodsInChunks(c, metav1.ListOptions{Limit: settings.ChunkSize}) + if err != nil { + return []corev1.Pod{}, err + } + + var pods []corev1.Pod + + for _, pod := range nsPods.Items { + for _, volume := range pod.Spec.Volumes { + if volume.VolumeSource.PersistentVolumeClaim != nil && volume.VolumeSource.PersistentVolumeClaim.ClaimName == pvcName { + pods = append(pods, pod) + } + } + } + + return pods, nil +} + +func describePersistentVolumeClaim(pvc *corev1.PersistentVolumeClaim, events *corev1.EventList, pods []corev1.Pod) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + printPersistentVolumeClaim(w, pvc, true) + printPodsMultiline(w, "Used By", pods) + + if len(pvc.Status.Conditions) > 0 { + w.Write(LEVEL_0, "Conditions:\n") + w.Write(LEVEL_1, "Type\tStatus\tLastProbeTime\tLastTransitionTime\tReason\tMessage\n") + w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n") + for _, c := range pvc.Status.Conditions { + w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v 
\t%v\n", + c.Type, + c.Status, + c.LastProbeTime.Time.Format(time.RFC1123Z), + c.LastTransitionTime.Time.Format(time.RFC1123Z), + c.Reason, + c.Message) + } + } + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +// printPersistentVolumeClaim is used for both PVCs and PersistentVolumeClaimTemplate. For the latter, +// we need to skip some fields which have no meaning. +func printPersistentVolumeClaim(w PrefixWriter, pvc *corev1.PersistentVolumeClaim, isFullPVC bool) { + if isFullPVC { + w.Write(LEVEL_0, "Name:\t%s\n", pvc.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", pvc.Namespace) + } + w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(pvc)) + if isFullPVC { + if pvc.ObjectMeta.DeletionTimestamp != nil { + w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pvc.ObjectMeta.DeletionTimestamp)) + } else { + w.Write(LEVEL_0, "Status:\t%v\n", pvc.Status.Phase) + } + } + w.Write(LEVEL_0, "Volume:\t%s\n", pvc.Spec.VolumeName) + printLabelsMultiline(w, "Labels", pvc.Labels) + printAnnotationsMultiline(w, "Annotations", pvc.Annotations) + if isFullPVC { + w.Write(LEVEL_0, "Finalizers:\t%v\n", pvc.ObjectMeta.Finalizers) + } + storage := pvc.Spec.Resources.Requests[corev1.ResourceStorage] + capacity := "" + accessModes := "" + if pvc.Spec.VolumeName != "" { + accessModes = storageutil.GetAccessModesAsString(pvc.Status.AccessModes) + storage = pvc.Status.Capacity[corev1.ResourceStorage] + capacity = storage.String() + } + w.Write(LEVEL_0, "Capacity:\t%s\n", capacity) + w.Write(LEVEL_0, "Access Modes:\t%s\n", accessModes) + if pvc.Spec.VolumeMode != nil { + w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pvc.Spec.VolumeMode) + } + if pvc.Spec.DataSource != nil { + w.Write(LEVEL_0, "DataSource:\n") + if pvc.Spec.DataSource.APIGroup != nil { + w.Write(LEVEL_1, "APIGroup:\t%v\n", *pvc.Spec.DataSource.APIGroup) + } + w.Write(LEVEL_1, "Kind:\t%v\n", pvc.Spec.DataSource.Kind) + w.Write(LEVEL_1, 
"Name:\t%v\n", pvc.Spec.DataSource.Name) + } +} + +func describeContainers(label string, containers []corev1.Container, containerStatuses []corev1.ContainerStatus, + resolverFn EnvVarResolverFunc, w PrefixWriter, space string) { + statuses := map[string]corev1.ContainerStatus{} + for _, status := range containerStatuses { + statuses[status.Name] = status + } + + describeContainersLabel(containers, label, space, w) + + for _, container := range containers { + status, ok := statuses[container.Name] + describeContainerBasicInfo(container, status, ok, space, w) + describeContainerCommand(container, w) + if ok { + describeContainerState(status, w) + } + describeContainerResource(container, w) + describeContainerProbe(container, w) + if len(container.EnvFrom) > 0 { + describeContainerEnvFrom(container, resolverFn, w) + } + describeContainerEnvVars(container, resolverFn, w) + describeContainerVolumes(container, w) + } +} + +func describeContainersLabel(containers []corev1.Container, label, space string, w PrefixWriter) { + none := "" + if len(containers) == 0 { + none = " " + } + w.Write(LEVEL_0, "%s%s:%s\n", space, label, none) +} + +func describeContainerBasicInfo(container corev1.Container, status corev1.ContainerStatus, ok bool, space string, w PrefixWriter) { + nameIndent := "" + if len(space) > 0 { + nameIndent = " " + } + w.Write(LEVEL_1, "%s%v:\n", nameIndent, container.Name) + if ok { + w.Write(LEVEL_2, "Container ID:\t%s\n", status.ContainerID) + } + w.Write(LEVEL_2, "Image:\t%s\n", container.Image) + if ok { + w.Write(LEVEL_2, "Image ID:\t%s\n", status.ImageID) + } + portString := describeContainerPorts(container.Ports) + if strings.Contains(portString, ",") { + w.Write(LEVEL_2, "Ports:\t%s\n", portString) + } else { + w.Write(LEVEL_2, "Port:\t%s\n", stringOrNone(portString)) + } + hostPortString := describeContainerHostPorts(container.Ports) + if strings.Contains(hostPortString, ",") { + w.Write(LEVEL_2, "Host Ports:\t%s\n", hostPortString) + } else { + 
w.Write(LEVEL_2, "Host Port:\t%s\n", stringOrNone(hostPortString))
	}
}

// describeContainerPorts renders container ports as "port/protocol",
// comma-joined.
func describeContainerPorts(cPorts []corev1.ContainerPort) string {
	rendered := make([]string, 0, len(cPorts))
	for i := range cPorts {
		rendered = append(rendered, fmt.Sprintf("%d/%s", cPorts[i].ContainerPort, cPorts[i].Protocol))
	}
	return strings.Join(rendered, ", ")
}

// describeContainerHostPorts renders host ports as "port/protocol",
// comma-joined.
func describeContainerHostPorts(cPorts []corev1.ContainerPort) string {
	rendered := make([]string, 0, len(cPorts))
	for i := range cPorts {
		rendered = append(rendered, fmt.Sprintf("%d/%s", cPorts[i].HostPort, cPorts[i].Protocol))
	}
	return strings.Join(rendered, ", ")
}

// describeContainerCommand prints the container's command and args, one
// element per line, expanding embedded newlines onto separate lines.
func describeContainerCommand(container corev1.Container, w PrefixWriter) {
	writeList := func(header string, items []string) {
		if len(items) == 0 {
			return
		}
		w.Write(LEVEL_2, "%s:\n", header)
		for _, item := range items {
			for _, line := range strings.Split(item, "\n") {
				w.Write(LEVEL_3, "%s\n", line)
			}
		}
	}
	writeList("Command", container.Command)
	writeList("Args", container.Args)
}

// describeContainerResource prints resource limits and requests in sorted
// resource-name order; a section header is emitted only when non-empty.
func describeContainerResource(container corev1.Container, w PrefixWriter) {
	dump := func(header string, list corev1.ResourceList) {
		if len(list) > 0 {
			w.Write(LEVEL_2, "%s:\n", header)
		}
		for _, name := range SortedResourceNames(list) {
			quantity := list[name]
			w.Write(LEVEL_3, "%s:\t%s\n", name, quantity.String())
		}
	}
	dump("Limits", container.Resources.Limits)
	dump("Requests", container.Resources.Requests)
}

// describeContainerState prints the current (and last terminated, if any)
// state plus readiness and restart count.
func describeContainerState(status corev1.ContainerStatus, w PrefixWriter) {
	describeStatus("State", status.State, w)
	if status.LastTerminationState.Terminated != nil {
		describeStatus("Last State", status.LastTerminationState, w)
	}
	w.Write(LEVEL_2, "Ready:\t%v\n", printBool(status.Ready))
	w.Write(LEVEL_2, "Restart
Count:\t%d\n", status.RestartCount)
}

// describeContainerProbe prints the liveness/readiness/startup probe
// summaries for a container, when configured.
func describeContainerProbe(container corev1.Container, w PrefixWriter) {
	if container.LivenessProbe != nil {
		probe := DescribeProbe(container.LivenessProbe)
		w.Write(LEVEL_2, "Liveness:\t%s\n", probe)
	}
	if container.ReadinessProbe != nil {
		probe := DescribeProbe(container.ReadinessProbe)
		w.Write(LEVEL_2, "Readiness:\t%s\n", probe)
	}
	if container.StartupProbe != nil {
		probe := DescribeProbe(container.StartupProbe)
		w.Write(LEVEL_2, "Startup:\t%s\n", probe)
	}
}

// describeContainerVolumes prints a container's volumeMounts and, if any,
// its volumeDevices.
func describeContainerVolumes(container corev1.Container, w PrefixWriter) {
	// Show volumeMounts
	none := ""
	if len(container.VolumeMounts) == 0 {
		// NOTE(review): the extracted patch shows "\t" here; upstream kubectl
		// prints "\t<none>" — angle-bracket text was stripped during
		// extraction. Restored.
		none = "\t<none>"
	}
	w.Write(LEVEL_2, "Mounts:%s\n", none)
	sort.Sort(SortableVolumeMounts(container.VolumeMounts))
	for _, mount := range container.VolumeMounts {
		flags := []string{}
		if mount.ReadOnly {
			flags = append(flags, "ro")
		} else {
			flags = append(flags, "rw")
		}
		if len(mount.SubPath) > 0 {
			flags = append(flags, fmt.Sprintf("path=%q", mount.SubPath))
		}
		w.Write(LEVEL_3, "%s from %s (%s)\n", mount.MountPath, mount.Name, strings.Join(flags, ","))
	}
	// Show volumeDevices if exists
	if len(container.VolumeDevices) > 0 {
		w.Write(LEVEL_2, "Devices:%s\n", none)
		sort.Sort(SortableVolumeDevices(container.VolumeDevices))
		for _, device := range container.VolumeDevices {
			w.Write(LEVEL_3, "%s from %s\n", device.DevicePath, device.Name)
		}
	}
}

// describeContainerEnvVars prints each environment variable of a container.
// Literal values are printed as-is (multi-line values continue indented);
// valueFrom sources show where the value comes from.
func describeContainerEnvVars(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
	none := ""
	if len(container.Env) == 0 {
		none = "\t<none>" // restored "<none>" (stripped during extraction)
	}
	w.Write(LEVEL_2, "Environment:%s\n", none)

	for _, e := range container.Env {
		if e.ValueFrom == nil {
			for i, s := range strings.Split(e.Value, "\n") {
				if i == 0 {
					w.Write(LEVEL_3, "%s:\t%s\n", e.Name, s)
				} else {
					w.Write(LEVEL_3, "\t%s\n", s)
				}
			}
			continue
		}

		switch {
		case e.ValueFrom.FieldRef != nil:
			var valueFrom string
			if resolverFn != nil {
				valueFrom = resolverFn(e)
			}
			w.Write(LEVEL_3, "%s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath)
		case e.ValueFrom.ResourceFieldRef != nil:
			valueFrom, err := resourcehelper.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container)
			if err != nil {
				valueFrom = ""
			}
			resource := e.ValueFrom.ResourceFieldRef.Resource
			if valueFrom == "0" && (resource == "limits.cpu" || resource == "limits.memory") {
				valueFrom = "node allocatable"
			}
			w.Write(LEVEL_3, "%s:\t%s (%s)\n", e.Name, valueFrom, resource)
		case e.ValueFrom.SecretKeyRef != nil:
			optional := e.ValueFrom.SecretKeyRef.Optional != nil && *e.ValueFrom.SecretKeyRef.Optional
			// BUG FIX: the format string had two verbs ("%s" and "%t") but was
			// given four arguments (name, key, secret name, optional), so fmt
			// appends "%!(EXTRA string=..., bool=...)" noise to the output.
			// Restored the upstream format, which consumes all four arguments
			// (its "<set to ...>" text was stripped during extraction).
			w.Write(LEVEL_3, "%s:\t<set to the key '%s' in secret '%s'>\tOptional: %t\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name, optional)
		case e.ValueFrom.ConfigMapKeyRef != nil:
			optional := e.ValueFrom.ConfigMapKeyRef.Optional != nil && *e.ValueFrom.ConfigMapKeyRef.Optional
			// BUG FIX: same verb/argument mismatch as the secret case above.
			w.Write(LEVEL_3, "%s:\t<set to the key '%s' of config map '%s'>\tOptional: %t\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name, optional)
		}
	}
}

// describeContainerEnvFrom prints the container's envFrom sources
// (config maps / secrets), with optionality and any key prefix.
func describeContainerEnvFrom(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
	none := ""
	if len(container.EnvFrom) == 0 {
		none = "\t<none>" // restored "<none>" (stripped during extraction)
	}
	w.Write(LEVEL_2, "Environment Variables from:%s\n", none)

	for _, e := range container.EnvFrom {
		from := ""
		name := ""
		optional := false
		if e.ConfigMapRef != nil {
			from = "ConfigMap"
			name = e.ConfigMapRef.Name
			optional = e.ConfigMapRef.Optional != nil && *e.ConfigMapRef.Optional
		} else if e.SecretRef != nil {
			from = "Secret"
			name = e.SecretRef.Name
			optional = e.SecretRef.Optional != nil && *e.SecretRef.Optional
		}
		if len(e.Prefix) == 0 {
			w.Write(LEVEL_3, "%s\t%s\tOptional: %t\n", name, from, optional)
		} else {
			w.Write(LEVEL_3, "%s\t%s with prefix '%s'\tOptional: %t\n", name, from, e.Prefix, optional)
		}
	}
}

// DescribeProbe
is exported for consumers in other API groups that have probes +func DescribeProbe(probe *corev1.Probe) string { + attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold) + switch { + case probe.Exec != nil: + return fmt.Sprintf("exec %v %s", probe.Exec.Command, attrs) + case probe.HTTPGet != nil: + url := &url.URL{} + url.Scheme = strings.ToLower(string(probe.HTTPGet.Scheme)) + if len(probe.HTTPGet.Port.String()) > 0 { + url.Host = net.JoinHostPort(probe.HTTPGet.Host, probe.HTTPGet.Port.String()) + } else { + url.Host = probe.HTTPGet.Host + } + url.Path = probe.HTTPGet.Path + return fmt.Sprintf("http-get %s %s", url.String(), attrs) + case probe.TCPSocket != nil: + return fmt.Sprintf("tcp-socket %s:%s %s", probe.TCPSocket.Host, probe.TCPSocket.Port.String(), attrs) + + case probe.GRPC != nil: + return fmt.Sprintf("grpc :%d %s %s", probe.GRPC.Port, *(probe.GRPC.Service), attrs) + } + return fmt.Sprintf("unknown %s", attrs) +} + +type EnvVarResolverFunc func(e corev1.EnvVar) string + +// EnvValueFrom is exported for use by describers in other packages +func EnvValueRetriever(pod *corev1.Pod) EnvVarResolverFunc { + return func(e corev1.EnvVar) string { + gv, err := schema.ParseGroupVersion(e.ValueFrom.FieldRef.APIVersion) + if err != nil { + return "" + } + gvk := gv.WithKind("Pod") + internalFieldPath, _, err := scheme.Scheme.ConvertFieldLabel(gvk, e.ValueFrom.FieldRef.FieldPath, "") + if err != nil { + return "" // pod validation should catch this on create + } + + valueFrom, err := fieldpath.ExtractFieldPathAsString(pod, internalFieldPath) + if err != nil { + return "" // pod validation should catch this on create + } + + return valueFrom + } +} + +func describeStatus(stateName string, state corev1.ContainerState, w PrefixWriter) { + switch { + case state.Running != nil: + w.Write(LEVEL_2, "%s:\tRunning\n", stateName) + 
w.Write(LEVEL_3, "Started:\t%v\n", state.Running.StartedAt.Time.Format(time.RFC1123Z)) + case state.Waiting != nil: + w.Write(LEVEL_2, "%s:\tWaiting\n", stateName) + if state.Waiting.Reason != "" { + w.Write(LEVEL_3, "Reason:\t%s\n", state.Waiting.Reason) + } + case state.Terminated != nil: + w.Write(LEVEL_2, "%s:\tTerminated\n", stateName) + if state.Terminated.Reason != "" { + w.Write(LEVEL_3, "Reason:\t%s\n", state.Terminated.Reason) + } + if state.Terminated.Message != "" { + w.Write(LEVEL_3, "Message:\t%s\n", state.Terminated.Message) + } + w.Write(LEVEL_3, "Exit Code:\t%d\n", state.Terminated.ExitCode) + if state.Terminated.Signal > 0 { + w.Write(LEVEL_3, "Signal:\t%d\n", state.Terminated.Signal) + } + w.Write(LEVEL_3, "Started:\t%s\n", state.Terminated.StartedAt.Time.Format(time.RFC1123Z)) + w.Write(LEVEL_3, "Finished:\t%s\n", state.Terminated.FinishedAt.Time.Format(time.RFC1123Z)) + default: + w.Write(LEVEL_2, "%s:\tWaiting\n", stateName) + } +} + +func describeVolumeClaimTemplates(templates []corev1.PersistentVolumeClaim, w PrefixWriter) { + if len(templates) == 0 { + w.Write(LEVEL_0, "Volume Claims:\t\n") + return + } + w.Write(LEVEL_0, "Volume Claims:\n") + for _, pvc := range templates { + w.Write(LEVEL_1, "Name:\t%s\n", pvc.Name) + w.Write(LEVEL_1, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(&pvc)) + printLabelsMultilineWithIndent(w, " ", "Labels", "\t", pvc.Labels, sets.NewString()) + printLabelsMultilineWithIndent(w, " ", "Annotations", "\t", pvc.Annotations, sets.NewString()) + if capacity, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { + w.Write(LEVEL_1, "Capacity:\t%s\n", capacity.String()) + } else { + w.Write(LEVEL_1, "Capacity:\t%s\n", "") + } + w.Write(LEVEL_1, "Access Modes:\t%s\n", pvc.Spec.AccessModes) + } +} + +func printBoolPtr(value *bool) string { + if value != nil { + return printBool(*value) + } + + return "" +} + +func printBool(value bool) string { + if value { + return "True" + } + + return 
"False" +} + +// ReplicationControllerDescriber generates information about a replication controller +// and the pods it has created. +type ReplicationControllerDescriber struct { + clientset.Interface +} + +func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + rc := d.CoreV1().ReplicationControllers(namespace) + pc := d.CoreV1().Pods(namespace) + + controller, err := rc.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + selector := labels.SelectorFromSet(controller.Spec.Selector) + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, controller.UID, describerSettings) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), controller, describerSettings.ChunkSize) + } + + return describeReplicationController(controller, events, running, waiting, succeeded, failed) +} + +func describeReplicationController(controller *corev1.ReplicationController, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", controller.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", controller.Namespace) + w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector)) + printLabelsMultiline(w, "Labels", controller.Labels) + printAnnotationsMultiline(w, "Annotations", controller.Annotations) + w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, *controller.Spec.Replicas) + w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + DescribePodTemplate(controller.Spec.Template, w) + if len(controller.Status.Conditions) > 0 { + w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n") + w.Write(LEVEL_1, 
"----\t------\t------\n") + for _, c := range controller.Status.Conditions { + w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) + } + } + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func DescribePodTemplate(template *corev1.PodTemplateSpec, w PrefixWriter) { + w.Write(LEVEL_0, "Pod Template:\n") + if template == nil { + w.Write(LEVEL_1, "") + return + } + printLabelsMultiline(w, " Labels", template.Labels) + if len(template.Annotations) > 0 { + printAnnotationsMultiline(w, " Annotations", template.Annotations) + } + if len(template.Spec.ServiceAccountName) > 0 { + w.Write(LEVEL_1, "Service Account:\t%s\n", template.Spec.ServiceAccountName) + } + if len(template.Spec.InitContainers) > 0 { + describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, w, " ") + } + describeContainers("Containers", template.Spec.Containers, nil, nil, w, " ") + describeVolumes(template.Spec.Volumes, w, " ") + if len(template.Spec.PriorityClassName) > 0 { + w.Write(LEVEL_1, "Priority Class Name:\t%s\n", template.Spec.PriorityClassName) + } +} + +// ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created. 
+type ReplicaSetDescriber struct { + clientset.Interface +} + +func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + rsc := d.AppsV1().ReplicaSets(namespace) + pc := d.CoreV1().Pods(namespace) + + rs, err := rsc.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return "", err + } + + running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID, describerSettings) + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), rs, describerSettings.ChunkSize) + } + + return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr) +} + +func describeReplicaSet(rs *appsv1.ReplicaSet, events *corev1.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", rs.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", rs.Namespace) + w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(rs.Spec.Selector)) + printLabelsMultiline(w, "Labels", rs.Labels) + printAnnotationsMultiline(w, "Annotations", rs.Annotations) + if controlledBy := printController(rs); len(controlledBy) > 0 { + w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) + } + w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, *rs.Spec.Replicas) + w.Write(LEVEL_0, "Pods Status:\t") + if getPodErr != nil { + w.Write(LEVEL_0, "error in fetching pods: %s\n", getPodErr) + } else { + w.Write(LEVEL_0, "%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + } + DescribePodTemplate(&rs.Spec.Template, w) + if len(rs.Status.Conditions) > 0 { + w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n") + w.Write(LEVEL_1, 
"----\t------\t------\n") + for _, c := range rs.Status.Conditions { + w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) + } + } + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +// JobDescriber generates information about a job and the pods it has created. +type JobDescriber struct { + clientset.Interface +} + +func (d *JobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + job, err := d.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), job, describerSettings.ChunkSize) + } + + return describeJob(job, events) +} + +func describeJob(job *batchv1.Job, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", job.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", job.Namespace) + if selector, err := metav1.LabelSelectorAsSelector(job.Spec.Selector); err == nil { + w.Write(LEVEL_0, "Selector:\t%s\n", selector) + } else { + w.Write(LEVEL_0, "Selector:\tFailed to get selector: %s\n", err) + } + printLabelsMultiline(w, "Labels", job.Labels) + printAnnotationsMultiline(w, "Annotations", job.Annotations) + if controlledBy := printController(job); len(controlledBy) > 0 { + w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) + } + if job.Spec.Parallelism != nil { + w.Write(LEVEL_0, "Parallelism:\t%d\n", *job.Spec.Parallelism) + } + if job.Spec.Completions != nil { + w.Write(LEVEL_0, "Completions:\t%d\n", *job.Spec.Completions) + } else { + w.Write(LEVEL_0, "Completions:\t\n") + } + if job.Spec.CompletionMode != nil { + w.Write(LEVEL_0, "Completion Mode:\t%s\n", *job.Spec.CompletionMode) + } + if job.Status.StartTime != nil { + w.Write(LEVEL_0, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z)) + } + if 
job.Status.CompletionTime != nil { + w.Write(LEVEL_0, "Completed At:\t%s\n", job.Status.CompletionTime.Time.Format(time.RFC1123Z)) + } + if job.Status.StartTime != nil && job.Status.CompletionTime != nil { + w.Write(LEVEL_0, "Duration:\t%s\n", duration.HumanDuration(job.Status.CompletionTime.Sub(job.Status.StartTime.Time))) + } + if job.Spec.ActiveDeadlineSeconds != nil { + w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds) + } + if job.Status.Ready == nil { + w.Write(LEVEL_0, "Pods Statuses:\t%d Active / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed) + } else { + w.Write(LEVEL_0, "Pods Statuses:\t%d Active (%d Ready) / %d Succeeded / %d Failed\n", job.Status.Active, *job.Status.Ready, job.Status.Succeeded, job.Status.Failed) + } + if job.Spec.CompletionMode != nil && *job.Spec.CompletionMode == batchv1.IndexedCompletion { + w.Write(LEVEL_0, "Completed Indexes:\t%s\n", capIndexesListOrNone(job.Status.CompletedIndexes, 50)) + } + DescribePodTemplate(&job.Spec.Template, w) + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func capIndexesListOrNone(indexes string, softLimit int) string { + if len(indexes) == 0 { + return "" + } + ix := softLimit + for ; ix < len(indexes); ix++ { + if indexes[ix] == ',' { + break + } + } + if ix >= len(indexes) { + return indexes + } + return indexes[:ix+1] + "..." +} + +// CronJobDescriber generates information about a cron job and the jobs it has created. 
+type CronJobDescriber struct { + client clientset.Interface +} + +func (d *CronJobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + var events *corev1.EventList + + cronJob, err := d.client.BatchV1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(d.client.CoreV1(), cronJob, describerSettings.ChunkSize) + } + return describeCronJob(cronJob, events) + } + + // TODO: drop this condition when beta disappears in 1.25 + cronJobBeta, err := d.client.BatchV1beta1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + if describerSettings.ShowEvents { + events, _ = searchEvents(d.client.CoreV1(), cronJobBeta, describerSettings.ChunkSize) + } + return describeCronJobBeta(cronJobBeta, events) +} + +func describeCronJob(cronJob *batchv1.CronJob, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", cronJob.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", cronJob.Namespace) + printLabelsMultiline(w, "Labels", cronJob.Labels) + printAnnotationsMultiline(w, "Annotations", cronJob.Annotations) + w.Write(LEVEL_0, "Schedule:\t%s\n", cronJob.Spec.Schedule) + w.Write(LEVEL_0, "Concurrency Policy:\t%s\n", cronJob.Spec.ConcurrencyPolicy) + w.Write(LEVEL_0, "Suspend:\t%s\n", printBoolPtr(cronJob.Spec.Suspend)) + if cronJob.Spec.SuccessfulJobsHistoryLimit != nil { + w.Write(LEVEL_0, "Successful Job History Limit:\t%d\n", *cronJob.Spec.SuccessfulJobsHistoryLimit) + } else { + w.Write(LEVEL_0, "Successful Job History Limit:\t\n") + } + if cronJob.Spec.FailedJobsHistoryLimit != nil { + w.Write(LEVEL_0, "Failed Job History Limit:\t%d\n", *cronJob.Spec.FailedJobsHistoryLimit) + } else { + w.Write(LEVEL_0, "Failed Job History Limit:\t\n") + } + if cronJob.Spec.StartingDeadlineSeconds != nil { + 
w.Write(LEVEL_0, "Starting Deadline Seconds:\t%ds\n", *cronJob.Spec.StartingDeadlineSeconds) + } else { + w.Write(LEVEL_0, "Starting Deadline Seconds:\t\n") + } + describeJobTemplate(cronJob.Spec.JobTemplate, w) + if cronJob.Status.LastScheduleTime != nil { + w.Write(LEVEL_0, "Last Schedule Time:\t%s\n", cronJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z)) + } else { + w.Write(LEVEL_0, "Last Schedule Time:\t\n") + } + printActiveJobs(w, "Active Jobs", cronJob.Status.Active) + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func describeJobTemplate(jobTemplate batchv1.JobTemplateSpec, w PrefixWriter) { + if jobTemplate.Spec.Selector != nil { + if selector, err := metav1.LabelSelectorAsSelector(jobTemplate.Spec.Selector); err == nil { + w.Write(LEVEL_0, "Selector:\t%s\n", selector) + } else { + w.Write(LEVEL_0, "Selector:\tFailed to get selector: %s\n", err) + } + } else { + w.Write(LEVEL_0, "Selector:\t\n") + } + if jobTemplate.Spec.Parallelism != nil { + w.Write(LEVEL_0, "Parallelism:\t%d\n", *jobTemplate.Spec.Parallelism) + } else { + w.Write(LEVEL_0, "Parallelism:\t\n") + } + if jobTemplate.Spec.Completions != nil { + w.Write(LEVEL_0, "Completions:\t%d\n", *jobTemplate.Spec.Completions) + } else { + w.Write(LEVEL_0, "Completions:\t\n") + } + if jobTemplate.Spec.ActiveDeadlineSeconds != nil { + w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *jobTemplate.Spec.ActiveDeadlineSeconds) + } + DescribePodTemplate(&jobTemplate.Spec.Template, w) +} + +func describeCronJobBeta(cronJob *batchv1beta1.CronJob, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", cronJob.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", cronJob.Namespace) + printLabelsMultiline(w, "Labels", cronJob.Labels) + printAnnotationsMultiline(w, "Annotations", cronJob.Annotations) + w.Write(LEVEL_0, "Schedule:\t%s\n", cronJob.Spec.Schedule) + w.Write(LEVEL_0, 
"Concurrency Policy:\t%s\n", cronJob.Spec.ConcurrencyPolicy) + w.Write(LEVEL_0, "Suspend:\t%s\n", printBoolPtr(cronJob.Spec.Suspend)) + if cronJob.Spec.SuccessfulJobsHistoryLimit != nil { + w.Write(LEVEL_0, "Successful Job History Limit:\t%d\n", *cronJob.Spec.SuccessfulJobsHistoryLimit) + } else { + w.Write(LEVEL_0, "Successful Job History Limit:\t\n") + } + if cronJob.Spec.FailedJobsHistoryLimit != nil { + w.Write(LEVEL_0, "Failed Job History Limit:\t%d\n", *cronJob.Spec.FailedJobsHistoryLimit) + } else { + w.Write(LEVEL_0, "Failed Job History Limit:\t\n") + } + if cronJob.Spec.StartingDeadlineSeconds != nil { + w.Write(LEVEL_0, "Starting Deadline Seconds:\t%ds\n", *cronJob.Spec.StartingDeadlineSeconds) + } else { + w.Write(LEVEL_0, "Starting Deadline Seconds:\t\n") + } + describeJobTemplateBeta(cronJob.Spec.JobTemplate, w) + if cronJob.Status.LastScheduleTime != nil { + w.Write(LEVEL_0, "Last Schedule Time:\t%s\n", cronJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z)) + } else { + w.Write(LEVEL_0, "Last Schedule Time:\t\n") + } + printActiveJobs(w, "Active Jobs", cronJob.Status.Active) + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func describeJobTemplateBeta(jobTemplate batchv1beta1.JobTemplateSpec, w PrefixWriter) { + if jobTemplate.Spec.Selector != nil { + if selector, err := metav1.LabelSelectorAsSelector(jobTemplate.Spec.Selector); err == nil { + w.Write(LEVEL_0, "Selector:\t%s\n", selector) + } else { + w.Write(LEVEL_0, "Selector:\tFailed to get selector: %s\n", err) + } + } else { + w.Write(LEVEL_0, "Selector:\t\n") + } + if jobTemplate.Spec.Parallelism != nil { + w.Write(LEVEL_0, "Parallelism:\t%d\n", *jobTemplate.Spec.Parallelism) + } else { + w.Write(LEVEL_0, "Parallelism:\t\n") + } + if jobTemplate.Spec.Completions != nil { + w.Write(LEVEL_0, "Completions:\t%d\n", *jobTemplate.Spec.Completions) + } else { + w.Write(LEVEL_0, "Completions:\t\n") + } + if jobTemplate.Spec.ActiveDeadlineSeconds != nil { + 
w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *jobTemplate.Spec.ActiveDeadlineSeconds) + } + DescribePodTemplate(&jobTemplate.Spec.Template, w) +} + +func printActiveJobs(w PrefixWriter, title string, jobs []corev1.ObjectReference) { + w.Write(LEVEL_0, "%s:\t", title) + if len(jobs) == 0 { + w.WriteLine("") + return + } + + for i, job := range jobs { + if i != 0 { + w.Write(LEVEL_0, ", ") + } + w.Write(LEVEL_0, "%s", job.Name) + } + w.WriteLine("") +} + +// DaemonSetDescriber generates information about a daemon set and the pods it has created. +type DaemonSetDescriber struct { + clientset.Interface +} + +func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + dc := d.AppsV1().DaemonSets(namespace) + pc := d.CoreV1().Pods(namespace) + + daemon, err := dc.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector) + if err != nil { + return "", err + } + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, daemon.UID, describerSettings) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), daemon, describerSettings.ChunkSize) + } + + return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) +} + +func describeDaemonSet(daemon *appsv1.DaemonSet, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", daemon.Name) + selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector) + if err != nil { + // this shouldn't happen if LabelSelector passed validation + return err + } + w.Write(LEVEL_0, "Selector:\t%s\n", selector) + w.Write(LEVEL_0, "Node-Selector:\t%s\n", 
labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector)) + printLabelsMultiline(w, "Labels", daemon.Labels) + printAnnotationsMultiline(w, "Annotations", daemon.Annotations) + w.Write(LEVEL_0, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled) + w.Write(LEVEL_0, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled) + w.Write(LEVEL_0, "Number of Nodes Scheduled with Up-to-date Pods: %d\n", daemon.Status.UpdatedNumberScheduled) + w.Write(LEVEL_0, "Number of Nodes Scheduled with Available Pods: %d\n", daemon.Status.NumberAvailable) + w.Write(LEVEL_0, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled) + w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + DescribePodTemplate(&daemon.Spec.Template, w) + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +// SecretDescriber generates information about a secret +type SecretDescriber struct { + clientset.Interface +} + +func (d *SecretDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.CoreV1().Secrets(namespace) + + secret, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + return describeSecret(secret) +} + +func describeSecret(secret *corev1.Secret) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", secret.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", secret.Namespace) + printLabelsMultiline(w, "Labels", secret.Labels) + printAnnotationsMultiline(w, "Annotations", secret.Annotations) + + w.Write(LEVEL_0, "\nType:\t%s\n", secret.Type) + + w.Write(LEVEL_0, "\nData\n====\n") + for k, v := range secret.Data { + switch { + case k == corev1.ServiceAccountTokenKey && secret.Type == corev1.SecretTypeServiceAccountToken: + w.Write(LEVEL_0, "%s:\t%s\n", k, string(v)) + 
default: + w.Write(LEVEL_0, "%s:\t%d bytes\n", k, len(v)) + } + } + + return nil + }) +} + +type IngressDescriber struct { + client clientset.Interface +} + +func (i *IngressDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + var events *corev1.EventList + + // try ingress/v1 first (v1.19) and fallback to ingress/v1beta if an err occurs + netV1, err := i.client.NetworkingV1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(i.client.CoreV1(), netV1, describerSettings.ChunkSize) + } + return i.describeIngressV1(netV1, events) + } + netV1beta1, err := i.client.NetworkingV1beta1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(i.client.CoreV1(), netV1beta1, describerSettings.ChunkSize) + } + return i.describeIngressV1beta1(netV1beta1, events) + } + return "", err +} + +func (i *IngressDescriber) describeBackendV1beta1(ns string, backend *networkingv1beta1.IngressBackend) string { + endpoints, err := i.client.CoreV1().Endpoints(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{}) + if err != nil { + return fmt.Sprintf("", err) + } + service, err := i.client.CoreV1().Services(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{}) + if err != nil { + return fmt.Sprintf("", err) + } + spName := "" + for i := range service.Spec.Ports { + sp := &service.Spec.Ports[i] + switch backend.ServicePort.Type { + case intstr.String: + if backend.ServicePort.StrVal == sp.Name { + spName = sp.Name + } + case intstr.Int: + if int32(backend.ServicePort.IntVal) == sp.Port { + spName = sp.Name + } + } + } + return formatEndpoints(endpoints, sets.NewString(spName)) +} + +func (i *IngressDescriber) describeBackendV1(ns string, backend *networkingv1.IngressBackend) string { + + if backend.Service != nil { + sb := 
serviceBackendStringer(backend.Service) + endpoints, err := i.client.CoreV1().Endpoints(ns).Get(context.TODO(), backend.Service.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Sprintf("%v ()", sb, err) + } + service, err := i.client.CoreV1().Services(ns).Get(context.TODO(), backend.Service.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Sprintf("%v()", sb, err) + } + spName := "" + for i := range service.Spec.Ports { + sp := &service.Spec.Ports[i] + if backend.Service.Port.Number != 0 && backend.Service.Port.Number == sp.Port { + spName = sp.Name + } else if len(backend.Service.Port.Name) > 0 && backend.Service.Port.Name == sp.Name { + spName = sp.Name + } + } + ep := formatEndpoints(endpoints, sets.NewString(spName)) + return fmt.Sprintf("%s (%s)", sb, ep) + } + if backend.Resource != nil { + ic := backend.Resource + apiGroup := "" + if ic.APIGroup != nil { + apiGroup = fmt.Sprintf("%v", *ic.APIGroup) + } + return fmt.Sprintf("APIGroup: %v, Kind: %v, Name: %v", apiGroup, ic.Kind, ic.Name) + } + return "" +} + +func (i *IngressDescriber) describeIngressV1(ing *networkingv1.Ingress, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%v\n", ing.Name) + printLabelsMultiline(w, "Labels", ing.Labels) + w.Write(LEVEL_0, "Namespace:\t%v\n", ing.Namespace) + w.Write(LEVEL_0, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true)) + def := ing.Spec.DefaultBackend + ns := ing.Namespace + if def == nil { + // Ingresses that don't specify a default backend inherit the + // default backend in the kube-system namespace. 
+ def = &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "default-http-backend", + Port: networkingv1.ServiceBackendPort{ + Number: 80, + }, + }, + } + ns = metav1.NamespaceSystem + } + w.Write(LEVEL_0, "Default backend:\t%s\n", i.describeBackendV1(ns, def)) + if len(ing.Spec.TLS) != 0 { + describeIngressTLSV1(w, ing.Spec.TLS) + } + w.Write(LEVEL_0, "Rules:\n Host\tPath\tBackends\n") + w.Write(LEVEL_1, "----\t----\t--------\n") + count := 0 + for _, rules := range ing.Spec.Rules { + + if rules.HTTP == nil { + continue + } + count++ + host := rules.Host + if len(host) == 0 { + host = "*" + } + w.Write(LEVEL_1, "%s\t\n", host) + for _, path := range rules.HTTP.Paths { + w.Write(LEVEL_2, "\t%s \t%s\n", path.Path, i.describeBackendV1(ing.Namespace, &path.Backend)) + } + } + if count == 0 { + w.Write(LEVEL_1, "%s\t%s\t%s\n", "*", "*", i.describeBackendV1(ns, def)) + } + printAnnotationsMultiline(w, "Annotations", ing.Annotations) + + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func (i *IngressDescriber) describeIngressV1beta1(ing *networkingv1beta1.Ingress, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%v\n", ing.Name) + printLabelsMultiline(w, "Labels", ing.Labels) + w.Write(LEVEL_0, "Namespace:\t%v\n", ing.Namespace) + w.Write(LEVEL_0, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true)) + def := ing.Spec.Backend + ns := ing.Namespace + if def == nil { + // Ingresses that don't specify a default backend inherit the + // default backend in the kube-system namespace. 
+ def = &networkingv1beta1.IngressBackend{ + ServiceName: "default-http-backend", + ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, + } + ns = metav1.NamespaceSystem + } + w.Write(LEVEL_0, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackendV1beta1(ns, def)) + if len(ing.Spec.TLS) != 0 { + describeIngressTLSV1beta1(w, ing.Spec.TLS) + } + w.Write(LEVEL_0, "Rules:\n Host\tPath\tBackends\n") + w.Write(LEVEL_1, "----\t----\t--------\n") + count := 0 + for _, rules := range ing.Spec.Rules { + + if rules.HTTP == nil { + continue + } + count++ + host := rules.Host + if len(host) == 0 { + host = "*" + } + w.Write(LEVEL_1, "%s\t\n", host) + for _, path := range rules.HTTP.Paths { + w.Write(LEVEL_2, "\t%s \t%s (%s)\n", path.Path, backendStringer(&path.Backend), i.describeBackendV1beta1(ing.Namespace, &path.Backend)) + } + } + if count == 0 { + w.Write(LEVEL_1, "%s\t%s \t%s (%s)\n", "*", "*", backendStringer(def), i.describeBackendV1beta1(ns, def)) + } + printAnnotationsMultiline(w, "Annotations", ing.Annotations) + + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func describeIngressTLSV1beta1(w PrefixWriter, ingTLS []networkingv1beta1.IngressTLS) { + w.Write(LEVEL_0, "TLS:\n") + for _, t := range ingTLS { + if t.SecretName == "" { + w.Write(LEVEL_1, "SNI routes %v\n", strings.Join(t.Hosts, ",")) + } else { + w.Write(LEVEL_1, "%v terminates %v\n", t.SecretName, strings.Join(t.Hosts, ",")) + } + } +} + +func describeIngressTLSV1(w PrefixWriter, ingTLS []networkingv1.IngressTLS) { + w.Write(LEVEL_0, "TLS:\n") + for _, t := range ingTLS { + if t.SecretName == "" { + w.Write(LEVEL_1, "SNI routes %v\n", strings.Join(t.Hosts, ",")) + } else { + w.Write(LEVEL_1, "%v terminates %v\n", t.SecretName, strings.Join(t.Hosts, ",")) + } + } +} + +type IngressClassDescriber struct { + client clientset.Interface +} + +func (i *IngressClassDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, 
error) { + var events *corev1.EventList + // try IngressClass/v1 first (v1.19) and fallback to IngressClass/v1beta if an err occurs + netV1, err := i.client.NetworkingV1().IngressClasses().Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(i.client.CoreV1(), netV1, describerSettings.ChunkSize) + } + return i.describeIngressClassV1(netV1, events) + } + netV1beta1, err := i.client.NetworkingV1beta1().IngressClasses().Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(i.client.CoreV1(), netV1beta1, describerSettings.ChunkSize) + } + return i.describeIngressClassV1beta1(netV1beta1, events) + } + return "", err +} + +func (i *IngressClassDescriber) describeIngressClassV1beta1(ic *networkingv1beta1.IngressClass, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", ic.Name) + printLabelsMultiline(w, "Labels", ic.Labels) + printAnnotationsMultiline(w, "Annotations", ic.Annotations) + w.Write(LEVEL_0, "Controller:\t%v\n", ic.Spec.Controller) + + if ic.Spec.Parameters != nil { + w.Write(LEVEL_0, "Parameters:\n") + if ic.Spec.Parameters.APIGroup != nil { + w.Write(LEVEL_1, "APIGroup:\t%v\n", *ic.Spec.Parameters.APIGroup) + } + w.Write(LEVEL_1, "Kind:\t%v\n", ic.Spec.Parameters.Kind) + w.Write(LEVEL_1, "Name:\t%v\n", ic.Spec.Parameters.Name) + } + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func (i *IngressClassDescriber) describeIngressClassV1(ic *networkingv1.IngressClass, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", ic.Name) + printLabelsMultiline(w, "Labels", ic.Labels) + printAnnotationsMultiline(w, "Annotations", ic.Annotations) + w.Write(LEVEL_0, "Controller:\t%v\n", 
ic.Spec.Controller) + + if ic.Spec.Parameters != nil { + w.Write(LEVEL_0, "Parameters:\n") + if ic.Spec.Parameters.APIGroup != nil { + w.Write(LEVEL_1, "APIGroup:\t%v\n", *ic.Spec.Parameters.APIGroup) + } + w.Write(LEVEL_1, "Kind:\t%v\n", ic.Spec.Parameters.Kind) + w.Write(LEVEL_1, "Name:\t%v\n", ic.Spec.Parameters.Name) + } + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +// ServiceDescriber generates information about a service. +type ServiceDescriber struct { + clientset.Interface +} + +func (d *ServiceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.CoreV1().Services(namespace) + + service, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + endpoints, _ := d.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), service, describerSettings.ChunkSize) + } + return describeService(service, endpoints, events) +} + +func buildIngressString(ingress []corev1.LoadBalancerIngress) string { + var buffer bytes.Buffer + + for i := range ingress { + if i != 0 { + buffer.WriteString(", ") + } + if ingress[i].IP != "" { + buffer.WriteString(ingress[i].IP) + } else { + buffer.WriteString(ingress[i].Hostname) + } + } + return buffer.String() +} + +func describeService(service *corev1.Service, endpoints *corev1.Endpoints, events *corev1.EventList) (string, error) { + if endpoints == nil { + endpoints = &corev1.Endpoints{} + } + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", service.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", service.Namespace) + printLabelsMultiline(w, "Labels", service.Labels) + printAnnotationsMultiline(w, "Annotations", service.Annotations) + w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector)) + 
w.Write(LEVEL_0, "Type:\t%s\n", service.Spec.Type) + + if service.Spec.IPFamilyPolicy != nil { + w.Write(LEVEL_0, "IP Family Policy:\t%s\n", *(service.Spec.IPFamilyPolicy)) + } + + if len(service.Spec.IPFamilies) > 0 { + ipfamiliesStrings := make([]string, 0, len(service.Spec.IPFamilies)) + for _, family := range service.Spec.IPFamilies { + ipfamiliesStrings = append(ipfamiliesStrings, string(family)) + } + + w.Write(LEVEL_0, "IP Families:\t%s\n", strings.Join(ipfamiliesStrings, ",")) + } else { + w.Write(LEVEL_0, "IP Families:\t%s\n", "") + } + + w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.ClusterIP) + if len(service.Spec.ClusterIPs) > 0 { + w.Write(LEVEL_0, "IPs:\t%s\n", strings.Join(service.Spec.ClusterIPs, ",")) + } else { + w.Write(LEVEL_0, "IPs:\t%s\n", "") + } + + if len(service.Spec.ExternalIPs) > 0 { + w.Write(LEVEL_0, "External IPs:\t%v\n", strings.Join(service.Spec.ExternalIPs, ",")) + } + if service.Spec.LoadBalancerIP != "" { + w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.LoadBalancerIP) + } + if service.Spec.ExternalName != "" { + w.Write(LEVEL_0, "External Name:\t%s\n", service.Spec.ExternalName) + } + if len(service.Status.LoadBalancer.Ingress) > 0 { + list := buildIngressString(service.Status.LoadBalancer.Ingress) + w.Write(LEVEL_0, "LoadBalancer Ingress:\t%s\n", list) + } + for i := range service.Spec.Ports { + sp := &service.Spec.Ports[i] + + name := sp.Name + if name == "" { + name = "" + } + w.Write(LEVEL_0, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol) + if sp.TargetPort.Type == intstr.Type(intstr.Int) { + w.Write(LEVEL_0, "TargetPort:\t%d/%s\n", sp.TargetPort.IntVal, sp.Protocol) + } else { + w.Write(LEVEL_0, "TargetPort:\t%s/%s\n", sp.TargetPort.StrVal, sp.Protocol) + } + if sp.NodePort != 0 { + w.Write(LEVEL_0, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) + } + w.Write(LEVEL_0, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name))) + } + w.Write(LEVEL_0, "Session Affinity:\t%s\n", 
service.Spec.SessionAffinity) + if service.Spec.ExternalTrafficPolicy != "" { + w.Write(LEVEL_0, "External Traffic Policy:\t%s\n", service.Spec.ExternalTrafficPolicy) + } + if service.Spec.HealthCheckNodePort != 0 { + w.Write(LEVEL_0, "HealthCheck NodePort:\t%d\n", service.Spec.HealthCheckNodePort) + } + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + w.Write(LEVEL_0, "LoadBalancer Source Ranges:\t%v\n", strings.Join(service.Spec.LoadBalancerSourceRanges, ",")) + } + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +// EndpointsDescriber generates information about an Endpoint. +type EndpointsDescriber struct { + clientset.Interface +} + +func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.CoreV1().Endpoints(namespace) + + ep, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), ep, describerSettings.ChunkSize) + } + + return describeEndpoints(ep, events) +} + +func describeEndpoints(ep *corev1.Endpoints, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", ep.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", ep.Namespace) + printLabelsMultiline(w, "Labels", ep.Labels) + printAnnotationsMultiline(w, "Annotations", ep.Annotations) + + w.Write(LEVEL_0, "Subsets:\n") + for i := range ep.Subsets { + subset := &ep.Subsets[i] + + addresses := make([]string, 0, len(subset.Addresses)) + for _, addr := range subset.Addresses { + addresses = append(addresses, addr.IP) + } + addressesString := strings.Join(addresses, ",") + if len(addressesString) == 0 { + addressesString = "" + } + w.Write(LEVEL_1, "Addresses:\t%s\n", addressesString) + + notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses)) + for _, addr := 
range subset.NotReadyAddresses { + notReadyAddresses = append(notReadyAddresses, addr.IP) + } + notReadyAddressesString := strings.Join(notReadyAddresses, ",") + if len(notReadyAddressesString) == 0 { + notReadyAddressesString = "" + } + w.Write(LEVEL_1, "NotReadyAddresses:\t%s\n", notReadyAddressesString) + + if len(subset.Ports) > 0 { + w.Write(LEVEL_1, "Ports:\n") + w.Write(LEVEL_2, "Name\tPort\tProtocol\n") + w.Write(LEVEL_2, "----\t----\t--------\n") + for _, port := range subset.Ports { + name := port.Name + if len(name) == 0 { + name = "" + } + w.Write(LEVEL_2, "%s\t%d\t%s\n", name, port.Port, port.Protocol) + } + } + w.Write(LEVEL_0, "\n") + } + + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +// EndpointSliceDescriber generates information about an EndpointSlice. +type EndpointSliceDescriber struct { + clientset.Interface +} + +func (d *EndpointSliceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + var events *corev1.EventList + // try endpointslice/v1 first (v1.21) and fallback to v1beta1 if error occurs + + epsV1, err := d.DiscoveryV1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), epsV1, describerSettings.ChunkSize) + } + return describeEndpointSliceV1(epsV1, events) + } + + epsV1beta1, err := d.DiscoveryV1beta1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), epsV1beta1, describerSettings.ChunkSize) + } + + return describeEndpointSliceV1beta1(epsV1beta1, events) +} + +func describeEndpointSliceV1(eps *discoveryv1.EndpointSlice, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", eps.Name) + w.Write(LEVEL_0, 
"Namespace:\t%s\n", eps.Namespace) + printLabelsMultiline(w, "Labels", eps.Labels) + printAnnotationsMultiline(w, "Annotations", eps.Annotations) + + w.Write(LEVEL_0, "AddressType:\t%s\n", string(eps.AddressType)) + + if len(eps.Ports) == 0 { + w.Write(LEVEL_0, "Ports: \n") + } else { + w.Write(LEVEL_0, "Ports:\n") + w.Write(LEVEL_1, "Name\tPort\tProtocol\n") + w.Write(LEVEL_1, "----\t----\t--------\n") + for _, port := range eps.Ports { + portName := "" + if port.Name != nil && len(*port.Name) > 0 { + portName = *port.Name + } + + portNum := "" + if port.Port != nil { + portNum = strconv.Itoa(int(*port.Port)) + } + + w.Write(LEVEL_1, "%s\t%s\t%s\n", portName, portNum, *port.Protocol) + } + } + + if len(eps.Endpoints) == 0 { + w.Write(LEVEL_0, "Endpoints: \n") + } else { + w.Write(LEVEL_0, "Endpoints:\n") + for i := range eps.Endpoints { + endpoint := &eps.Endpoints[i] + + addressesString := strings.Join(endpoint.Addresses, ", ") + if len(addressesString) == 0 { + addressesString = "" + } + w.Write(LEVEL_1, "- Addresses:\t%s\n", addressesString) + + w.Write(LEVEL_2, "Conditions:\n") + readyText := "" + if endpoint.Conditions.Ready != nil { + readyText = strconv.FormatBool(*endpoint.Conditions.Ready) + } + w.Write(LEVEL_3, "Ready:\t%s\n", readyText) + + hostnameText := "" + if endpoint.Hostname != nil { + hostnameText = *endpoint.Hostname + } + w.Write(LEVEL_2, "Hostname:\t%s\n", hostnameText) + + if endpoint.TargetRef != nil { + w.Write(LEVEL_2, "TargetRef:\t%s/%s\n", endpoint.TargetRef.Kind, endpoint.TargetRef.Name) + } + + nodeNameText := "" + if endpoint.NodeName != nil { + nodeNameText = *endpoint.NodeName + } + w.Write(LEVEL_2, "NodeName:\t%s\n", nodeNameText) + + zoneText := "" + if endpoint.Zone != nil { + zoneText = *endpoint.Zone + } + w.Write(LEVEL_2, "Zone:\t%s\n", zoneText) + } + } + + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func describeEndpointSliceV1beta1(eps *discoveryv1beta1.EndpointSlice, events 
*corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", eps.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", eps.Namespace) + printLabelsMultiline(w, "Labels", eps.Labels) + printAnnotationsMultiline(w, "Annotations", eps.Annotations) + + w.Write(LEVEL_0, "AddressType:\t%s\n", string(eps.AddressType)) + + if len(eps.Ports) == 0 { + w.Write(LEVEL_0, "Ports: \n") + } else { + w.Write(LEVEL_0, "Ports:\n") + w.Write(LEVEL_1, "Name\tPort\tProtocol\n") + w.Write(LEVEL_1, "----\t----\t--------\n") + for _, port := range eps.Ports { + portName := "" + if port.Name != nil && len(*port.Name) > 0 { + portName = *port.Name + } + + portNum := "" + if port.Port != nil { + portNum = strconv.Itoa(int(*port.Port)) + } + + w.Write(LEVEL_1, "%s\t%s\t%s\n", portName, portNum, *port.Protocol) + } + } + + if len(eps.Endpoints) == 0 { + w.Write(LEVEL_0, "Endpoints: \n") + } else { + w.Write(LEVEL_0, "Endpoints:\n") + for i := range eps.Endpoints { + endpoint := &eps.Endpoints[i] + + addressesString := strings.Join(endpoint.Addresses, ",") + if len(addressesString) == 0 { + addressesString = "" + } + w.Write(LEVEL_1, "- Addresses:\t%s\n", addressesString) + + w.Write(LEVEL_2, "Conditions:\n") + readyText := "" + if endpoint.Conditions.Ready != nil { + readyText = strconv.FormatBool(*endpoint.Conditions.Ready) + } + w.Write(LEVEL_3, "Ready:\t%s\n", readyText) + + hostnameText := "" + if endpoint.Hostname != nil { + hostnameText = *endpoint.Hostname + } + w.Write(LEVEL_2, "Hostname:\t%s\n", hostnameText) + + if endpoint.TargetRef != nil { + w.Write(LEVEL_2, "TargetRef:\t%s/%s\n", endpoint.TargetRef.Kind, endpoint.TargetRef.Name) + } + + printLabelsMultilineWithIndent(w, " ", "Topology", "\t", endpoint.Topology, sets.NewString()) + } + } + + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +// ServiceAccountDescriber generates information about a service. 
+type ServiceAccountDescriber struct { + clientset.Interface +} + +func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.CoreV1().ServiceAccounts(namespace) + + serviceAccount, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + tokens := []corev1.Secret{} + + // missingSecrets is the set of all secrets present in the + // serviceAccount but not present in the set of existing secrets. + missingSecrets := sets.NewString() + secrets := corev1.SecretList{} + err = runtimeresource.FollowContinue(&metav1.ListOptions{Limit: describerSettings.ChunkSize}, + func(options metav1.ListOptions) (runtime.Object, error) { + newList, err := d.CoreV1().Secrets(namespace).List(context.TODO(), options) + if err != nil { + return nil, runtimeresource.EnhanceListError(err, options, corev1.ResourceSecrets.String()) + } + secrets.Items = append(secrets.Items, newList.Items...) + return newList, nil + }) + + // errors are tolerated here in order to describe the serviceAccount with all + // of the secrets that it references, even if those secrets cannot be fetched. + if err == nil { + // existingSecrets is the set of all secrets remaining on a + // service account that are not present in the "tokens" slice. 
+ existingSecrets := sets.NewString() + + for _, s := range secrets.Items { + if s.Type == corev1.SecretTypeServiceAccountToken { + name := s.Annotations[corev1.ServiceAccountNameKey] + uid := s.Annotations[corev1.ServiceAccountUIDKey] + if name == serviceAccount.Name && uid == string(serviceAccount.UID) { + tokens = append(tokens, s) + } + } + existingSecrets.Insert(s.Name) + } + + for _, s := range serviceAccount.Secrets { + if !existingSecrets.Has(s.Name) { + missingSecrets.Insert(s.Name) + } + } + for _, s := range serviceAccount.ImagePullSecrets { + if !existingSecrets.Has(s.Name) { + missingSecrets.Insert(s.Name) + } + } + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), serviceAccount, describerSettings.ChunkSize) + } + + return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) +} + +func describeServiceAccount(serviceAccount *corev1.ServiceAccount, tokens []corev1.Secret, missingSecrets sets.String, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", serviceAccount.Namespace) + printLabelsMultiline(w, "Labels", serviceAccount.Labels) + printAnnotationsMultiline(w, "Annotations", serviceAccount.Annotations) + + var ( + emptyHeader = " " + pullHeader = "Image pull secrets:" + mountHeader = "Mountable secrets: " + tokenHeader = "Tokens: " + + pullSecretNames = []string{} + mountSecretNames = []string{} + tokenSecretNames = []string{} + ) + + for _, s := range serviceAccount.ImagePullSecrets { + pullSecretNames = append(pullSecretNames, s.Name) + } + for _, s := range serviceAccount.Secrets { + mountSecretNames = append(mountSecretNames, s.Name) + } + for _, s := range tokens { + tokenSecretNames = append(tokenSecretNames, s.Name) + } + + types := map[string][]string{ + pullHeader: pullSecretNames, + mountHeader: 
mountSecretNames, + tokenHeader: tokenSecretNames, + } + for _, header := range sets.StringKeySet(types).List() { + names := types[header] + if len(names) == 0 { + w.Write(LEVEL_0, "%s\t\n", header) + } else { + prefix := header + for _, name := range names { + if missingSecrets.Has(name) { + w.Write(LEVEL_0, "%s\t%s (not found)\n", prefix, name) + } else { + w.Write(LEVEL_0, "%s\t%s\n", prefix, name) + } + prefix = emptyHeader + } + } + } + + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +// RoleDescriber generates information about a node. +type RoleDescriber struct { + clientset.Interface +} + +func (d *RoleDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + role, err := d.RbacV1().Roles(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + breakdownRules := []rbacv1.PolicyRule{} + for _, rule := range role.Rules { + breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...) + } + + compactRules, err := rbac.CompactRules(breakdownRules) + if err != nil { + return "", err + } + sort.Stable(rbac.SortableRuleSlice(compactRules)) + + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", role.Name) + printLabelsMultiline(w, "Labels", role.Labels) + printAnnotationsMultiline(w, "Annotations", role.Annotations) + + w.Write(LEVEL_0, "PolicyRule:\n") + w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n") + w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n") + for _, r := range compactRules { + w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", CombineResourceGroup(r.Resources, r.APIGroups), r.NonResourceURLs, r.ResourceNames, r.Verbs) + } + + return nil + }) +} + +// ClusterRoleDescriber generates information about a node. 
+type ClusterRoleDescriber struct { + clientset.Interface +} + +func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + role, err := d.RbacV1().ClusterRoles().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + breakdownRules := []rbacv1.PolicyRule{} + for _, rule := range role.Rules { + breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...) + } + + compactRules, err := rbac.CompactRules(breakdownRules) + if err != nil { + return "", err + } + sort.Stable(rbac.SortableRuleSlice(compactRules)) + + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", role.Name) + printLabelsMultiline(w, "Labels", role.Labels) + printAnnotationsMultiline(w, "Annotations", role.Annotations) + + w.Write(LEVEL_0, "PolicyRule:\n") + w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n") + w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n") + for _, r := range compactRules { + w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", CombineResourceGroup(r.Resources, r.APIGroups), r.NonResourceURLs, r.ResourceNames, r.Verbs) + } + + return nil + }) +} + +func CombineResourceGroup(resource, group []string) string { + if len(resource) == 0 { + return "" + } + parts := strings.SplitN(resource[0], "/", 2) + combine := parts[0] + + if len(group) > 0 && group[0] != "" { + combine = combine + "." + group[0] + } + + if len(parts) == 2 { + combine = combine + "/" + parts[1] + } + return combine +} + +// RoleBindingDescriber generates information about a node. 
+type RoleBindingDescriber struct { + clientset.Interface +} + +func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + binding, err := d.RbacV1().RoleBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", binding.Name) + printLabelsMultiline(w, "Labels", binding.Labels) + printAnnotationsMultiline(w, "Annotations", binding.Annotations) + + w.Write(LEVEL_0, "Role:\n") + w.Write(LEVEL_1, "Kind:\t%s\n", binding.RoleRef.Kind) + w.Write(LEVEL_1, "Name:\t%s\n", binding.RoleRef.Name) + + w.Write(LEVEL_0, "Subjects:\n") + w.Write(LEVEL_1, "Kind\tName\tNamespace\n") + w.Write(LEVEL_1, "----\t----\t---------\n") + for _, s := range binding.Subjects { + w.Write(LEVEL_1, "%s\t%s\t%s\n", s.Kind, s.Name, s.Namespace) + } + + return nil + }) +} + +// ClusterRoleBindingDescriber generates information about a node. 
+type ClusterRoleBindingDescriber struct { + clientset.Interface +} + +func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + binding, err := d.RbacV1().ClusterRoleBindings().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", binding.Name) + printLabelsMultiline(w, "Labels", binding.Labels) + printAnnotationsMultiline(w, "Annotations", binding.Annotations) + + w.Write(LEVEL_0, "Role:\n") + w.Write(LEVEL_1, "Kind:\t%s\n", binding.RoleRef.Kind) + w.Write(LEVEL_1, "Name:\t%s\n", binding.RoleRef.Name) + + w.Write(LEVEL_0, "Subjects:\n") + w.Write(LEVEL_1, "Kind\tName\tNamespace\n") + w.Write(LEVEL_1, "----\t----\t---------\n") + for _, s := range binding.Subjects { + w.Write(LEVEL_1, "%s\t%s\t%s\n", s.Kind, s.Name, s.Namespace) + } + + return nil + }) +} + +// NodeDescriber generates information about a node. 
+type NodeDescriber struct { + clientset.Interface +} + +func (d *NodeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + mc := d.CoreV1().Nodes() + node, err := mc.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(corev1.PodSucceeded) + ",status.phase!=" + string(corev1.PodFailed)) + if err != nil { + return "", err + } + // in a policy aware setting, users may have access to a node, but not all pods + // in that case, we note that the user does not have access to the pods + canViewPods := true + initialOpts := metav1.ListOptions{ + FieldSelector: fieldSelector.String(), + Limit: describerSettings.ChunkSize, + } + nodeNonTerminatedPodsList, err := getPodsInChunks(d.CoreV1().Pods(namespace), initialOpts) + if err != nil { + if !apierrors.IsForbidden(err) { + return "", err + } + canViewPods = false + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + if ref, err := reference.GetReference(scheme.Scheme, node); err != nil { + klog.Errorf("Unable to construct reference to '%#v': %v", node, err) + } else { + // TODO: We haven't decided the namespace for Node object yet. 
+ ref.UID = types.UID(ref.Name) + events, _ = searchEvents(d.CoreV1(), ref, describerSettings.ChunkSize) + } + } + + return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods, &LeaseDescriber{d}) +} + +type LeaseDescriber struct { + client clientset.Interface +} + +func describeNode(node *corev1.Node, nodeNonTerminatedPodsList *corev1.PodList, events *corev1.EventList, + canViewPods bool, ld *LeaseDescriber) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", node.Name) + if roles := findNodeRoles(node); len(roles) > 0 { + w.Write(LEVEL_0, "Roles:\t%s\n", strings.Join(roles, ",")) + } else { + w.Write(LEVEL_0, "Roles:\t%s\n", "") + } + printLabelsMultiline(w, "Labels", node.Labels) + printAnnotationsMultiline(w, "Annotations", node.Annotations) + w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z)) + printNodeTaintsMultiline(w, "Taints", node.Spec.Taints) + w.Write(LEVEL_0, "Unschedulable:\t%v\n", node.Spec.Unschedulable) + + if ld != nil { + if lease, err := ld.client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get(context.TODO(), node.Name, metav1.GetOptions{}); err == nil { + describeNodeLease(lease, w) + } else { + w.Write(LEVEL_0, "Lease:\tFailed to get lease: %s\n", err) + } + } + + if len(node.Status.Conditions) > 0 { + w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n") + w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n") + for _, c := range node.Status.Conditions { + w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n", + c.Type, + c.Status, + c.LastHeartbeatTime.Time.Format(time.RFC1123Z), + c.LastTransitionTime.Time.Format(time.RFC1123Z), + c.Reason, + c.Message) + } + } + + w.Write(LEVEL_0, "Addresses:\n") + for _, address := range node.Status.Addresses { + w.Write(LEVEL_1, "%s:\t%s\n", address.Type, address.Address) + } + + 
printResourceList := func(resourceList corev1.ResourceList) { + resources := make([]corev1.ResourceName, 0, len(resourceList)) + for resource := range resourceList { + resources = append(resources, resource) + } + sort.Sort(SortableResourceNames(resources)) + for _, resource := range resources { + value := resourceList[resource] + w.Write(LEVEL_0, " %s:\t%s\n", resource, value.String()) + } + } + + if len(node.Status.Capacity) > 0 { + w.Write(LEVEL_0, "Capacity:\n") + printResourceList(node.Status.Capacity) + } + if len(node.Status.Allocatable) > 0 { + w.Write(LEVEL_0, "Allocatable:\n") + printResourceList(node.Status.Allocatable) + } + + w.Write(LEVEL_0, "System Info:\n") + w.Write(LEVEL_0, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID) + w.Write(LEVEL_0, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID) + w.Write(LEVEL_0, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID) + w.Write(LEVEL_0, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion) + w.Write(LEVEL_0, " OS Image:\t%s\n", node.Status.NodeInfo.OSImage) + w.Write(LEVEL_0, " Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem) + w.Write(LEVEL_0, " Architecture:\t%s\n", node.Status.NodeInfo.Architecture) + w.Write(LEVEL_0, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion) + w.Write(LEVEL_0, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion) + w.Write(LEVEL_0, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion) + + // remove when .PodCIDR is depreciated + if len(node.Spec.PodCIDR) > 0 { + w.Write(LEVEL_0, "PodCIDR:\t%s\n", node.Spec.PodCIDR) + } + + if len(node.Spec.PodCIDRs) > 0 { + w.Write(LEVEL_0, "PodCIDRs:\t%s\n", strings.Join(node.Spec.PodCIDRs, ",")) + } + if len(node.Spec.ProviderID) > 0 { + w.Write(LEVEL_0, "ProviderID:\t%s\n", node.Spec.ProviderID) + } + if canViewPods && nodeNonTerminatedPodsList != nil { + describeNodeResource(nodeNonTerminatedPodsList, node, w) + } else { + w.Write(LEVEL_0, "Pods:\tnot 
authorized\n") + } + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func describeNodeLease(lease *coordinationv1.Lease, w PrefixWriter) { + w.Write(LEVEL_0, "Lease:\n") + holderIdentity := "" + if lease != nil && lease.Spec.HolderIdentity != nil { + holderIdentity = *lease.Spec.HolderIdentity + } + w.Write(LEVEL_1, "HolderIdentity:\t%s\n", holderIdentity) + acquireTime := "" + if lease != nil && lease.Spec.AcquireTime != nil { + acquireTime = lease.Spec.AcquireTime.Time.Format(time.RFC1123Z) + } + w.Write(LEVEL_1, "AcquireTime:\t%s\n", acquireTime) + renewTime := "" + if lease != nil && lease.Spec.RenewTime != nil { + renewTime = lease.Spec.RenewTime.Time.Format(time.RFC1123Z) + } + w.Write(LEVEL_1, "RenewTime:\t%s\n", renewTime) +} + +type StatefulSetDescriber struct { + client clientset.Interface +} + +func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + ps, err := p.client.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + pc := p.client.CoreV1().Pods(namespace) + + selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return "", err + } + + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, ps.UID, describerSettings) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(p.client.CoreV1(), ps, describerSettings.ChunkSize) + } + + return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed) +} + +func describeStatefulSet(ps *appsv1.StatefulSet, selector labels.Selector, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", ps.ObjectMeta.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", 
ps.ObjectMeta.Namespace) + w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z)) + w.Write(LEVEL_0, "Selector:\t%s\n", selector) + printLabelsMultiline(w, "Labels", ps.Labels) + printAnnotationsMultiline(w, "Annotations", ps.Annotations) + w.Write(LEVEL_0, "Replicas:\t%d desired | %d total\n", *ps.Spec.Replicas, ps.Status.Replicas) + w.Write(LEVEL_0, "Update Strategy:\t%s\n", ps.Spec.UpdateStrategy.Type) + if ps.Spec.UpdateStrategy.RollingUpdate != nil { + ru := ps.Spec.UpdateStrategy.RollingUpdate + if ru.Partition != nil { + w.Write(LEVEL_1, "Partition:\t%d\n", *ru.Partition) + } + } + + w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + DescribePodTemplate(&ps.Spec.Template, w) + describeVolumeClaimTemplates(ps.Spec.VolumeClaimTemplates, w) + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +type CertificateSigningRequestDescriber struct { + client clientset.Interface +} + +func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + + var ( + crBytes []byte + metadata metav1.ObjectMeta + status string + signerName string + expirationSeconds *int32 + username string + events *corev1.EventList + ) + + if csr, err := p.client.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{}); err == nil { + crBytes = csr.Spec.Request + metadata = csr.ObjectMeta + conditionTypes := []string{} + for _, c := range csr.Status.Conditions { + conditionTypes = append(conditionTypes, string(c.Type)) + } + status = extractCSRStatus(conditionTypes, csr.Status.Certificate) + signerName = csr.Spec.SignerName + expirationSeconds = csr.Spec.ExpirationSeconds + username = csr.Spec.Username + if describerSettings.ShowEvents { + events, _ = searchEvents(p.client.CoreV1(), csr, describerSettings.ChunkSize) + } + } else if csr, err := 
p.client.CertificatesV1beta1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{}); err == nil { + crBytes = csr.Spec.Request + metadata = csr.ObjectMeta + conditionTypes := []string{} + for _, c := range csr.Status.Conditions { + conditionTypes = append(conditionTypes, string(c.Type)) + } + status = extractCSRStatus(conditionTypes, csr.Status.Certificate) + if csr.Spec.SignerName != nil { + signerName = *csr.Spec.SignerName + } + expirationSeconds = csr.Spec.ExpirationSeconds + username = csr.Spec.Username + if describerSettings.ShowEvents { + events, _ = searchEvents(p.client.CoreV1(), csr, describerSettings.ChunkSize) + } + } else { + return "", err + } + + cr, err := certificate.ParseCSR(crBytes) + if err != nil { + return "", fmt.Errorf("Error parsing CSR: %v", err) + } + + return describeCertificateSigningRequest(metadata, signerName, expirationSeconds, username, cr, status, events) +} + +func describeCertificateSigningRequest(csr metav1.ObjectMeta, signerName string, expirationSeconds *int32, username string, cr *x509.CertificateRequest, status string, events *corev1.EventList) (string, error) { + printListHelper := func(w PrefixWriter, prefix, name string, values []string) { + if len(values) == 0 { + return + } + w.Write(LEVEL_0, prefix+name+":\t") + w.Write(LEVEL_0, strings.Join(values, "\n"+prefix+"\t")) + w.Write(LEVEL_0, "\n") + } + + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", csr.Name) + w.Write(LEVEL_0, "Labels:\t%s\n", labels.FormatLabels(csr.Labels)) + w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(csr.Annotations)) + w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", csr.CreationTimestamp.Time.Format(time.RFC1123Z)) + w.Write(LEVEL_0, "Requesting User:\t%s\n", username) + if len(signerName) > 0 { + w.Write(LEVEL_0, "Signer:\t%s\n", signerName) + } + if expirationSeconds != nil { + w.Write(LEVEL_0, "Requested Duration:\t%s\n", 
duration.HumanDuration(utilcsr.ExpirationSecondsToDuration(*expirationSeconds))) + } + w.Write(LEVEL_0, "Status:\t%s\n", status) + + w.Write(LEVEL_0, "Subject:\n") + w.Write(LEVEL_0, "\tCommon Name:\t%s\n", cr.Subject.CommonName) + w.Write(LEVEL_0, "\tSerial Number:\t%s\n", cr.Subject.SerialNumber) + printListHelper(w, "\t", "Organization", cr.Subject.Organization) + printListHelper(w, "\t", "Organizational Unit", cr.Subject.OrganizationalUnit) + printListHelper(w, "\t", "Country", cr.Subject.Country) + printListHelper(w, "\t", "Locality", cr.Subject.Locality) + printListHelper(w, "\t", "Province", cr.Subject.Province) + printListHelper(w, "\t", "StreetAddress", cr.Subject.StreetAddress) + printListHelper(w, "\t", "PostalCode", cr.Subject.PostalCode) + + if len(cr.DNSNames)+len(cr.EmailAddresses)+len(cr.IPAddresses)+len(cr.URIs) > 0 { + w.Write(LEVEL_0, "Subject Alternative Names:\n") + printListHelper(w, "\t", "DNS Names", cr.DNSNames) + printListHelper(w, "\t", "Email Addresses", cr.EmailAddresses) + var uris []string + for _, uri := range cr.URIs { + uris = append(uris, uri.String()) + } + printListHelper(w, "\t", "URIs", uris) + var ipaddrs []string + for _, ipaddr := range cr.IPAddresses { + ipaddrs = append(ipaddrs, ipaddr.String()) + } + printListHelper(w, "\t", "IP Addresses", ipaddrs) + } + + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler. 
+type HorizontalPodAutoscalerDescriber struct { + client clientset.Interface +} + +func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + var events *corev1.EventList + + // autoscaling/v2beta2 is introduced since v1.12 and autoscaling/v1 does not have full backward compatibility + // with autoscaling/v2beta2, so describer will try to get and describe hpa v2beta2 object firstly, if it fails, + // describer will fall back to do with hpa v1 object + hpaV2beta2, err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(d.client.CoreV1(), hpaV2beta2, describerSettings.ChunkSize) + } + return describeHorizontalPodAutoscalerV2beta2(hpaV2beta2, events, d) + } + + hpaV1, err := d.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(d.client.CoreV1(), hpaV1, describerSettings.ChunkSize) + } + return describeHorizontalPodAutoscalerV1(hpaV1, events, d) + } + + return "", err +} + +func describeHorizontalPodAutoscalerV2beta2(hpa *autoscalingv2beta2.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace) + printLabelsMultiline(w, "Labels", hpa.Labels) + printAnnotationsMultiline(w, "Annotations", hpa.Annotations) + w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z)) + w.Write(LEVEL_0, "Reference:\t%s/%s\n", + hpa.Spec.ScaleTargetRef.Kind, + hpa.Spec.ScaleTargetRef.Name) + w.Write(LEVEL_0, "Metrics:\t( current / target )\n") + for i, metric := range hpa.Spec.Metrics 
{ + switch metric.Type { + case autoscalingv2beta2.ExternalMetricSourceType: + if metric.External.Target.AverageValue != nil { + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil && + hpa.Status.CurrentMetrics[i].External.Current.AverageValue != nil { + current = hpa.Status.CurrentMetrics[i].External.Current.AverageValue.String() + } + w.Write(LEVEL_1, "%q (target average value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.AverageValue.String()) + } else { + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil { + current = hpa.Status.CurrentMetrics[i].External.Current.Value.String() + } + w.Write(LEVEL_1, "%q (target value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.Value.String()) + + } + case autoscalingv2beta2.PodsMetricSourceType: + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Pods != nil { + current = hpa.Status.CurrentMetrics[i].Pods.Current.AverageValue.String() + } + w.Write(LEVEL_1, "%q on pods:\t%s / %s\n", metric.Pods.Metric.Name, current, metric.Pods.Target.AverageValue.String()) + case autoscalingv2beta2.ObjectMetricSourceType: + w.Write(LEVEL_1, "\"%s\" on %s/%s ", metric.Object.Metric.Name, metric.Object.DescribedObject.Kind, metric.Object.DescribedObject.Name) + if metric.Object.Target.Type == autoscalingv2beta2.AverageValueMetricType { + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil { + current = hpa.Status.CurrentMetrics[i].Object.Current.AverageValue.String() + } + w.Write(LEVEL_0, "(target average value):\t%s / %s\n", current, metric.Object.Target.AverageValue.String()) + } else { + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil { + current = hpa.Status.CurrentMetrics[i].Object.Current.Value.String() + } + w.Write(LEVEL_0, "(target 
value):\t%s / %s\n", current, metric.Object.Target.Value.String()) + } + case autoscalingv2beta2.ResourceMetricSourceType: + w.Write(LEVEL_1, "resource %s on pods", string(metric.Resource.Name)) + if metric.Resource.Target.AverageValue != nil { + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil { + current = hpa.Status.CurrentMetrics[i].Resource.Current.AverageValue.String() + } + w.Write(LEVEL_0, ":\t%s / %s\n", current, metric.Resource.Target.AverageValue.String()) + } else { + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil && hpa.Status.CurrentMetrics[i].Resource.Current.AverageUtilization != nil { + current = fmt.Sprintf("%d%% (%s)", *hpa.Status.CurrentMetrics[i].Resource.Current.AverageUtilization, hpa.Status.CurrentMetrics[i].Resource.Current.AverageValue.String()) + } + + target := "" + if metric.Resource.Target.AverageUtilization != nil { + target = fmt.Sprintf("%d%%", *metric.Resource.Target.AverageUtilization) + } + w.Write(LEVEL_1, "(as a percentage of request):\t%s / %s\n", current, target) + } + case autoscalingv2beta2.ContainerResourceMetricSourceType: + w.Write(LEVEL_1, "resource %s of container \"%s\" on pods", string(metric.ContainerResource.Name), metric.ContainerResource.Container) + if metric.ContainerResource.Target.AverageValue != nil { + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].ContainerResource != nil { + current = hpa.Status.CurrentMetrics[i].ContainerResource.Current.AverageValue.String() + } + w.Write(LEVEL_0, ":\t%s / %s\n", current, metric.ContainerResource.Target.AverageValue.String()) + } else { + current := "" + if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].ContainerResource != nil && hpa.Status.CurrentMetrics[i].ContainerResource.Current.AverageUtilization != nil { + current = fmt.Sprintf("%d%% (%s)", 
*hpa.Status.CurrentMetrics[i].ContainerResource.Current.AverageUtilization, hpa.Status.CurrentMetrics[i].ContainerResource.Current.AverageValue.String()) + } + + target := "" + if metric.ContainerResource.Target.AverageUtilization != nil { + target = fmt.Sprintf("%d%%", *metric.ContainerResource.Target.AverageUtilization) + } + w.Write(LEVEL_1, "(as a percentage of request):\t%s / %s\n", current, target) + } + default: + w.Write(LEVEL_1, "\n", string(metric.Type)) + } + } + minReplicas := "" + if hpa.Spec.MinReplicas != nil { + minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) + } + w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas) + w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas) + // only print the hpa behavior if present + if hpa.Spec.Behavior != nil { + w.Write(LEVEL_0, "Behavior:\n") + printDirectionBehavior(w, "Scale Up", hpa.Spec.Behavior.ScaleUp) + printDirectionBehavior(w, "Scale Down", hpa.Spec.Behavior.ScaleDown) + } + w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind) + w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas) + + if len(hpa.Status.Conditions) > 0 { + w.Write(LEVEL_0, "Conditions:\n") + w.Write(LEVEL_1, "Type\tStatus\tReason\tMessage\n") + w.Write(LEVEL_1, "----\t------\t------\t-------\n") + for _, c := range hpa.Status.Conditions { + w.Write(LEVEL_1, "%v\t%v\t%v\t%v\n", c.Type, c.Status, c.Reason, c.Message) + } + } + + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +func printDirectionBehavior(w PrefixWriter, direction string, rules *autoscalingv2beta2.HPAScalingRules) { + if rules != nil { + w.Write(LEVEL_1, "%s:\n", direction) + if rules.StabilizationWindowSeconds != nil { + w.Write(LEVEL_2, "Stabilization Window: %d seconds\n", *rules.StabilizationWindowSeconds) + } + if len(rules.Policies) > 0 { + if rules.SelectPolicy != nil { + w.Write(LEVEL_2, "Select Policy: %s\n", *rules.SelectPolicy) + } else { + w.Write(LEVEL_2, "Select 
Policy: %s\n", autoscalingv2beta2.MaxPolicySelect) + } + w.Write(LEVEL_2, "Policies:\n") + for _, p := range rules.Policies { + w.Write(LEVEL_3, "- Type: %s\tValue: %d\tPeriod: %d seconds\n", p.Type, p.Value, p.PeriodSeconds) + } + } + } +} + +func describeHorizontalPodAutoscalerV1(hpa *autoscalingv1.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace) + printLabelsMultiline(w, "Labels", hpa.Labels) + printAnnotationsMultiline(w, "Annotations", hpa.Annotations) + w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z)) + w.Write(LEVEL_0, "Reference:\t%s/%s\n", + hpa.Spec.ScaleTargetRef.Kind, + hpa.Spec.ScaleTargetRef.Name) + + if hpa.Spec.TargetCPUUtilizationPercentage != nil { + w.Write(LEVEL_0, "Target CPU utilization:\t%d%%\n", *hpa.Spec.TargetCPUUtilizationPercentage) + current := "" + if hpa.Status.CurrentCPUUtilizationPercentage != nil { + current = fmt.Sprintf("%d", *hpa.Status.CurrentCPUUtilizationPercentage) + } + w.Write(LEVEL_0, "Current CPU utilization:\t%s%%\n", current) + } + + minReplicas := "" + if hpa.Spec.MinReplicas != nil { + minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) + } + w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas) + w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas) + w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind) + w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas) + + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev1.Node, w PrefixWriter) { + w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) + w.Write(LEVEL_1, 
"Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\tAge\n") + w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\t---\n") + allocatable := node.Status.Capacity + if len(node.Status.Allocatable) > 0 { + allocatable = node.Status.Allocatable + } + + for _, pod := range nodeNonTerminatedPodsList.Items { + req, limit := resourcehelper.PodRequestsAndLimits(&pod) + cpuReq, cpuLimit, memoryReq, memoryLimit := req[corev1.ResourceCPU], limit[corev1.ResourceCPU], req[corev1.ResourceMemory], limit[corev1.ResourceMemory] + fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 + fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 + fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100 + fractionMemoryLimit := float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100 + w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s\n", pod.Namespace, pod.Name, + cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), + memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit), translateTimestampSince(pod.CreationTimestamp)) + } + + w.Write(LEVEL_0, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n") + w.Write(LEVEL_1, "Resource\tRequests\tLimits\n") + w.Write(LEVEL_1, "--------\t--------\t------\n") + reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) + cpuReqs, cpuLimits, memoryReqs, memoryLimits, ephemeralstorageReqs, ephemeralstorageLimits := + reqs[corev1.ResourceCPU], limits[corev1.ResourceCPU], reqs[corev1.ResourceMemory], limits[corev1.ResourceMemory], reqs[corev1.ResourceEphemeralStorage], limits[corev1.ResourceEphemeralStorage] + fractionCpuReqs := float64(0) + fractionCpuLimits := float64(0) + if allocatable.Cpu().MilliValue() 
!= 0 { + fractionCpuReqs = float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 + fractionCpuLimits = float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 + } + fractionMemoryReqs := float64(0) + fractionMemoryLimits := float64(0) + if allocatable.Memory().Value() != 0 { + fractionMemoryReqs = float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100 + fractionMemoryLimits = float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100 + } + fractionEphemeralStorageReqs := float64(0) + fractionEphemeralStorageLimits := float64(0) + if allocatable.StorageEphemeral().Value() != 0 { + fractionEphemeralStorageReqs = float64(ephemeralstorageReqs.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100 + fractionEphemeralStorageLimits = float64(ephemeralstorageLimits.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100 + } + w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", + corev1.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits)) + w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", + corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) + w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", + corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits)) + + extResources := make([]string, 0, len(allocatable)) + hugePageResources := make([]string, 0, len(allocatable)) + for resource := range allocatable { + if resourcehelper.IsHugePageResourceName(resource) { + hugePageResources = append(hugePageResources, string(resource)) + } else if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods { + extResources = append(extResources, string(resource)) + } + } + + sort.Strings(extResources) + 
sort.Strings(hugePageResources) + + for _, resource := range hugePageResources { + hugePageSizeRequests, hugePageSizeLimits, hugePageSizeAllocable := reqs[corev1.ResourceName(resource)], limits[corev1.ResourceName(resource)], allocatable[corev1.ResourceName(resource)] + fractionHugePageSizeRequests := float64(0) + fractionHugePageSizeLimits := float64(0) + if hugePageSizeAllocable.Value() != 0 { + fractionHugePageSizeRequests = float64(hugePageSizeRequests.Value()) / float64(hugePageSizeAllocable.Value()) * 100 + fractionHugePageSizeLimits = float64(hugePageSizeLimits.Value()) / float64(hugePageSizeAllocable.Value()) * 100 + } + w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", + resource, hugePageSizeRequests.String(), int64(fractionHugePageSizeRequests), hugePageSizeLimits.String(), int64(fractionHugePageSizeLimits)) + } + + for _, ext := range extResources { + extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)] + w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String()) + } +} + +func getPodsTotalRequestsAndLimits(podList *corev1.PodList) (reqs map[corev1.ResourceName]resource.Quantity, limits map[corev1.ResourceName]resource.Quantity) { + reqs, limits = map[corev1.ResourceName]resource.Quantity{}, map[corev1.ResourceName]resource.Quantity{} + for _, pod := range podList.Items { + podReqs, podLimits := resourcehelper.PodRequestsAndLimits(&pod) + for podReqName, podReqValue := range podReqs { + if value, ok := reqs[podReqName]; !ok { + reqs[podReqName] = podReqValue.DeepCopy() + } else { + value.Add(podReqValue) + reqs[podReqName] = value + } + } + for podLimitName, podLimitValue := range podLimits { + if value, ok := limits[podLimitName]; !ok { + limits[podLimitName] = podLimitValue.DeepCopy() + } else { + value.Add(podLimitValue) + limits[podLimitName] = value + } + } + } + return +} + +func DescribeEvents(el *corev1.EventList, w PrefixWriter) { + if len(el.Items) == 0 { + w.Write(LEVEL_0, "Events:\t\n") 
+ return + } + w.Flush() + sort.Sort(event.SortableEvents(el.Items)) + w.Write(LEVEL_0, "Events:\n Type\tReason\tAge\tFrom\tMessage\n") + w.Write(LEVEL_1, "----\t------\t----\t----\t-------\n") + for _, e := range el.Items { + var interval string + firstTimestampSince := translateMicroTimestampSince(e.EventTime) + if e.EventTime.IsZero() { + firstTimestampSince = translateTimestampSince(e.FirstTimestamp) + } + if e.Series != nil { + interval = fmt.Sprintf("%s (x%d over %s)", translateMicroTimestampSince(e.Series.LastObservedTime), e.Series.Count, firstTimestampSince) + } else if e.Count > 1 { + interval = fmt.Sprintf("%s (x%d over %s)", translateTimestampSince(e.LastTimestamp), e.Count, firstTimestampSince) + } else { + interval = firstTimestampSince + } + source := e.Source.Component + if source == "" { + source = e.ReportingController + } + w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n", + e.Type, + e.Reason, + interval, + source, + strings.TrimSpace(e.Message), + ) + } +} + +// DeploymentDescriber generates information about a deployment. 
+type DeploymentDescriber struct { + client clientset.Interface +} + +func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + d, err := dd.client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(dd.client.CoreV1(), d, describerSettings.ChunkSize) + } + + var oldRSs, newRSs []*appsv1.ReplicaSet + if oldResult, _, newResult, err := deploymentutil.GetAllReplicaSetsInChunks(d, dd.client.AppsV1(), describerSettings.ChunkSize); err == nil { + oldRSs = oldResult + if newResult != nil { + newRSs = append(newRSs, newResult) + } + } + + return describeDeployment(d, oldRSs, newRSs, events) +} + +func describeDeployment(d *appsv1.Deployment, oldRSs []*appsv1.ReplicaSet, newRSs []*appsv1.ReplicaSet, events *corev1.EventList) (string, error) { + selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return "", err + } + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", d.ObjectMeta.Namespace) + w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z)) + printLabelsMultiline(w, "Labels", d.Labels) + printAnnotationsMultiline(w, "Annotations", d.Annotations) + w.Write(LEVEL_0, "Selector:\t%s\n", selector) + w.Write(LEVEL_0, "Replicas:\t%d desired | %d updated | %d total | %d available | %d unavailable\n", *(d.Spec.Replicas), d.Status.UpdatedReplicas, d.Status.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas) + w.Write(LEVEL_0, "StrategyType:\t%s\n", d.Spec.Strategy.Type) + w.Write(LEVEL_0, "MinReadySeconds:\t%d\n", d.Spec.MinReadySeconds) + if d.Spec.Strategy.RollingUpdate != nil { + ru := d.Spec.Strategy.RollingUpdate + w.Write(LEVEL_0, 
"RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String()) + } + DescribePodTemplate(&d.Spec.Template, w) + if len(d.Status.Conditions) > 0 { + w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n") + w.Write(LEVEL_1, "----\t------\t------\n") + for _, c := range d.Status.Conditions { + w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) + } + } + + if len(oldRSs) > 0 || len(newRSs) > 0 { + w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs)) + w.Write(LEVEL_0, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs)) + } + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string { + // Format the matching ReplicaSets into strings. + rsStrings := make([]string, 0, len(matchingRSs)) + for _, rs := range matchingRSs { + rsStrings = append(rsStrings, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, *rs.Spec.Replicas)) + } + + list := strings.Join(rsStrings, ", ") + if list == "" { + return "" + } + return list +} + +func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID, settings DescriberSettings) ( + running, waiting, succeeded, failed int, err error) { + initialOpts := metav1.ListOptions{LabelSelector: selector.String(), Limit: settings.ChunkSize} + rcPods, err := getPodsInChunks(c, initialOpts) + if err != nil { + return + } + for _, pod := range rcPods.Items { + controllerRef := metav1.GetControllerOf(&pod) + // Skip pods that are orphans or owned by other controllers. 
+ if controllerRef == nil || controllerRef.UID != uid { + continue + } + switch pod.Status.Phase { + case corev1.PodRunning: + running++ + case corev1.PodPending: + waiting++ + case corev1.PodSucceeded: + succeeded++ + case corev1.PodFailed: + failed++ + } + } + return +} + +func getPodsInChunks(c corev1client.PodInterface, initialOpts metav1.ListOptions) (*corev1.PodList, error) { + podList := &corev1.PodList{} + err := runtimeresource.FollowContinue(&initialOpts, + func(options metav1.ListOptions) (runtime.Object, error) { + newList, err := c.List(context.TODO(), options) + if err != nil { + return nil, runtimeresource.EnhanceListError(err, options, corev1.ResourcePods.String()) + } + podList.Items = append(podList.Items, newList.Items...) + return newList, nil + }) + return podList, err +} + +// ConfigMapDescriber generates information about a ConfigMap +type ConfigMapDescriber struct { + clientset.Interface +} + +func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.CoreV1().ConfigMaps(namespace) + + configMap, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", configMap.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", configMap.Namespace) + printLabelsMultiline(w, "Labels", configMap.Labels) + printAnnotationsMultiline(w, "Annotations", configMap.Annotations) + + w.Write(LEVEL_0, "\nData\n====\n") + for k, v := range configMap.Data { + w.Write(LEVEL_0, "%s:\n----\n", k) + w.Write(LEVEL_0, "%s\n", string(v)) + } + w.Write(LEVEL_0, "\nBinaryData\n====\n") + for k, v := range configMap.BinaryData { + w.Write(LEVEL_0, "%s: %s bytes\n", k, strconv.Itoa(len(v))) + } + w.Write(LEVEL_0, "\n") + + if describerSettings.ShowEvents { + events, err := searchEvents(d.CoreV1(), configMap, describerSettings.ChunkSize) + if err != nil { + return err + } + 
if events != nil { + DescribeEvents(events, w) + } + } + return nil + }) +} + +// NetworkPolicyDescriber generates information about a networkingv1.NetworkPolicy +type NetworkPolicyDescriber struct { + clientset.Interface +} + +func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.NetworkingV1().NetworkPolicies(namespace) + + networkPolicy, err := c.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + return describeNetworkPolicy(networkPolicy) +} + +func describeNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", networkPolicy.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", networkPolicy.Namespace) + w.Write(LEVEL_0, "Created on:\t%s\n", networkPolicy.CreationTimestamp) + printLabelsMultiline(w, "Labels", networkPolicy.Labels) + printAnnotationsMultiline(w, "Annotations", networkPolicy.Annotations) + describeNetworkPolicySpec(networkPolicy.Spec, w) + return nil + }) +} + +func describeNetworkPolicySpec(nps networkingv1.NetworkPolicySpec, w PrefixWriter) { + w.Write(LEVEL_0, "Spec:\n") + w.Write(LEVEL_1, "PodSelector: ") + if len(nps.PodSelector.MatchLabels) == 0 && len(nps.PodSelector.MatchExpressions) == 0 { + w.Write(LEVEL_2, " (Allowing the specific traffic to all pods in this namespace)\n") + } else { + w.Write(LEVEL_2, "%s\n", metav1.FormatLabelSelector(&nps.PodSelector)) + } + + ingressEnabled, egressEnabled := getPolicyType(nps) + if ingressEnabled { + w.Write(LEVEL_1, "Allowing ingress traffic:\n") + printNetworkPolicySpecIngressFrom(nps.Ingress, " ", w) + } else { + w.Write(LEVEL_1, "Not affecting ingress traffic\n") + } + if egressEnabled { + w.Write(LEVEL_1, "Allowing egress traffic:\n") + printNetworkPolicySpecEgressTo(nps.Egress, " ", w) + } else { + w.Write(LEVEL_1, "Not affecting egress traffic\n") + + } + 
w.Write(LEVEL_1, "Policy Types: %v\n", policyTypesToString(nps.PolicyTypes)) +} + +func getPolicyType(nps networkingv1.NetworkPolicySpec) (bool, bool) { + var ingress, egress bool + for _, pt := range nps.PolicyTypes { + switch pt { + case networkingv1.PolicyTypeIngress: + ingress = true + case networkingv1.PolicyTypeEgress: + egress = true + } + } + + return ingress, egress +} + +func printNetworkPolicySpecIngressFrom(npirs []networkingv1.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) { + if len(npirs) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, " (Selected pods are isolated for ingress connectivity)") + return + } + for i, npir := range npirs { + if len(npir.Ports) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: (traffic allowed to all ports)") + } else { + for _, port := range npir.Ports { + var proto corev1.Protocol + if port.Protocol != nil { + proto = *port.Protocol + } else { + proto = corev1.ProtocolTCP + } + w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) + } + } + if len(npir.From) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "From: (traffic not restricted by source)") + } else { + for _, from := range npir.From { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "From:") + if from.PodSelector != nil && from.NamespaceSelector != nil { + w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector)) + w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector)) + } else if from.PodSelector != nil { + w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector)) + } else if from.NamespaceSelector != nil { + w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector)) + } else if from.IPBlock != nil { + w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent) + w.Write(LEVEL_2, "%sCIDR: %s\n", 
initialIndent, from.IPBlock.CIDR) + w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(from.IPBlock.Except, ", ")) + } + } + } + if i != len(npirs)-1 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------") + } + } +} + +func printNetworkPolicySpecEgressTo(npers []networkingv1.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) { + if len(npers) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, " (Selected pods are isolated for egress connectivity)") + return + } + for i, nper := range npers { + if len(nper.Ports) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: (traffic allowed to all ports)") + } else { + for _, port := range nper.Ports { + var proto corev1.Protocol + if port.Protocol != nil { + proto = *port.Protocol + } else { + proto = corev1.ProtocolTCP + } + w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) + } + } + if len(nper.To) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "To: (traffic not restricted by destination)") + } else { + for _, to := range nper.To { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "To:") + if to.PodSelector != nil && to.NamespaceSelector != nil { + w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector)) + w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector)) + } else if to.PodSelector != nil { + w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector)) + } else if to.NamespaceSelector != nil { + w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector)) + } else if to.IPBlock != nil { + w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent) + w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, to.IPBlock.CIDR) + w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(to.IPBlock.Except, ", ")) + } + } + } + if i != len(npers)-1 { + w.Write(LEVEL_0, 
"%s%s\n", initialIndent, "----------") + } + } +} + +type StorageClassDescriber struct { + clientset.Interface +} + +func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + sc, err := s.StorageV1().StorageClasses().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(s.CoreV1(), sc, describerSettings.ChunkSize) + } + + return describeStorageClass(sc, events) +} + +func describeStorageClass(sc *storagev1.StorageClass, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", sc.Name) + w.Write(LEVEL_0, "IsDefaultClass:\t%s\n", storageutil.IsDefaultAnnotationText(sc.ObjectMeta)) + w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(sc.Annotations)) + w.Write(LEVEL_0, "Provisioner:\t%s\n", sc.Provisioner) + w.Write(LEVEL_0, "Parameters:\t%s\n", labels.FormatLabels(sc.Parameters)) + w.Write(LEVEL_0, "AllowVolumeExpansion:\t%s\n", printBoolPtr(sc.AllowVolumeExpansion)) + if len(sc.MountOptions) == 0 { + w.Write(LEVEL_0, "MountOptions:\t\n") + } else { + w.Write(LEVEL_0, "MountOptions:\n") + for _, option := range sc.MountOptions { + w.Write(LEVEL_1, "%s\n", option) + } + } + if sc.ReclaimPolicy != nil { + w.Write(LEVEL_0, "ReclaimPolicy:\t%s\n", *sc.ReclaimPolicy) + } + if sc.VolumeBindingMode != nil { + w.Write(LEVEL_0, "VolumeBindingMode:\t%s\n", *sc.VolumeBindingMode) + } + if sc.AllowedTopologies != nil { + printAllowedTopologies(w, sc.AllowedTopologies) + } + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +type CSINodeDescriber struct { + clientset.Interface +} + +func (c *CSINodeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + csi, err := c.StorageV1().CSINodes().Get(context.TODO(), name, 
metav1.GetOptions{}) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(c.CoreV1(), csi, describerSettings.ChunkSize) + } + + return describeCSINode(csi, events) +} + +func describeCSINode(csi *storagev1.CSINode, events *corev1.EventList) (output string, err error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", csi.GetName()) + printLabelsMultiline(w, "Labels", csi.GetLabels()) + printAnnotationsMultiline(w, "Annotations", csi.GetAnnotations()) + w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", csi.CreationTimestamp.Time.Format(time.RFC1123Z)) + w.Write(LEVEL_0, "Spec:\n") + if csi.Spec.Drivers != nil { + w.Write(LEVEL_1, "Drivers:\n") + for _, driver := range csi.Spec.Drivers { + w.Write(LEVEL_2, "%s:\n", driver.Name) + w.Write(LEVEL_3, "Node ID:\t%s\n", driver.NodeID) + if driver.Allocatable != nil && driver.Allocatable.Count != nil { + w.Write(LEVEL_3, "Allocatables:\n") + w.Write(LEVEL_4, "Count:\t%d\n", *driver.Allocatable.Count) + } + if driver.TopologyKeys != nil { + w.Write(LEVEL_3, "Topology Keys:\t%s\n", driver.TopologyKeys) + } + } + } + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + +func printAllowedTopologies(w PrefixWriter, topologies []corev1.TopologySelectorTerm) { + w.Write(LEVEL_0, "AllowedTopologies:\t") + if len(topologies) == 0 { + w.WriteLine("") + return + } + w.WriteLine("") + for i, term := range topologies { + printTopologySelectorTermsMultilineWithIndent(w, LEVEL_1, fmt.Sprintf("Term %d", i), "\t", term.MatchLabelExpressions) + } +} + +func printTopologySelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.TopologySelectorLabelRequirement) { + w.Write(indentLevel, "%s:%s", title, innerIndent) + + if len(reqs) == 0 { + w.WriteLine("") + return + } + + for i, req := range reqs { + if i != 0 { + w.Write(indentLevel, 
"%s", innerIndent) + } + exprStr := fmt.Sprintf("%s %s", req.Key, "in") + if len(req.Values) > 0 { + exprStr = fmt.Sprintf("%s [%s]", exprStr, strings.Join(req.Values, ", ")) + } + w.Write(LEVEL_0, "%s\n", exprStr) + } +} + +type PodDisruptionBudgetDescriber struct { + clientset.Interface +} + +func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + var ( + pdbv1 *policyv1.PodDisruptionBudget + pdbv1beta1 *policyv1beta1.PodDisruptionBudget + err error + ) + + pdbv1, err = p.PolicyV1().PodDisruptionBudgets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(p.CoreV1(), pdbv1, describerSettings.ChunkSize) + } + return describePodDisruptionBudgetV1(pdbv1, events) + } + + // try falling back to v1beta1 in NotFound error cases + if apierrors.IsNotFound(err) { + pdbv1beta1, err = p.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + } + if err == nil { + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(p.CoreV1(), pdbv1beta1, describerSettings.ChunkSize) + } + return describePodDisruptionBudgetV1beta1(pdbv1beta1, events) + } + + return "", err +} + +func describePodDisruptionBudgetV1(pdb *policyv1.PodDisruptionBudget, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", pdb.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", pdb.Namespace) + + if pdb.Spec.MinAvailable != nil { + w.Write(LEVEL_0, "Min available:\t%s\n", pdb.Spec.MinAvailable.String()) + } else if pdb.Spec.MaxUnavailable != nil { + w.Write(LEVEL_0, "Max unavailable:\t%s\n", pdb.Spec.MaxUnavailable.String()) + } + + if pdb.Spec.Selector != nil { + w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(pdb.Spec.Selector)) + } else { 
+ w.Write(LEVEL_0, "Selector:\t\n") + } + w.Write(LEVEL_0, "Status:\n") + w.Write(LEVEL_2, "Allowed disruptions:\t%d\n", pdb.Status.DisruptionsAllowed) + w.Write(LEVEL_2, "Current:\t%d\n", pdb.Status.CurrentHealthy) + w.Write(LEVEL_2, "Desired:\t%d\n", pdb.Status.DesiredHealthy) + w.Write(LEVEL_2, "Total:\t%d\n", pdb.Status.ExpectedPods) + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +func describePodDisruptionBudgetV1beta1(pdb *policyv1beta1.PodDisruptionBudget, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", pdb.Name) + w.Write(LEVEL_0, "Namespace:\t%s\n", pdb.Namespace) + + if pdb.Spec.MinAvailable != nil { + w.Write(LEVEL_0, "Min available:\t%s\n", pdb.Spec.MinAvailable.String()) + } else if pdb.Spec.MaxUnavailable != nil { + w.Write(LEVEL_0, "Max unavailable:\t%s\n", pdb.Spec.MaxUnavailable.String()) + } + + if pdb.Spec.Selector != nil { + w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(pdb.Spec.Selector)) + } else { + w.Write(LEVEL_0, "Selector:\t\n") + } + w.Write(LEVEL_0, "Status:\n") + w.Write(LEVEL_2, "Allowed disruptions:\t%d\n", pdb.Status.DisruptionsAllowed) + w.Write(LEVEL_2, "Current:\t%d\n", pdb.Status.CurrentHealthy) + w.Write(LEVEL_2, "Desired:\t%d\n", pdb.Status.DesiredHealthy) + w.Write(LEVEL_2, "Total:\t%d\n", pdb.Status.ExpectedPods) + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +// PriorityClassDescriber generates information about a PriorityClass. 
+type PriorityClassDescriber struct { + clientset.Interface +} + +func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + pc, err := s.SchedulingV1().PriorityClasses().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(s.CoreV1(), pc, describerSettings.ChunkSize) + } + + return describePriorityClass(pc, events) +} + +func describePriorityClass(pc *schedulingv1.PriorityClass, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", pc.Name) + w.Write(LEVEL_0, "Value:\t%v\n", pc.Value) + w.Write(LEVEL_0, "GlobalDefault:\t%v\n", pc.GlobalDefault) + w.Write(LEVEL_0, "Description:\t%s\n", pc.Description) + + w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(pc.Annotations)) + if events != nil { + DescribeEvents(events, w) + } + + return nil + }) +} + +// PodSecurityPolicyDescriber generates information about a PodSecuritypolicyv1beta1. 
+type PodSecurityPolicyDescriber struct { + clientset.Interface +} + +func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + psp, err := d.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + return describePodSecurityPolicy(psp) +} + +func describePodSecurityPolicy(psp *policyv1beta1.PodSecurityPolicy) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%s\n", psp.Name) + + w.Write(LEVEL_0, "\nSettings:\n") + + w.Write(LEVEL_1, "Allow Privileged:\t%t\n", psp.Spec.Privileged) + if psp.Spec.AllowPrivilegeEscalation != nil { + w.Write(LEVEL_1, "Allow Privilege Escalation:\t%t\n", *psp.Spec.AllowPrivilegeEscalation) + } else { + w.Write(LEVEL_1, "Allow Privilege Escalation:\t\n") + } + w.Write(LEVEL_1, "Default Add Capabilities:\t%v\n", capsToString(psp.Spec.DefaultAddCapabilities)) + w.Write(LEVEL_1, "Required Drop Capabilities:\t%s\n", capsToString(psp.Spec.RequiredDropCapabilities)) + w.Write(LEVEL_1, "Allowed Capabilities:\t%s\n", capsToString(psp.Spec.AllowedCapabilities)) + w.Write(LEVEL_1, "Allowed Volume Types:\t%s\n", fsTypeToString(psp.Spec.Volumes)) + + if len(psp.Spec.AllowedFlexVolumes) > 0 { + w.Write(LEVEL_1, "Allowed FlexVolume Types:\t%s\n", flexVolumesToString(psp.Spec.AllowedFlexVolumes)) + } + + if len(psp.Spec.AllowedCSIDrivers) > 0 { + w.Write(LEVEL_1, "Allowed CSI Drivers:\t%s\n", csiDriversToString(psp.Spec.AllowedCSIDrivers)) + } + + if len(psp.Spec.AllowedUnsafeSysctls) > 0 { + w.Write(LEVEL_1, "Allowed Unsafe Sysctls:\t%s\n", sysctlsToString(psp.Spec.AllowedUnsafeSysctls)) + } + if len(psp.Spec.ForbiddenSysctls) > 0 { + w.Write(LEVEL_1, "Forbidden Sysctls:\t%s\n", sysctlsToString(psp.Spec.ForbiddenSysctls)) + } + w.Write(LEVEL_1, "Allow Host Network:\t%t\n", psp.Spec.HostNetwork) + w.Write(LEVEL_1, "Allow Host Ports:\t%s\n", 
hostPortRangeToString(psp.Spec.HostPorts)) + w.Write(LEVEL_1, "Allow Host PID:\t%t\n", psp.Spec.HostPID) + w.Write(LEVEL_1, "Allow Host IPC:\t%t\n", psp.Spec.HostIPC) + w.Write(LEVEL_1, "Read Only Root Filesystem:\t%v\n", psp.Spec.ReadOnlyRootFilesystem) + + w.Write(LEVEL_1, "SELinux Context Strategy: %s\t\n", string(psp.Spec.SELinux.Rule)) + var user, role, seLinuxType, level string + if psp.Spec.SELinux.SELinuxOptions != nil { + user = psp.Spec.SELinux.SELinuxOptions.User + role = psp.Spec.SELinux.SELinuxOptions.Role + seLinuxType = psp.Spec.SELinux.SELinuxOptions.Type + level = psp.Spec.SELinux.SELinuxOptions.Level + } + w.Write(LEVEL_2, "User:\t%s\n", stringOrNone(user)) + w.Write(LEVEL_2, "Role:\t%s\n", stringOrNone(role)) + w.Write(LEVEL_2, "Type:\t%s\n", stringOrNone(seLinuxType)) + w.Write(LEVEL_2, "Level:\t%s\n", stringOrNone(level)) + + w.Write(LEVEL_1, "Run As User Strategy: %s\t\n", string(psp.Spec.RunAsUser.Rule)) + w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.RunAsUser.Ranges)) + + w.Write(LEVEL_1, "FSGroup Strategy: %s\t\n", string(psp.Spec.FSGroup.Rule)) + w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.FSGroup.Ranges)) + + w.Write(LEVEL_1, "Supplemental Groups Strategy: %s\t\n", string(psp.Spec.SupplementalGroups.Rule)) + w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.SupplementalGroups.Ranges)) + + return nil + }) +} + +func stringOrNone(s string) string { + return stringOrDefaultValue(s, "") +} + +func stringOrDefaultValue(s, defaultValue string) string { + if len(s) > 0 { + return s + } + return defaultValue +} + +func fsTypeToString(volumes []policyv1beta1.FSType) string { + strVolumes := []string{} + for _, v := range volumes { + strVolumes = append(strVolumes, string(v)) + } + return stringOrNone(strings.Join(strVolumes, ",")) +} + +func flexVolumesToString(flexVolumes []policyv1beta1.AllowedFlexVolume) string { + volumes := []string{} + for _, flexVolume := range flexVolumes { + volumes = append(volumes, 
"driver="+flexVolume.Driver) + } + return stringOrDefaultValue(strings.Join(volumes, ","), "") +} + +func csiDriversToString(csiDrivers []policyv1beta1.AllowedCSIDriver) string { + drivers := []string{} + for _, csiDriver := range csiDrivers { + drivers = append(drivers, "driver="+csiDriver.Name) + } + return stringOrDefaultValue(strings.Join(drivers, ","), "") +} + +func sysctlsToString(sysctls []string) string { + return stringOrNone(strings.Join(sysctls, ",")) +} + +func hostPortRangeToString(ranges []policyv1beta1.HostPortRange) string { + formattedString := "" + if ranges != nil { + strRanges := []string{} + for _, r := range ranges { + strRanges = append(strRanges, fmt.Sprintf("%d-%d", r.Min, r.Max)) + } + formattedString = strings.Join(strRanges, ",") + } + return stringOrNone(formattedString) +} + +func idRangeToString(ranges []policyv1beta1.IDRange) string { + formattedString := "" + if ranges != nil { + strRanges := []string{} + for _, r := range ranges { + strRanges = append(strRanges, fmt.Sprintf("%d-%d", r.Min, r.Max)) + } + formattedString = strings.Join(strRanges, ",") + } + return stringOrNone(formattedString) +} + +func capsToString(caps []corev1.Capability) string { + formattedString := "" + if caps != nil { + strCaps := []string{} + for _, c := range caps { + strCaps = append(strCaps, string(c)) + } + formattedString = strings.Join(strCaps, ",") + } + return stringOrNone(formattedString) +} + +func policyTypesToString(pts []networkingv1.PolicyType) string { + formattedString := "" + if pts != nil { + strPts := []string{} + for _, p := range pts { + strPts = append(strPts, string(p)) + } + formattedString = strings.Join(strPts, ", ") + } + return stringOrNone(formattedString) +} + +// newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types. 
+func newErrNoDescriber(types ...reflect.Type) error { + names := make([]string, 0, len(types)) + for _, t := range types { + names = append(names, t.String()) + } + return ErrNoDescriber{Types: names} +} + +// Describers implements ObjectDescriber against functions registered via Add. Those functions can +// be strongly typed. Types are exactly matched (no conversion or assignable checks). +type Describers struct { + searchFns map[reflect.Type][]typeFunc +} + +// DescribeObject implements ObjectDescriber and will attempt to print the provided object to a string, +// if at least one describer function has been registered with the exact types passed, or if any +// describer can print the exact object in its first argument (the remainder will be provided empty +// values). If no function registered with Add can satisfy the passed objects, an ErrNoDescriber will +// be returned +// TODO: reorder and partial match extra. +func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (string, error) { + exactType := reflect.TypeOf(exact) + fns, ok := d.searchFns[exactType] + if !ok { + return "", newErrNoDescriber(exactType) + } + if len(extra) == 0 { + for _, typeFn := range fns { + if len(typeFn.Extra) == 0 { + return typeFn.Describe(exact, extra...) + } + } + typeFn := fns[0] + for _, t := range typeFn.Extra { + v := reflect.New(t).Elem() + extra = append(extra, v.Interface()) + } + return fns[0].Describe(exact, extra...) + } + + types := make([]reflect.Type, 0, len(extra)) + for _, obj := range extra { + types = append(types, reflect.TypeOf(obj)) + } + for _, typeFn := range fns { + if typeFn.Matches(types) { + return typeFn.Describe(exact, extra...) + } + } + return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...) +} + +// Add adds one or more describer functions to the Describer. The passed function must +// match the signature: +// +// func(...) (string, error) +// +// Any number of arguments may be provided. 
+func (d *Describers) Add(fns ...interface{}) error { + for _, fn := range fns { + fv := reflect.ValueOf(fn) + ft := fv.Type() + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + numIn := ft.NumIn() + if numIn == 0 { + return fmt.Errorf("expected at least one 'in' params, got: %v", ft) + } + if ft.NumOut() != 2 { + return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft) + } + types := make([]reflect.Type, 0, numIn) + for i := 0; i < numIn; i++ { + types = append(types, ft.In(i)) + } + if ft.Out(0) != reflect.TypeOf(string("")) { + return fmt.Errorf("expected string return, got: %v", ft) + } + var forErrorType error + // This convolution is necessary, otherwise TypeOf picks up on the fact + // that forErrorType is nil. + errorType := reflect.TypeOf(&forErrorType).Elem() + if ft.Out(1) != errorType { + return fmt.Errorf("expected error return, got: %v", ft) + } + + exact := types[0] + extra := types[1:] + if d.searchFns == nil { + d.searchFns = make(map[reflect.Type][]typeFunc) + } + fns := d.searchFns[exact] + fn := typeFunc{Extra: extra, Fn: fv} + fns = append(fns, fn) + d.searchFns[exact] = fns + } + return nil +} + +// typeFunc holds information about a describer function and the types it accepts +type typeFunc struct { + Extra []reflect.Type + Fn reflect.Value +} + +// Matches returns true when the passed types exactly match the Extra list. +func (fn typeFunc) Matches(types []reflect.Type) bool { + if len(fn.Extra) != len(types) { + return false + } + // reorder the items in array types and fn.Extra + // convert the type into string and sort them, check if they are matched + varMap := make(map[reflect.Type]bool) + for i := range fn.Extra { + varMap[fn.Extra[i]] = true + } + for i := range types { + if _, found := varMap[types[i]]; !found { + return false + } + } + return true +} + +// Describe invokes the nested function with the exact number of arguments. 
+func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, error) { + values := []reflect.Value{reflect.ValueOf(exact)} + for _, obj := range extra { + values = append(values, reflect.ValueOf(obj)) + } + out := fn.Fn.Call(values) + s := out[0].Interface().(string) + var err error + if !out[1].IsNil() { + err = out[1].Interface().(error) + } + return s, err +} + +// printLabelsMultiline prints multiple labels with a proper alignment. +func printLabelsMultiline(w PrefixWriter, title string, labels map[string]string) { + printLabelsMultilineWithIndent(w, "", title, "\t", labels, sets.NewString()) +} + +// printLabelsMultiline prints multiple labels with a user-defined alignment. +func printLabelsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, labels map[string]string, skip sets.String) { + w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) + + if len(labels) == 0 { + w.WriteLine("") + return + } + + // to print labels in the sorted order + keys := make([]string, 0, len(labels)) + for key := range labels { + if skip.Has(key) { + continue + } + keys = append(keys, key) + } + if len(keys) == 0 { + w.WriteLine("") + return + } + sort.Strings(keys) + + for i, key := range keys { + if i != 0 { + w.Write(LEVEL_0, "%s", initialIndent) + w.Write(LEVEL_0, "%s", innerIndent) + } + w.Write(LEVEL_0, "%s=%s\n", key, labels[key]) + } +} + +// printTaintsMultiline prints multiple taints with a proper alignment. +func printNodeTaintsMultiline(w PrefixWriter, title string, taints []corev1.Taint) { + printTaintsMultilineWithIndent(w, "", title, "\t", taints) +} + +// printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment. 
+func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []corev1.Taint) { + w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) + + if len(taints) == 0 { + w.WriteLine("") + return + } + + // to print taints in the sorted order + sort.Slice(taints, func(i, j int) bool { + cmpKey := func(taint corev1.Taint) string { + return string(taint.Effect) + "," + taint.Key + } + return cmpKey(taints[i]) < cmpKey(taints[j]) + }) + + for i, taint := range taints { + if i != 0 { + w.Write(LEVEL_0, "%s", initialIndent) + w.Write(LEVEL_0, "%s", innerIndent) + } + w.Write(LEVEL_0, "%s\n", taint.ToString()) + } +} + +// printPodsMultiline prints multiple pods with a proper alignment. +func printPodsMultiline(w PrefixWriter, title string, pods []corev1.Pod) { + printPodsMultilineWithIndent(w, "", title, "\t", pods) +} + +// printPodsMultilineWithIndent prints multiple pods with a user-defined alignment. +func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, pods []corev1.Pod) { + w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) + + if len(pods) == 0 { + w.WriteLine("") + return + } + + // to print pods in the sorted order + sort.Slice(pods, func(i, j int) bool { + cmpKey := func(pod corev1.Pod) string { + return pod.Name + } + return cmpKey(pods[i]) < cmpKey(pods[j]) + }) + + for i, pod := range pods { + if i != 0 { + w.Write(LEVEL_0, "%s", initialIndent) + w.Write(LEVEL_0, "%s", innerIndent) + } + w.Write(LEVEL_0, "%s\n", pod.Name) + } +} + +// printPodTolerationsMultiline prints multiple tolerations with a proper alignment. +func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []corev1.Toleration) { + printTolerationsMultilineWithIndent(w, "", title, "\t", tolerations) +} + +// printTolerationsMultilineWithIndent prints multiple tolerations with a user-defined alignment. 
+func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []corev1.Toleration) { + w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) + + if len(tolerations) == 0 { + w.WriteLine("") + return + } + + // to print tolerations in the sorted order + sort.Slice(tolerations, func(i, j int) bool { + return tolerations[i].Key < tolerations[j].Key + }) + + for i, toleration := range tolerations { + if i != 0 { + w.Write(LEVEL_0, "%s", initialIndent) + w.Write(LEVEL_0, "%s", innerIndent) + } + w.Write(LEVEL_0, "%s", toleration.Key) + if len(toleration.Value) != 0 { + w.Write(LEVEL_0, "=%s", toleration.Value) + } + if len(toleration.Effect) != 0 { + w.Write(LEVEL_0, ":%s", toleration.Effect) + } + // tolerations: + // - operator: "Exists" + // is a special case which tolerates everything + if toleration.Operator == corev1.TolerationOpExists && len(toleration.Value) == 0 { + if len(toleration.Key) != 0 || len(toleration.Effect) != 0 { + w.Write(LEVEL_0, " op=Exists") + } else { + w.Write(LEVEL_0, "op=Exists") + } + } + + if toleration.TolerationSeconds != nil { + w.Write(LEVEL_0, " for %ds", *toleration.TolerationSeconds) + } + w.Write(LEVEL_0, "\n") + } +} + +type flusher interface { + Flush() +} + +func tabbedString(f func(io.Writer) error) (string, error) { + out := new(tabwriter.Writer) + buf := &bytes.Buffer{} + out.Init(buf, 0, 8, 2, ' ', 0) + + err := f(out) + if err != nil { + return "", err + } + + out.Flush() + str := string(buf.String()) + return str, nil +} + +type SortableResourceNames []corev1.ResourceName + +func (list SortableResourceNames) Len() int { + return len(list) +} + +func (list SortableResourceNames) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableResourceNames) Less(i, j int) bool { + return list[i] < list[j] +} + +// SortedResourceNames returns the sorted resource names of a resource list. 
+func SortedResourceNames(list corev1.ResourceList) []corev1.ResourceName { + resources := make([]corev1.ResourceName, 0, len(list)) + for res := range list { + resources = append(resources, res) + } + sort.Sort(SortableResourceNames(resources)) + return resources +} + +type SortableResourceQuotas []corev1.ResourceQuota + +func (list SortableResourceQuotas) Len() int { + return len(list) +} + +func (list SortableResourceQuotas) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableResourceQuotas) Less(i, j int) bool { + return list[i].Name < list[j].Name +} + +type SortableVolumeMounts []corev1.VolumeMount + +func (list SortableVolumeMounts) Len() int { + return len(list) +} + +func (list SortableVolumeMounts) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableVolumeMounts) Less(i, j int) bool { + return list[i].MountPath < list[j].MountPath +} + +type SortableVolumeDevices []corev1.VolumeDevice + +func (list SortableVolumeDevices) Len() int { + return len(list) +} + +func (list SortableVolumeDevices) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableVolumeDevices) Less(i, j int) bool { + return list[i].DevicePath < list[j].DevicePath +} + +var maxAnnotationLen = 140 + +// printAnnotationsMultiline prints multiple annotations with a proper alignment. +// If annotation string is too long, we omit chars more than 200 length. 
+func printAnnotationsMultiline(w PrefixWriter, title string, annotations map[string]string) { + w.Write(LEVEL_0, "%s:\t", title) + + // to print labels in the sorted order + keys := make([]string, 0, len(annotations)) + for key := range annotations { + if skipAnnotations.Has(key) { + continue + } + keys = append(keys, key) + } + if len(keys) == 0 { + w.WriteLine("") + return + } + sort.Strings(keys) + indent := "\t" + for i, key := range keys { + if i != 0 { + w.Write(LEVEL_0, indent) + } + value := strings.TrimSuffix(annotations[key], "\n") + if (len(value)+len(key)+2) > maxAnnotationLen || strings.Contains(value, "\n") { + w.Write(LEVEL_0, "%s:\n", key) + for _, s := range strings.Split(value, "\n") { + w.Write(LEVEL_0, "%s %s\n", indent, shorten(s, maxAnnotationLen-2)) + } + } else { + w.Write(LEVEL_0, "%s: %s\n", key, value) + } + } +} + +func shorten(s string, maxLength int) string { + if len(s) > maxLength { + return s[:maxLength] + "..." + } + return s +} + +// translateMicroTimestampSince returns the elapsed time since timestamp in +// human-readable approximation. +func translateMicroTimestampSince(timestamp metav1.MicroTime) string { + if timestamp.IsZero() { + return "" + } + + return duration.HumanDuration(time.Since(timestamp.Time)) +} + +// translateTimestampSince returns the elapsed time since timestamp in +// human-readable approximation. +func translateTimestampSince(timestamp metav1.Time) string { + if timestamp.IsZero() { + return "" + } + + return duration.HumanDuration(time.Since(timestamp.Time)) +} + +// Pass ports=nil for all ports. +func formatEndpoints(endpoints *corev1.Endpoints, ports sets.String) string { + if len(endpoints.Subsets) == 0 { + return "" + } + list := []string{} + max := 3 + more := false + count := 0 + for i := range endpoints.Subsets { + ss := &endpoints.Subsets[i] + if len(ss.Ports) == 0 { + // It's possible to have headless services with no ports. 
+ for i := range ss.Addresses { + if len(list) == max { + more = true + } + if !more { + list = append(list, ss.Addresses[i].IP) + } + count++ + } + } else { + // "Normal" services with ports defined. + for i := range ss.Ports { + port := &ss.Ports[i] + if ports == nil || ports.Has(port.Name) { + for i := range ss.Addresses { + if len(list) == max { + more = true + } + addr := &ss.Addresses[i] + if !more { + hostPort := net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))) + list = append(list, hostPort) + } + count++ + } + } + } + } + } + ret := strings.Join(list, ",") + if more { + return fmt.Sprintf("%s + %d more...", ret, count-max) + } + return ret +} + +func extractCSRStatus(conditions []string, certificateBytes []byte) string { + var approved, denied, failed bool + for _, c := range conditions { + switch c { + case string(certificatesv1beta1.CertificateApproved): + approved = true + case string(certificatesv1beta1.CertificateDenied): + denied = true + case string(certificatesv1beta1.CertificateFailed): + failed = true + } + } + var status string + // must be in order of precedence + if denied { + status += "Denied" + } else if approved { + status += "Approved" + } else { + status += "Pending" + } + if failed { + status += ",Failed" + } + if len(certificateBytes) > 0 { + status += ",Issued" + } + return status +} + +// backendStringer behaves just like a string interface and converts the given backend to a string. +func serviceBackendStringer(backend *networkingv1.IngressServiceBackend) string { + if backend == nil { + return "" + } + var bPort string + if backend.Port.Number != 0 { + sNum := int64(backend.Port.Number) + bPort = strconv.FormatInt(sNum, 10) + } else { + bPort = backend.Port.Name + } + return fmt.Sprintf("%v:%v", backend.Name, bPort) +} + +// backendStringer behaves just like a string interface and converts the given backend to a string. 
+func backendStringer(backend *networkingv1beta1.IngressBackend) string { + if backend == nil { + return "" + } + return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String()) +} + +// findNodeRoles returns the roles of a given node. +// The roles are determined by looking for: +// * a node-role.kubernetes.io/="" label +// * a kubernetes.io/role="" label +func findNodeRoles(node *corev1.Node) []string { + roles := sets.NewString() + for k, v := range node.Labels { + switch { + case strings.HasPrefix(k, LabelNodeRolePrefix): + if role := strings.TrimPrefix(k, LabelNodeRolePrefix); len(role) > 0 { + roles.Insert(role) + } + + case k == NodeLabelRole && v != "": + roles.Insert(v) + } + } + return roles.List() +} + +// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string. +// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes. +func loadBalancerStatusStringer(s corev1.LoadBalancerStatus, wide bool) string { + ingress := s.Ingress + result := sets.NewString() + for i := range ingress { + if ingress[i].IP != "" { + result.Insert(ingress[i].IP) + } else if ingress[i].Hostname != "" { + result.Insert(ingress[i].Hostname) + } + } + + r := strings.Join(result.List(), ",") + if !wide && len(r) > LoadBalancerWidth { + r = r[0:(LoadBalancerWidth-3)] + "..." + } + return r +} + +// searchEvents finds events about the specified object. +// It is very similar to CoreV1.Events.Search, but supports the Limit parameter. 
+func searchEvents(client corev1client.EventsGetter, objOrRef runtime.Object, limit int64) (*corev1.EventList, error) { + ref, err := reference.GetReference(scheme.Scheme, objOrRef) + if err != nil { + return nil, err + } + stringRefKind := string(ref.Kind) + var refKind *string + if len(stringRefKind) > 0 { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if len(stringRefUID) > 0 { + refUID = &stringRefUID + } + + e := client.Events(ref.Namespace) + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + initialOpts := metav1.ListOptions{FieldSelector: fieldSelector.String(), Limit: limit} + eventList := &corev1.EventList{} + err = runtimeresource.FollowContinue(&initialOpts, + func(options metav1.ListOptions) (runtime.Object, error) { + newEvents, err := e.List(context.TODO(), options) + if err != nil { + return nil, runtimeresource.EnhanceListError(err, options, "events") + } + eventList.Items = append(eventList.Items, newEvents.Items...) + return newEvents, nil + }) + return eventList, err +} diff --git a/vendor/k8s.io/kubectl/pkg/describe/interface.go b/vendor/k8s.io/kubectl/pkg/describe/interface.go new file mode 100644 index 000000000..180821e1c --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/describe/interface.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package describe + +import ( + "fmt" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +const ( + // LoadBalancerWidth is the width how we describe load balancer + LoadBalancerWidth = 16 + + // LabelNodeRolePrefix is a label prefix for node roles + // It's copied over to here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 + LabelNodeRolePrefix = "node-role.kubernetes.io/" + + // NodeLabelRole specifies the role of a node + NodeLabelRole = "kubernetes.io/role" +) + +// DescriberFunc gives a way to display the specified RESTMapping type +type DescriberFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (ResourceDescriber, error) + +// ResourceDescriber generates output for the named resource or an error +// if the output could not be generated. Implementers typically +// abstract the retrieval of the named object from a remote server. +type ResourceDescriber interface { + Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) +} + +// DescriberSettings holds display configuration for each object +// describer to control what is printed. +type DescriberSettings struct { + ShowEvents bool + ChunkSize int64 +} + +// ObjectDescriber is an interface for displaying arbitrary objects with extra +// information. Use when an object is in hand (on disk, or already retrieved). +// Implementers may ignore the additional information passed on extra, or use it +// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer +// is found. +type ObjectDescriber interface { + DescribeObject(object interface{}, extra ...interface{}) (output string, err error) +} + +// ErrNoDescriber is a structured error indicating the provided object or objects +// cannot be described. +type ErrNoDescriber struct { + Types []string +} + +// Error implements the error interface. 
+func (e ErrNoDescriber) Error() string { + return fmt.Sprintf("no describer has been defined for %v", e.Types) +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/attachablepodforobject.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/attachablepodforobject.go new file mode 100644 index 000000000..7b18111fd --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/attachablepodforobject.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "fmt" + "sort" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/kubectl/pkg/util/podutils" +) + +// attachablePodForObject returns the pod to which to attach given an object. 
+func attachablePodForObject(restClientGetter genericclioptions.RESTClientGetter, object runtime.Object, timeout time.Duration) (*corev1.Pod, error) { + switch t := object.(type) { + case *corev1.Pod: + return t, nil + } + + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + clientset, err := corev1client.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + + namespace, selector, err := SelectorsForObject(object) + if err != nil { + return nil, fmt.Errorf("cannot attach to %T: %v", object, err) + } + sortBy := func(pods []*corev1.Pod) sort.Interface { return sort.Reverse(podutils.ActivePods(pods)) } + pod, _, err := GetFirstPod(clientset, namespace, selector.String(), timeout, sortBy) + return pod, err +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/canbeexposed.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/canbeexposed.go new file mode 100644 index 000000000..b232ff853 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/canbeexposed.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Check whether the kind of resources could be exposed +func canBeExposed(kind schema.GroupKind) error { + switch kind { + case + corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), + corev1.SchemeGroupVersion.WithKind("Service").GroupKind(), + corev1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(): + // nothing to do here + default: + return fmt.Errorf("cannot expose a %s", kind) + } + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go new file mode 100644 index 000000000..762d953f2 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go @@ -0,0 +1,191 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "context" + "fmt" + "sort" + "time" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + coreclient "k8s.io/client-go/kubernetes/typed/core/v1" + watchtools "k8s.io/client-go/tools/watch" +) + +// GetFirstPod returns a pod matching the namespace and label selector +// and the number of all pods that match the label selector. +func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string, timeout time.Duration, sortBy func([]*corev1.Pod) sort.Interface) (*corev1.Pod, int, error) { + options := metav1.ListOptions{LabelSelector: selector} + + podList, err := client.Pods(namespace).List(context.TODO(), options) + if err != nil { + return nil, 0, err + } + pods := []*corev1.Pod{} + for i := range podList.Items { + pod := podList.Items[i] + pods = append(pods, &pod) + } + if len(pods) > 0 { + sort.Sort(sortBy(pods)) + return pods[0], len(podList.Items), nil + } + + // Watch until we observe a pod + options.ResourceVersion = podList.ResourceVersion + w, err := client.Pods(namespace).Watch(context.TODO(), options) + if err != nil { + return nil, 0, err + } + defer w.Stop() + + condition := func(event watch.Event) (bool, error) { + return event.Type == watch.Added || event.Type == watch.Modified, nil + } + + ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) + defer cancel() + event, err := watchtools.UntilWithoutRetry(ctx, w, condition) + if err != nil { + return nil, 0, err + } + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return nil, 0, fmt.Errorf("%#v is not a pod event", event) + } + return pod, 1, nil +} + +// SelectorsForObject returns the 
pod label selector for a given object +func SelectorsForObject(object runtime.Object) (namespace string, selector labels.Selector, err error) { + switch t := object.(type) { + case *extensionsv1beta1.ReplicaSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case *appsv1.ReplicaSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case *appsv1beta2.ReplicaSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + + case *corev1.ReplicationController: + namespace = t.Namespace + selector = labels.SelectorFromSet(t.Spec.Selector) + + case *appsv1.StatefulSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case *appsv1beta1.StatefulSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case *appsv1beta2.StatefulSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + + case *extensionsv1beta1.DaemonSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case *appsv1.DaemonSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case 
*appsv1beta2.DaemonSet: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + + case *extensionsv1beta1.Deployment: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case *appsv1.Deployment: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case *appsv1beta1.Deployment: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + case *appsv1beta2.Deployment: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + + case *batchv1.Job: + namespace = t.Namespace + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector: %v", err) + } + + case *corev1.Service: + namespace = t.Namespace + if t.Spec.Selector == nil || len(t.Spec.Selector) == 0 { + return "", nil, fmt.Errorf("invalid service '%s': Service is defined without a selector", t.Name) + } + selector = labels.SelectorFromSet(t.Spec.Selector) + + default: + return "", nil, fmt.Errorf("selector for %T not implemented", object) + } + + return namespace, selector, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/history.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/history.go new file mode 100644 index 000000000..139cc8f81 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/history.go @@ -0,0 +1,410 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "bytes" + "context" + "fmt" + "io" + "text/tabwriter" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/kubernetes" + clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + "k8s.io/kubectl/pkg/apps" + "k8s.io/kubectl/pkg/describe" + deploymentutil "k8s.io/kubectl/pkg/util/deployment" + sliceutil "k8s.io/kubectl/pkg/util/slice" +) + +const ( + ChangeCauseAnnotation = "kubernetes.io/change-cause" +) + +// HistoryViewer provides an interface for resources have historical information. 
+type HistoryViewer interface { + ViewHistory(namespace, name string, revision int64) (string, error) +} + +type HistoryVisitor struct { + clientset kubernetes.Interface + result HistoryViewer +} + +func (v *HistoryVisitor) VisitDeployment(elem apps.GroupKindElement) { + v.result = &DeploymentHistoryViewer{v.clientset} +} + +func (v *HistoryVisitor) VisitStatefulSet(kind apps.GroupKindElement) { + v.result = &StatefulSetHistoryViewer{v.clientset} +} + +func (v *HistoryVisitor) VisitDaemonSet(kind apps.GroupKindElement) { + v.result = &DaemonSetHistoryViewer{v.clientset} +} + +func (v *HistoryVisitor) VisitJob(kind apps.GroupKindElement) {} +func (v *HistoryVisitor) VisitPod(kind apps.GroupKindElement) {} +func (v *HistoryVisitor) VisitReplicaSet(kind apps.GroupKindElement) {} +func (v *HistoryVisitor) VisitReplicationController(kind apps.GroupKindElement) {} +func (v *HistoryVisitor) VisitCronJob(kind apps.GroupKindElement) {} + +// HistoryViewerFor returns an implementation of HistoryViewer interface for the given schema kind +func HistoryViewerFor(kind schema.GroupKind, c kubernetes.Interface) (HistoryViewer, error) { + elem := apps.GroupKindElement(kind) + visitor := &HistoryVisitor{ + clientset: c, + } + + // Determine which HistoryViewer we need here + err := elem.Accept(visitor) + + if err != nil { + return nil, fmt.Errorf("error retrieving history for %q, %v", kind.String(), err) + } + + if visitor.result == nil { + return nil, fmt.Errorf("no history viewer has been implemented for %q", kind.String()) + } + + return visitor.result, nil +} + +type DeploymentHistoryViewer struct { + c kubernetes.Interface +} + +// ViewHistory returns a revision-to-replicaset map as the revision history of a deployment +// TODO: this should be a describer +func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { + versionedAppsClient := h.c.AppsV1() + deployment, err := versionedAppsClient.Deployments(namespace).Get(context.TODO(), 
name, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err) + } + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, versionedAppsClient) + if err != nil { + return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", name, err) + } + allRSs := allOldRSs + if newRS != nil { + allRSs = append(allRSs, newRS) + } + + historyInfo := make(map[int64]*corev1.PodTemplateSpec) + for _, rs := range allRSs { + v, err := deploymentutil.Revision(rs) + if err != nil { + continue + } + historyInfo[v] = &rs.Spec.Template + changeCause := getChangeCause(rs) + if historyInfo[v].Annotations == nil { + historyInfo[v].Annotations = make(map[string]string) + } + if len(changeCause) > 0 { + historyInfo[v].Annotations[ChangeCauseAnnotation] = changeCause + } + } + + if len(historyInfo) == 0 { + return "No rollout history found.", nil + } + + if revision > 0 { + // Print details of a specific revision + template, ok := historyInfo[revision] + if !ok { + return "", fmt.Errorf("unable to find the specified revision") + } + return printTemplate(template) + } + + // Sort the revisionToChangeCause map by revision + revisions := make([]int64, 0, len(historyInfo)) + for r := range historyInfo { + revisions = append(revisions, r) + } + sliceutil.SortInts64(revisions) + + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n") + for _, r := range revisions { + // Find the change-cause of revision r + changeCause := historyInfo[r].Annotations[ChangeCauseAnnotation] + if len(changeCause) == 0 { + changeCause = "" + } + fmt.Fprintf(out, "%d\t%s\n", r, changeCause) + } + return nil + }) +} + +func printTemplate(template *corev1.PodTemplateSpec) (string, error) { + buf := bytes.NewBuffer([]byte{}) + w := describe.NewPrefixWriter(buf) + describe.DescribePodTemplate(template, w) + return buf.String(), nil +} + +type DaemonSetHistoryViewer struct { + c 
kubernetes.Interface +} + +// ViewHistory returns a revision-to-history map as the revision history of a deployment +// TODO: this should be a describer +func (h *DaemonSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { + ds, history, err := daemonSetHistory(h.c.AppsV1(), namespace, name) + if err != nil { + return "", err + } + return printHistory(history, revision, func(history *appsv1.ControllerRevision) (*corev1.PodTemplateSpec, error) { + dsOfHistory, err := applyDaemonSetHistory(ds, history) + if err != nil { + return nil, err + } + return &dsOfHistory.Spec.Template, err + }) +} + +// printHistory returns the podTemplate of the given revision if it is non-zero +// else returns the overall revisions +func printHistory(history []*appsv1.ControllerRevision, revision int64, getPodTemplate func(history *appsv1.ControllerRevision) (*corev1.PodTemplateSpec, error)) (string, error) { + historyInfo := make(map[int64]*appsv1.ControllerRevision) + for _, history := range history { + // TODO: for now we assume revisions don't overlap, we may need to handle it + historyInfo[history.Revision] = history + } + if len(historyInfo) == 0 { + return "No rollout history found.", nil + } + + // Print details of a specific revision + if revision > 0 { + history, ok := historyInfo[revision] + if !ok { + return "", fmt.Errorf("unable to find the specified revision") + } + podTemplate, err := getPodTemplate(history) + if err != nil { + return "", fmt.Errorf("unable to parse history %s", history.Name) + } + return printTemplate(podTemplate) + } + + // Print an overview of all Revisions + // Sort the revisionToChangeCause map by revision + revisions := make([]int64, 0, len(historyInfo)) + for r := range historyInfo { + revisions = append(revisions, r) + } + sliceutil.SortInts64(revisions) + + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n") + for _, r := range revisions { + // Find the change-cause of 
revision r + changeCause := historyInfo[r].Annotations[ChangeCauseAnnotation] + if len(changeCause) == 0 { + changeCause = "" + } + fmt.Fprintf(out, "%d\t%s\n", r, changeCause) + } + return nil + }) +} + +type StatefulSetHistoryViewer struct { + c kubernetes.Interface +} + +// ViewHistory returns a list of the revision history of a statefulset +// TODO: this should be a describer +func (h *StatefulSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { + sts, history, err := statefulSetHistory(h.c.AppsV1(), namespace, name) + if err != nil { + return "", err + } + return printHistory(history, revision, func(history *appsv1.ControllerRevision) (*corev1.PodTemplateSpec, error) { + stsOfHistory, err := applyStatefulSetHistory(sts, history) + if err != nil { + return nil, err + } + return &stsOfHistory.Spec.Template, err + }) +} + +// controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor +// TODO: Rename this to controllerHistory when other controllers have been upgraded +func controlledHistoryV1( + apps clientappsv1.AppsV1Interface, + namespace string, + selector labels.Selector, + accessor metav1.Object) ([]*appsv1.ControllerRevision, error) { + var result []*appsv1.ControllerRevision + historyList, err := apps.ControllerRevisions(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + for i := range historyList.Items { + history := historyList.Items[i] + // Only add history that belongs to the API object + if metav1.IsControlledBy(&history, accessor) { + result = append(result, &history) + } + } + return result, nil +} + +// controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor +func controlledHistory( + apps clientappsv1.AppsV1Interface, + namespace string, + selector labels.Selector, + accessor metav1.Object) ([]*appsv1.ControllerRevision, error) 
{ + var result []*appsv1.ControllerRevision + historyList, err := apps.ControllerRevisions(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + for i := range historyList.Items { + history := historyList.Items[i] + // Only add history that belongs to the API object + if metav1.IsControlledBy(&history, accessor) { + result = append(result, &history) + } + } + return result, nil +} + +// daemonSetHistory returns the DaemonSet named name in namespace and all ControllerRevisions in its history. +func daemonSetHistory( + apps clientappsv1.AppsV1Interface, + namespace, name string) (*appsv1.DaemonSet, []*appsv1.ControllerRevision, error) { + ds, err := apps.DaemonSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err) + } + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, nil, fmt.Errorf("failed to create selector for DaemonSet %s: %v", ds.Name, err) + } + accessor, err := meta.Accessor(ds) + if err != nil { + return nil, nil, fmt.Errorf("failed to create accessor for DaemonSet %s: %v", ds.Name, err) + } + history, err := controlledHistory(apps, ds.Namespace, selector, accessor) + if err != nil { + return nil, nil, fmt.Errorf("unable to find history controlled by DaemonSet %s: %v", ds.Name, err) + } + return ds, history, nil +} + +// statefulSetHistory returns the StatefulSet named name in namespace and all ControllerRevisions in its history. 
+func statefulSetHistory( + apps clientappsv1.AppsV1Interface, + namespace, name string) (*appsv1.StatefulSet, []*appsv1.ControllerRevision, error) { + sts, err := apps.StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("failed to retrieve Statefulset %s: %s", name, err.Error()) + } + selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector) + if err != nil { + return nil, nil, fmt.Errorf("failed to create selector for StatefulSet %s: %s", name, err.Error()) + } + accessor, err := meta.Accessor(sts) + if err != nil { + return nil, nil, fmt.Errorf("failed to obtain accessor for StatefulSet %s: %s", name, err.Error()) + } + history, err := controlledHistoryV1(apps, namespace, selector, accessor) + if err != nil { + return nil, nil, fmt.Errorf("unable to find history controlled by StatefulSet %s: %v", name, err) + } + return sts, history, nil +} + +// applyDaemonSetHistory returns a specific revision of DaemonSet by applying the given history to a copy of the given DaemonSet +func applyDaemonSetHistory(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (*appsv1.DaemonSet, error) { + dsBytes, err := json.Marshal(ds) + if err != nil { + return nil, err + } + patched, err := strategicpatch.StrategicMergePatch(dsBytes, history.Data.Raw, ds) + if err != nil { + return nil, err + } + result := &appsv1.DaemonSet{} + err = json.Unmarshal(patched, result) + if err != nil { + return nil, err + } + return result, nil +} + +// applyStatefulSetHistory returns a specific revision of StatefulSet by applying the given history to a copy of the given StatefulSet +func applyStatefulSetHistory(sts *appsv1.StatefulSet, history *appsv1.ControllerRevision) (*appsv1.StatefulSet, error) { + stsBytes, err := json.Marshal(sts) + if err != nil { + return nil, err + } + patched, err := strategicpatch.StrategicMergePatch(stsBytes, history.Data.Raw, sts) + if err != nil { + return nil, err + } + result := 
&appsv1.StatefulSet{} + err = json.Unmarshal(patched, result) + if err != nil { + return nil, err + } + return result, nil +} + +// TODO: copied here until this becomes a describer +func tabbedString(f func(io.Writer) error) (string, error) { + out := new(tabwriter.Writer) + buf := &bytes.Buffer{} + out.Init(buf, 0, 8, 2, ' ', 0) + + err := f(out) + if err != nil { + return "", err + } + + out.Flush() + str := string(buf.String()) + return str, nil +} + +// getChangeCause returns the change-cause annotation of the input object +func getChangeCause(obj runtime.Object) string { + accessor, err := meta.Accessor(obj) + if err != nil { + return "" + } + return accessor.GetAnnotations()[ChangeCauseAnnotation] +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/historyviewer.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/historyviewer.go new file mode 100644 index 000000000..6ad9d217c --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/historyviewer.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" +) + +// historyViewer Returns a HistoryViewer for viewing change history +func historyViewer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (HistoryViewer, error) { + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + + external, err := kubernetes.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + return HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), external) +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/interface.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/interface.go new file mode 100644 index 000000000..83e13714f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/interface.go @@ -0,0 +1,114 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/

package polymorphichelpers

import (
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/rest"
)

// LogsForObjectFunc is a function type that can tell you how to get logs for a runtime.object
type LogsForObjectFunc func(restClientGetter genericclioptions.RESTClientGetter, object, options runtime.Object, timeout time.Duration, allContainers bool) (map[v1.ObjectReference]rest.ResponseWrapper, error)

// LogsForObjectFn gives a way to easily override the function for unit testing if needed.
var LogsForObjectFn LogsForObjectFunc = logsForObject

// AttachablePodForObjectFunc is a function type that can tell you how to get the pod for which to attach a given object
type AttachablePodForObjectFunc func(restClientGetter genericclioptions.RESTClientGetter, object runtime.Object, timeout time.Duration) (*v1.Pod, error)

// AttachablePodForObjectFn gives a way to easily override the function for unit testing if needed.
var AttachablePodForObjectFn AttachablePodForObjectFunc = attachablePodForObject

// HistoryViewerFunc is a function type that can tell you how to view change history
type HistoryViewerFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (HistoryViewer, error)

// HistoryViewerFn gives a way to easily override the function for unit testing if needed
var HistoryViewerFn HistoryViewerFunc = historyViewer

// StatusViewerFunc is a function type that can tell you how to print rollout status
type StatusViewerFunc func(mapping *meta.RESTMapping) (StatusViewer, error)

// StatusViewerFn gives a way to easily override the function for unit testing if needed
var StatusViewerFn StatusViewerFunc = statusViewer

// UpdatePodSpecForObjectFunc will call the provided function on the pod spec this object supports,
// return false if no pod spec is supported, or return an error.
type UpdatePodSpecForObjectFunc func(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error)

// UpdatePodSpecForObjectFn gives a way to easily override the function for unit testing if needed
var UpdatePodSpecForObjectFn UpdatePodSpecForObjectFunc = updatePodSpecForObject

// MapBasedSelectorForObjectFunc will call the provided function on mapping the map-based selector for object,
// return "" if object is not supported, or return an error.
type MapBasedSelectorForObjectFunc func(object runtime.Object) (string, error)

// MapBasedSelectorForObjectFn gives a way to easily override the function for unit testing if needed
var MapBasedSelectorForObjectFn MapBasedSelectorForObjectFunc = mapBasedSelectorForObject

// ProtocolsForObjectFunc will call the provided function on the protocols for the object,
// return nil-map if no protocols for the object, or return an error.
type ProtocolsForObjectFunc func(object runtime.Object) (map[string]string, error)

// ProtocolsForObjectFn gives a way to easily override the function for unit testing if needed
var ProtocolsForObjectFn ProtocolsForObjectFunc = protocolsForObject

// PortsForObjectFunc returns the ports associated with the provided object
type PortsForObjectFunc func(object runtime.Object) ([]string, error)

// PortsForObjectFn gives a way to easily override the function for unit testing if needed
var PortsForObjectFn PortsForObjectFunc = portsForObject

// CanBeExposedFunc is a function type that can tell you whether a given GroupKind is capable of being exposed
type CanBeExposedFunc func(kind schema.GroupKind) error

// CanBeExposedFn gives a way to easily override the function for unit testing if needed
var CanBeExposedFn CanBeExposedFunc = canBeExposed

// ObjectPauserFunc is a function type that marks the object in a given info as paused.
type ObjectPauserFunc func(runtime.Object) ([]byte, error)

// ObjectPauserFn gives a way to easily override the function for unit testing if needed.
// Returns the patched object in bytes and any error that occurred during the encoding or
// in case the object is already paused.
var ObjectPauserFn ObjectPauserFunc = defaultObjectPauser

// ObjectResumerFunc is a function type that marks the object in a given info as resumed.
type ObjectResumerFunc func(runtime.Object) ([]byte, error)

// ObjectResumerFn gives a way to easily override the function for unit testing if needed.
// Returns the patched object in bytes and any error that occurred during the encoding or
// in case the object is already resumed.
var ObjectResumerFn ObjectResumerFunc = defaultObjectResumer

// RollbackerFunc gives a way to change the rollback version of the specified RESTMapping type
type RollbackerFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (Rollbacker, error)

// RollbackerFn gives a way to easily override the function for unit testing if needed
var RollbackerFn RollbackerFunc = rollbacker

// ObjectRestarterFunc is a function type that updates an annotation in a deployment to restart it.
type ObjectRestarterFunc func(runtime.Object) ([]byte, error)

// ObjectRestarterFn gives a way to easily override the function for unit testing if needed.
// Returns the patched object in bytes and any error that occurred during the encoding.
var ObjectRestarterFn ObjectRestarterFunc = defaultObjectRestarter
diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go
new file mode 100644
index 000000000..7f5a275b0
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go
@@ -0,0 +1,210 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/ + +package polymorphichelpers + +import ( + "errors" + "fmt" + "os" + "sort" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/reference" + "k8s.io/kubectl/pkg/cmd/util/podcmd" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/podutils" +) + +// defaultLogsContainerAnnotationName is an annotation name that can be used to preselect the interesting container +// from a pod when running kubectl logs. It is deprecated and will be remove in 1.25. +const defaultLogsContainerAnnotationName = "kubectl.kubernetes.io/default-logs-container" + +func logsForObject(restClientGetter genericclioptions.RESTClientGetter, object, options runtime.Object, timeout time.Duration, allContainers bool) (map[corev1.ObjectReference]rest.ResponseWrapper, error) { + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + + clientset, err := corev1client.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + return logsForObjectWithClient(clientset, object, options, timeout, allContainers) +} + +// this is split for easy test-ability +func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, options runtime.Object, timeout time.Duration, allContainers bool) (map[corev1.ObjectReference]rest.ResponseWrapper, error) { + opts, ok := options.(*corev1.PodLogOptions) + if !ok { + return nil, errors.New("provided options object is not a PodLogOptions") + } + + switch t := object.(type) { + case *corev1.PodList: + ret := make(map[corev1.ObjectReference]rest.ResponseWrapper) + for i := range t.Items { + currRet, err := logsForObjectWithClient(clientset, &t.Items[i], options, timeout, allContainers) + if err != nil { + return nil, err + } + for k, v := range currRet { + ret[k] = v + } + } + return ret, nil + + case *corev1.Pod: + // 
if allContainers is true, then we're going to locate all containers and then iterate through them. At that point, "allContainers" is false + if !allContainers { + currOpts := new(corev1.PodLogOptions) + if opts != nil { + opts.DeepCopyInto(currOpts) + } + // in case the "kubectl.kubernetes.io/default-container" annotation is present, we preset the opts.Containers to default to selected + // container. This gives users ability to preselect the most interesting container in pod. + if annotations := t.GetAnnotations(); annotations != nil && currOpts.Container == "" { + var defaultContainer string + if len(annotations[podcmd.DefaultContainerAnnotationName]) > 0 { + defaultContainer = annotations[podcmd.DefaultContainerAnnotationName] + } else if len(annotations[defaultLogsContainerAnnotationName]) > 0 { + // Only log deprecation if we have only the old annotation. This allows users to + // set both to support multiple versions of kubectl; if they are setting both + // they must already know it is deprecated, so we don't need to add noisy + // warnings. + defaultContainer = annotations[defaultLogsContainerAnnotationName] + fmt.Fprintf(os.Stderr, "Using deprecated annotation `kubectl.kubernetes.io/default-logs-container` in pod/%v. Please use `kubectl.kubernetes.io/default-container` instead\n", t.Name) + } + if len(defaultContainer) > 0 { + if exists, _ := podcmd.FindContainerByName(t, defaultContainer); exists == nil { + fmt.Fprintf(os.Stderr, "Default container name %q not found in pod %s\n", defaultContainer, t.Name) + } else { + currOpts.Container = defaultContainer + } + } + } + + if currOpts.Container == "" { + // We don't know container name. In this case we expect only one container to be present in the pod (ignoring InitContainers). + // If there is more than one container, we should return an error showing all container names. 
+ if len(t.Spec.Containers) != 1 { + containerNames := getContainerNames(t.Spec.Containers) + initContainerNames := getContainerNames(t.Spec.InitContainers) + ephemeralContainerNames := getContainerNames(ephemeralContainersToContainers(t.Spec.EphemeralContainers)) + err := fmt.Sprintf("a container name must be specified for pod %s, choose one of: [%s]", t.Name, containerNames) + if len(initContainerNames) > 0 { + err += fmt.Sprintf(" or one of the init containers: [%s]", initContainerNames) + } + if len(ephemeralContainerNames) > 0 { + err += fmt.Sprintf(" or one of the ephemeral containers: [%s]", ephemeralContainerNames) + } + + return nil, errors.New(err) + } + currOpts.Container = t.Spec.Containers[0].Name + } + + container, fieldPath := podcmd.FindContainerByName(t, currOpts.Container) + if container == nil { + return nil, fmt.Errorf("container %s is not valid for pod %s", currOpts.Container, t.Name) + } + ref, err := reference.GetPartialReference(scheme.Scheme, t, fieldPath) + if err != nil { + return nil, fmt.Errorf("Unable to construct reference to '%#v': %v", t, err) + } + + ret := make(map[corev1.ObjectReference]rest.ResponseWrapper, 1) + ret[*ref] = clientset.Pods(t.Namespace).GetLogs(t.Name, currOpts) + return ret, nil + } + + ret := make(map[corev1.ObjectReference]rest.ResponseWrapper) + for _, c := range t.Spec.InitContainers { + currOpts := opts.DeepCopy() + currOpts.Container = c.Name + currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) + if err != nil { + return nil, err + } + for k, v := range currRet { + ret[k] = v + } + } + for _, c := range t.Spec.Containers { + currOpts := opts.DeepCopy() + currOpts.Container = c.Name + currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) + if err != nil { + return nil, err + } + for k, v := range currRet { + ret[k] = v + } + } + for _, c := range t.Spec.EphemeralContainers { + currOpts := opts.DeepCopy() + currOpts.Container = c.Name + currRet, err := 
logsForObjectWithClient(clientset, t, currOpts, timeout, false) + if err != nil { + return nil, err + } + for k, v := range currRet { + ret[k] = v + } + } + + return ret, nil + } + + namespace, selector, err := SelectorsForObject(object) + if err != nil { + return nil, fmt.Errorf("cannot get the logs from %T: %v", object, err) + } + + sortBy := func(pods []*corev1.Pod) sort.Interface { return podutils.ByLogging(pods) } + pod, numPods, err := GetFirstPod(clientset, namespace, selector.String(), timeout, sortBy) + if err != nil { + return nil, err + } + if numPods > 1 { + fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) + } + + return logsForObjectWithClient(clientset, pod, options, timeout, allContainers) +} + +// getContainerNames returns a formatted string containing the container names +func getContainerNames(containers []corev1.Container) string { + names := []string{} + for _, c := range containers { + names = append(names, c.Name) + } + return strings.Join(names, " ") +} + +func ephemeralContainersToContainers(containers []corev1.EphemeralContainer) []corev1.Container { + var ec []corev1.Container + for i := range containers { + ec = append(ec, corev1.Container(containers[i].EphemeralContainerCommon)) + } + return ec +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/mapbasedselectorforobject.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/mapbasedselectorforobject.go new file mode 100644 index 000000000..729ae6d4f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/mapbasedselectorforobject.go @@ -0,0 +1,160 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +// mapBasedSelectorForObject returns the map-based selector associated with the provided object. If a +// new set-based selector is provided, an error is returned if the selector cannot be converted to a +// map-based selector +func mapBasedSelectorForObject(object runtime.Object) (string, error) { + // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) + switch t := object.(type) { + case *corev1.ReplicationController: + return MakeLabels(t.Spec.Selector), nil + + case *corev1.Pod: + if len(t.Labels) == 0 { + return "", fmt.Errorf("the pod has no labels and cannot be exposed") + } + return MakeLabels(t.Labels), nil + + case *corev1.Service: + if t.Spec.Selector == nil { + return "", fmt.Errorf("the service has no pod selector set") + } + return MakeLabels(t.Spec.Selector), nil + + case *extensionsv1beta1.Deployment: + // "extensions" deployments use pod template labels if selector is not set. + var labels map[string]string + if t.Spec.Selector != nil { + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. 
+ if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + labels = t.Spec.Selector.MatchLabels + } else { + labels = t.Spec.Template.Labels + } + if len(labels) == 0 { + return "", fmt.Errorf("the deployment has no labels or selectors and cannot be exposed") + } + return MakeLabels(labels), nil + + case *appsv1.Deployment: + // "apps" deployments must have the selector set. + if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid deployment: no selectors, therefore cannot be exposed") + } + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return MakeLabels(t.Spec.Selector.MatchLabels), nil + + case *appsv1beta2.Deployment: + // "apps" deployments must have the selector set. + if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid deployment: no selectors, therefore cannot be exposed") + } + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return MakeLabels(t.Spec.Selector.MatchLabels), nil + + case *appsv1beta1.Deployment: + // "apps" deployments must have the selector set. 
+ if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid deployment: no selectors, therefore cannot be exposed") + } + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return MakeLabels(t.Spec.Selector.MatchLabels), nil + + case *extensionsv1beta1.ReplicaSet: + // "extensions" replicasets use pod template labels if selector is not set. + var labels map[string]string + if t.Spec.Selector != nil { + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + labels = t.Spec.Selector.MatchLabels + } else { + labels = t.Spec.Template.Labels + } + if len(labels) == 0 { + return "", fmt.Errorf("the replica set has no labels or selectors and cannot be exposed") + } + return MakeLabels(labels), nil + + case *appsv1.ReplicaSet: + // "apps" replicasets must have the selector set. + if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid replicaset: no selectors, therefore cannot be exposed") + } + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. 
+ if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return MakeLabels(t.Spec.Selector.MatchLabels), nil + + case *appsv1beta2.ReplicaSet: + // "apps" replicasets must have the selector set. + if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid replicaset: no selectors, therefore cannot be exposed") + } + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return MakeLabels(t.Spec.Selector.MatchLabels), nil + + default: + return "", fmt.Errorf("cannot extract pod selector from %T", object) + } + +} + +func MakeLabels(labels map[string]string) string { + out := []string{} + for key, value := range labels { + out = append(out, fmt.Sprintf("%s=%s", key, value)) + } + return strings.Join(out, ",") +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectpauser.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectpauser.go new file mode 100644 index 000000000..f50daf255 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectpauser.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "errors" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubectl/pkg/scheme" +) + +// Currently only supports Deployments. +func defaultObjectPauser(obj runtime.Object) ([]byte, error) { + switch obj := obj.(type) { + case *extensionsv1beta1.Deployment: + if obj.Spec.Paused { + return nil, errors.New("is already paused") + } + obj.Spec.Paused = true + return runtime.Encode(scheme.Codecs.LegacyCodec(extensionsv1beta1.SchemeGroupVersion), obj) + + case *appsv1.Deployment: + if obj.Spec.Paused { + return nil, errors.New("is already paused") + } + obj.Spec.Paused = true + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj) + + case *appsv1beta2.Deployment: + if obj.Spec.Paused { + return nil, errors.New("is already paused") + } + obj.Spec.Paused = true + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj) + + case *appsv1beta1.Deployment: + if obj.Spec.Paused { + return nil, errors.New("is already paused") + } + obj.Spec.Paused = true + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta1.SchemeGroupVersion), obj) + + default: + return nil, fmt.Errorf("pausing is not supported") + } +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectrestarter.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectrestarter.go new file mode 100644 index 000000000..cbcf7c882 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectrestarter.go @@ -0,0 +1,119 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "errors" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubectl/pkg/scheme" +) + +func defaultObjectRestarter(obj runtime.Object) ([]byte, error) { + switch obj := obj.(type) { + case *extensionsv1beta1.Deployment: + if obj.Spec.Paused { + return nil, errors.New("can't restart paused deployment (run rollout resume first)") + } + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(extensionsv1beta1.SchemeGroupVersion), obj) + + case *appsv1.Deployment: + if obj.Spec.Paused { + return nil, errors.New("can't restart paused deployment (run rollout resume first)") + } + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj) + + case *appsv1beta2.Deployment: + if obj.Spec.Paused { + return nil, errors.New("can't restart paused deployment (run rollout resume first)") + } + if obj.Spec.Template.ObjectMeta.Annotations == nil { + 
obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj) + + case *appsv1beta1.Deployment: + if obj.Spec.Paused { + return nil, errors.New("can't restart paused deployment (run rollout resume first)") + } + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta1.SchemeGroupVersion), obj) + + case *extensionsv1beta1.DaemonSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(extensionsv1beta1.SchemeGroupVersion), obj) + + case *appsv1.DaemonSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj) + + case *appsv1beta2.DaemonSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj) + + case *appsv1.StatefulSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } 
+ obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj) + + case *appsv1beta1.StatefulSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta1.SchemeGroupVersion), obj) + + case *appsv1beta2.StatefulSet: + if obj.Spec.Template.ObjectMeta.Annotations == nil { + obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj) + + default: + return nil, fmt.Errorf("restarting is not supported") + } +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectresumer.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectresumer.go new file mode 100644 index 000000000..783da6125 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/objectresumer.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "errors" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubectl/pkg/scheme" +) + +func defaultObjectResumer(obj runtime.Object) ([]byte, error) { + switch obj := obj.(type) { + case *extensionsv1beta1.Deployment: + if !obj.Spec.Paused { + return nil, errors.New("is not paused") + } + obj.Spec.Paused = false + return runtime.Encode(scheme.Codecs.LegacyCodec(extensionsv1beta1.SchemeGroupVersion), obj) + + case *appsv1.Deployment: + if !obj.Spec.Paused { + return nil, errors.New("is not paused") + } + obj.Spec.Paused = false + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj) + + case *appsv1beta2.Deployment: + if !obj.Spec.Paused { + return nil, errors.New("is not paused") + } + obj.Spec.Paused = false + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj) + + case *appsv1beta1.Deployment: + if !obj.Spec.Paused { + return nil, errors.New("is not paused") + } + obj.Spec.Paused = false + return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta1.SchemeGroupVersion), obj) + + default: + return nil, fmt.Errorf("resuming is not supported") + } +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/portsforobject.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/portsforobject.go new file mode 100644 index 000000000..6cc9a2a4e --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/portsforobject.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "fmt" + "strconv" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +func portsForObject(object runtime.Object) ([]string, error) { + switch t := object.(type) { + case *corev1.ReplicationController: + return getPorts(t.Spec.Template.Spec), nil + + case *corev1.Pod: + return getPorts(t.Spec), nil + + case *corev1.Service: + return getServicePorts(t.Spec), nil + + case *extensionsv1beta1.Deployment: + return getPorts(t.Spec.Template.Spec), nil + case *appsv1.Deployment: + return getPorts(t.Spec.Template.Spec), nil + case *appsv1beta2.Deployment: + return getPorts(t.Spec.Template.Spec), nil + case *appsv1beta1.Deployment: + return getPorts(t.Spec.Template.Spec), nil + + case *extensionsv1beta1.ReplicaSet: + return getPorts(t.Spec.Template.Spec), nil + case *appsv1.ReplicaSet: + return getPorts(t.Spec.Template.Spec), nil + case *appsv1beta2.ReplicaSet: + return getPorts(t.Spec.Template.Spec), nil + default: + return nil, fmt.Errorf("cannot extract ports from %T", object) + } +} + +func getPorts(spec corev1.PodSpec) []string { + result := []string{} + for _, container := range spec.Containers { + for _, port := range container.Ports { + result = append(result, strconv.Itoa(int(port.ContainerPort))) + } + } + return result +} + +func getServicePorts(spec corev1.ServiceSpec) []string { + result := []string{} + for _, servicePort := range spec.Ports 
{ + result = append(result, strconv.Itoa(int(servicePort.Port))) + } + return result +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/protocolsforobject.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/protocolsforobject.go new file mode 100644 index 000000000..2e5e5a208 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/protocolsforobject.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "fmt" + "strconv" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +func protocolsForObject(object runtime.Object) (map[string]string, error) { + // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) + switch t := object.(type) { + case *corev1.ReplicationController: + return getProtocols(t.Spec.Template.Spec), nil + + case *corev1.Pod: + return getProtocols(t.Spec), nil + + case *corev1.Service: + return getServiceProtocols(t.Spec), nil + + case *extensionsv1beta1.Deployment: + return getProtocols(t.Spec.Template.Spec), nil + case *appsv1.Deployment: + return getProtocols(t.Spec.Template.Spec), nil + case *appsv1beta2.Deployment: + return getProtocols(t.Spec.Template.Spec), nil + case *appsv1beta1.Deployment: + return 
getProtocols(t.Spec.Template.Spec), nil + + case *extensionsv1beta1.ReplicaSet: + return getProtocols(t.Spec.Template.Spec), nil + case *appsv1.ReplicaSet: + return getProtocols(t.Spec.Template.Spec), nil + case *appsv1beta2.ReplicaSet: + return getProtocols(t.Spec.Template.Spec), nil + + default: + return nil, fmt.Errorf("cannot extract protocols from %T", object) + } +} + +func getProtocols(spec corev1.PodSpec) map[string]string { + result := make(map[string]string) + for _, container := range spec.Containers { + for _, port := range container.Ports { + // Empty protocol must be defaulted (TCP) + if len(port.Protocol) == 0 { + port.Protocol = corev1.ProtocolTCP + } + result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) + } + } + return result +} + +// Extracts the protocols exposed by a service from the given service spec. +func getServiceProtocols(spec corev1.ServiceSpec) map[string]string { + result := make(map[string]string) + for _, servicePort := range spec.Ports { + // Empty protocol must be defaulted (TCP) + if len(servicePort.Protocol) == 0 { + servicePort.Protocol = corev1.ProtocolTCP + } + result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) + } + return result +} diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go new file mode 100644 index 000000000..ed51da2fc --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go @@ -0,0 +1,500 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "bytes" + "context" + "fmt" + "sort" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/kubernetes" + "k8s.io/kubectl/pkg/apps" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + deploymentutil "k8s.io/kubectl/pkg/util/deployment" +) + +const ( + rollbackSuccess = "rolled back" + rollbackSkipped = "skipped rollback" +) + +// Rollbacker provides an interface for resources that can be rolled back. +type Rollbacker interface { + Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRunStrategy cmdutil.DryRunStrategy) (string, error) +} + +type RollbackVisitor struct { + clientset kubernetes.Interface + result Rollbacker +} + +func (v *RollbackVisitor) VisitDeployment(elem apps.GroupKindElement) { + v.result = &DeploymentRollbacker{v.clientset} +} + +func (v *RollbackVisitor) VisitStatefulSet(kind apps.GroupKindElement) { + v.result = &StatefulSetRollbacker{v.clientset} +} + +func (v *RollbackVisitor) VisitDaemonSet(kind apps.GroupKindElement) { + v.result = &DaemonSetRollbacker{v.clientset} +} + +func (v *RollbackVisitor) VisitJob(kind apps.GroupKindElement) {} +func (v *RollbackVisitor) VisitPod(kind apps.GroupKindElement) {} +func (v *RollbackVisitor) VisitReplicaSet(kind apps.GroupKindElement) {} +func (v *RollbackVisitor) VisitReplicationController(kind apps.GroupKindElement) {} +func (v *RollbackVisitor) VisitCronJob(kind apps.GroupKindElement) {} + +// RollbackerFor returns an implementation of 
Rollbacker interface for the given schema kind +func RollbackerFor(kind schema.GroupKind, c kubernetes.Interface) (Rollbacker, error) { + elem := apps.GroupKindElement(kind) + visitor := &RollbackVisitor{ + clientset: c, + } + + err := elem.Accept(visitor) + + if err != nil { + return nil, fmt.Errorf("error retrieving rollbacker for %q, %v", kind.String(), err) + } + + if visitor.result == nil { + return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind) + } + + return visitor.result, nil +} + +type DeploymentRollbacker struct { + c kubernetes.Interface +} + +func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { + if toRevision < 0 { + return "", revisionNotFoundErr(toRevision) + } + accessor, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) + } + name := accessor.GetName() + namespace := accessor.GetNamespace() + + // TODO: Fix this after kubectl has been removed from core. It is not possible to convert the runtime.Object + // to the external appsv1 Deployment without round-tripping through an internal version of Deployment. We're + // currently getting rid of all internal versions of resources. So we specifically request the appsv1 version + // here. This follows the same pattern as for DaemonSet and StatefulSet. 
+ deployment, err := r.c.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("failed to retrieve Deployment %s: %v", name, err) + } + + rsForRevision, err := deploymentRevision(deployment, r.c, toRevision) + if err != nil { + return "", err + } + if dryRunStrategy == cmdutil.DryRunClient { + return printTemplate(&rsForRevision.Spec.Template) + } + if deployment.Spec.Paused { + return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", name) + } + + // Skip if the revision already matches current Deployment + if equalIgnoreHash(&rsForRevision.Spec.Template, &deployment.Spec.Template) { + return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil + } + + // remove hash label before patching back into the deployment + delete(rsForRevision.Spec.Template.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + + // compute deployment annotations + annotations := map[string]string{} + for k := range annotationsToSkip { + if v, ok := deployment.Annotations[k]; ok { + annotations[k] = v + } + } + for k, v := range rsForRevision.Annotations { + if !annotationsToSkip[k] { + annotations[k] = v + } + } + + // make patch to restore + patchType, patch, err := getDeploymentPatch(&rsForRevision.Spec.Template, annotations) + if err != nil { + return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) + } + + patchOptions := metav1.PatchOptions{} + if dryRunStrategy == cmdutil.DryRunServer { + patchOptions.DryRun = []string{metav1.DryRunAll} + } + // Restore revision + if _, err = r.c.AppsV1().Deployments(namespace).Patch(context.TODO(), name, patchType, patch, patchOptions); err != nil { + return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) + } + return rollbackSuccess, nil +} + +// equalIgnoreHash returns true if two given podTemplateSpec are equal, 
ignoring the diff in value of Labels[pod-template-hash] +// We ignore pod-template-hash because: +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels +func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { + t1Copy := template1.DeepCopy() + t2Copy := template2.DeepCopy() + // Remove hash labels from template.Labels before comparing + delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) +} + +// annotationsToSkip lists the annotations that should be preserved from the deployment and not +// copied from the replicaset when rolling a deployment back +var annotationsToSkip = map[string]bool{ + corev1.LastAppliedConfigAnnotation: true, + deploymentutil.RevisionAnnotation: true, + deploymentutil.RevisionHistoryAnnotation: true, + deploymentutil.DesiredReplicasAnnotation: true, + deploymentutil.MaxReplicasAnnotation: true, + appsv1.DeprecatedRollbackTo: true, +} + +// getPatch returns a patch that can be applied to restore a Deployment to a +// previous version. If the returned error is nil the patch is valid. 
+func getDeploymentPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) { + // Create a patch of the Deployment that replaces spec.template + patch, err := json.Marshal([]interface{}{ + map[string]interface{}{ + "op": "replace", + "path": "/spec/template", + "value": podTemplate, + }, + map[string]interface{}{ + "op": "replace", + "path": "/metadata/annotations", + "value": annotations, + }, + }) + return types.JSONPatchType, patch, err +} + +func deploymentRevision(deployment *appsv1.Deployment, c kubernetes.Interface, toRevision int64) (revision *appsv1.ReplicaSet, err error) { + + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1()) + if err != nil { + return nil, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) + } + allRSs := allOldRSs + if newRS != nil { + allRSs = append(allRSs, newRS) + } + + var ( + latestReplicaSet *appsv1.ReplicaSet + latestRevision = int64(-1) + previousReplicaSet *appsv1.ReplicaSet + previousRevision = int64(-1) + ) + for _, rs := range allRSs { + if v, err := deploymentutil.Revision(rs); err == nil { + if toRevision == 0 { + if latestRevision < v { + // newest one we've seen so far + previousRevision = latestRevision + previousReplicaSet = latestReplicaSet + latestRevision = v + latestReplicaSet = rs + } else if previousRevision < v { + // second newest one we've seen so far + previousRevision = v + previousReplicaSet = rs + } + } else if toRevision == v { + return rs, nil + } + } + } + + if toRevision > 0 { + return nil, revisionNotFoundErr(toRevision) + } + + if previousReplicaSet == nil { + return nil, fmt.Errorf("no rollout history found for deployment %q", deployment.Name) + } + return previousReplicaSet, nil +} + +type DaemonSetRollbacker struct { + c kubernetes.Interface +} + +func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, 
dryRunStrategy cmdutil.DryRunStrategy) (string, error) { + if toRevision < 0 { + return "", revisionNotFoundErr(toRevision) + } + accessor, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) + } + ds, history, err := daemonSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName()) + if err != nil { + return "", err + } + if toRevision == 0 && len(history) <= 1 { + return "", fmt.Errorf("no last revision to roll back to") + } + + toHistory := findHistory(toRevision, history) + if toHistory == nil { + return "", revisionNotFoundErr(toRevision) + } + + if dryRunStrategy == cmdutil.DryRunClient { + appliedDS, err := applyDaemonSetHistory(ds, toHistory) + if err != nil { + return "", err + } + return printPodTemplate(&appliedDS.Spec.Template) + } + + // Skip if the revision already matches current DaemonSet + done, err := daemonSetMatch(ds, toHistory) + if err != nil { + return "", err + } + if done { + return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil + } + + patchOptions := metav1.PatchOptions{} + if dryRunStrategy == cmdutil.DryRunServer { + patchOptions.DryRun = []string{metav1.DryRunAll} + } + // Restore revision + if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(context.TODO(), accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw, patchOptions); err != nil { + return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) + } + + return rollbackSuccess, nil +} + +// daemonMatch check if the given DaemonSet's template matches the template stored in the given history. 
+func daemonSetMatch(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (bool, error) { + patch, err := getDaemonSetPatch(ds) + if err != nil { + return false, err + } + return bytes.Equal(patch, history.Data.Raw), nil +} + +// getPatch returns a strategic merge patch that can be applied to restore a Daemonset to a +// previous version. If the returned error is nil the patch is valid. The current state that we save is just the +// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously +// recorded patches. +func getDaemonSetPatch(ds *appsv1.DaemonSet) ([]byte, error) { + dsBytes, err := json.Marshal(ds) + if err != nil { + return nil, err + } + var raw map[string]interface{} + err = json.Unmarshal(dsBytes, &raw) + if err != nil { + return nil, err + } + objCopy := make(map[string]interface{}) + specCopy := make(map[string]interface{}) + + // Create a patch of the DaemonSet that replaces spec.template + spec := raw["spec"].(map[string]interface{}) + template := spec["template"].(map[string]interface{}) + specCopy["template"] = template + template["$patch"] = "replace" + objCopy["spec"] = specCopy + patch, err := json.Marshal(objCopy) + return patch, err +} + +type StatefulSetRollbacker struct { + c kubernetes.Interface +} + +// toRevision is a non-negative integer, with 0 being reserved to indicate rolling back to previous configuration +func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { + if toRevision < 0 { + return "", revisionNotFoundErr(toRevision) + } + accessor, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) + } + sts, history, err := statefulSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName()) + if err != nil { + return "", err + } + if toRevision == 0 && 
len(history) <= 1 { + return "", fmt.Errorf("no last revision to roll back to") + } + + toHistory := findHistory(toRevision, history) + if toHistory == nil { + return "", revisionNotFoundErr(toRevision) + } + + if dryRunStrategy == cmdutil.DryRunClient { + appliedSS, err := applyRevision(sts, toHistory) + if err != nil { + return "", err + } + return printPodTemplate(&appliedSS.Spec.Template) + } + + // Skip if the revision already matches current StatefulSet + done, err := statefulsetMatch(sts, toHistory) + if err != nil { + return "", err + } + if done { + return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil + } + + patchOptions := metav1.PatchOptions{} + if dryRunStrategy == cmdutil.DryRunServer { + patchOptions.DryRun = []string{metav1.DryRunAll} + } + // Restore revision + if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(context.TODO(), sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw, patchOptions); err != nil { + return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) + } + + return rollbackSuccess, nil +} + +var appsCodec = scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion) + +// applyRevision returns a new StatefulSet constructed by restoring the state in revision to set. If the returned error +// is nil, the returned StatefulSet is valid. +func applyRevision(set *appsv1.StatefulSet, revision *appsv1.ControllerRevision) (*appsv1.StatefulSet, error) { + patched, err := strategicpatch.StrategicMergePatch([]byte(runtime.EncodeOrDie(appsCodec, set)), revision.Data.Raw, set) + if err != nil { + return nil, err + } + result := &appsv1.StatefulSet{} + err = json.Unmarshal(patched, result) + if err != nil { + return nil, err + } + return result, nil +} + +// statefulsetMatch check if the given StatefulSet's template matches the template stored in the given history. 
+func statefulsetMatch(ss *appsv1.StatefulSet, history *appsv1.ControllerRevision) (bool, error) { + patch, err := getStatefulSetPatch(ss) + if err != nil { + return false, err + } + return bytes.Equal(patch, history.Data.Raw), nil +} + +// getStatefulSetPatch returns a strategic merge patch that can be applied to restore a StatefulSet to a +// previous version. If the returned error is nil the patch is valid. The current state that we save is just the +// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously +// recorded patches. +func getStatefulSetPatch(set *appsv1.StatefulSet) ([]byte, error) { + str, err := runtime.Encode(appsCodec, set) + if err != nil { + return nil, err + } + var raw map[string]interface{} + if err := json.Unmarshal([]byte(str), &raw); err != nil { + return nil, err + } + objCopy := make(map[string]interface{}) + specCopy := make(map[string]interface{}) + spec := raw["spec"].(map[string]interface{}) + template := spec["template"].(map[string]interface{}) + specCopy["template"] = template + template["$patch"] = "replace" + objCopy["spec"] = specCopy + patch, err := json.Marshal(objCopy) + return patch, err +} + +// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions. +// It returns nil if no such controllerrevision exists. +// If toRevision is 0, the last previously used history is returned. 
func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision {
	// "Previous" requires at least two recorded revisions.
	if toRevision == 0 && len(allHistory) <= 1 {
		return nil
	}

	// Find the history to rollback to
	var toHistory *appsv1.ControllerRevision
	if toRevision == 0 {
		// If toRevision == 0, find the latest revision (2nd max)
		sort.Sort(historiesByRevision(allHistory))
		toHistory = allHistory[len(allHistory)-2]
	} else {
		for _, h := range allHistory {
			if h.Revision == toRevision {
				// If toRevision != 0, find the history with matching revision
				return h
			}
		}
	}

	return toHistory
}

// printPodTemplate converts a given pod template into a human-readable string.
func printPodTemplate(specTemplate *corev1.PodTemplateSpec) (string, error) {
	podSpec, err := printTemplate(specTemplate)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("will roll back to %s", podSpec), nil
}

// revisionNotFoundErr returns the error reported when a requested revision is
// absent from the recorded history.
func revisionNotFoundErr(r int64) error {
	return fmt.Errorf("unable to find specified revision %v in history", r)
}

// TODO: copied from daemon controller, should extract to a library
// historiesByRevision sorts ControllerRevisions by ascending revision number.
type historiesByRevision []*appsv1.ControllerRevision

func (h historiesByRevision) Len() int      { return len(h) }
func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h historiesByRevision) Less(i, j int) bool {
	return h[i].Revision < h[j].Revision
}

diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollbacker.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollbacker.go
new file mode 100644
index 000000000..f02223690
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollbacker.go
@@ -0,0 +1,37 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package polymorphichelpers

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/kubernetes"
)

// rollbacker returns a Rollbacker for changing the rollback version of the
// specified RESTMapping type, or an error if a client cannot be built from
// the given RESTClientGetter.
func rollbacker(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (Rollbacker, error) {
	clientConfig, err := restClientGetter.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	external, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		return nil, err
	}

	return RollbackerFor(mapping.GroupVersionKind.GroupKind(), external)
}

diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go
new file mode 100644
index 000000000..86af73350
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go
@@ -0,0 +1,152 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package polymorphichelpers

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	deploymentutil "k8s.io/kubectl/pkg/util/deployment"
)

// StatusViewer provides an interface for resources that have rollout status.
type StatusViewer interface {
	// Status returns a human-readable status message, whether the rollout is
	// considered done, and any error encountered.
	Status(obj runtime.Unstructured, revision int64) (string, bool, error)
}

// StatusViewerFor returns a StatusViewer for the resource specified by kind.
func StatusViewerFor(kind schema.GroupKind) (StatusViewer, error) {
	switch kind {
	case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(),
		appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind():
		return &DeploymentStatusViewer{}, nil
	case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(),
		appsv1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind():
		return &DaemonSetStatusViewer{}, nil
	case appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind():
		return &StatefulSetStatusViewer{}, nil
	}
	return nil, fmt.Errorf("no status viewer has been implemented for %v", kind)
}

// DeploymentStatusViewer implements the StatusViewer interface.
type DeploymentStatusViewer struct{}

// DaemonSetStatusViewer implements the StatusViewer interface.
type DaemonSetStatusViewer struct{}

// StatefulSetStatusViewer implements the StatusViewer interface.
type StatefulSetStatusViewer struct{}

// Status returns a message describing deployment status, and a bool value indicating if the status is considered done.
+func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) { + deployment := &appsv1.Deployment{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), deployment) + if err != nil { + return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, deployment, err) + } + + if revision > 0 { + deploymentRev, err := deploymentutil.Revision(deployment) + if err != nil { + return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err) + } + if revision != deploymentRev { + return "", false, fmt.Errorf("desired revision (%d) is different from the running revision (%d)", revision, deploymentRev) + } + } + if deployment.Generation <= deployment.Status.ObservedGeneration { + cond := deploymentutil.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) + if cond != nil && cond.Reason == deploymentutil.TimedOutReason { + return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.Name) + } + if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { + return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Name, deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), false, nil + } + if deployment.Status.Replicas > deployment.Status.UpdatedReplicas { + return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d old replicas are pending termination...\n", deployment.Name, deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil + } + if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas { + return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d of %d updated replicas are available...\n", deployment.Name, deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false, nil + } + return fmt.Sprintf("deployment %q 
successfully rolled out\n", deployment.Name), true, nil + } + return fmt.Sprintf("Waiting for deployment spec update to be observed...\n"), false, nil +} + +// Status returns a message describing daemon set status, and a bool value indicating if the status is considered done. +func (s *DaemonSetStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) { + //ignoring revision as DaemonSets does not have history yet + + daemon := &appsv1.DaemonSet{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), daemon) + if err != nil { + return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, daemon, err) + } + + if daemon.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { + return "", true, fmt.Errorf("rollout status is only available for %s strategy type", appsv1.RollingUpdateStatefulSetStrategyType) + } + if daemon.Generation <= daemon.Status.ObservedGeneration { + if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled { + return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...\n", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil + } + if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled { + return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...\n", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled), false, nil + } + return fmt.Sprintf("daemon set %q successfully rolled out\n", daemon.Name), true, nil + } + return fmt.Sprintf("Waiting for daemon set spec update to be observed...\n"), false, nil +} + +// Status returns a message describing statefulset status, and a bool value indicating if the status is considered done. 
func (s *StatefulSetStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) {
	sts := &appsv1.StatefulSet{}
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), sts)
	if err != nil {
		return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, sts, err)
	}

	// Only RollingUpdate strategy exposes rollout progress; OnDelete does not.
	if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
		return "", true, fmt.Errorf("rollout status is only available for %s strategy type", appsv1.RollingUpdateStatefulSetStrategyType)
	}
	if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration {
		return "Waiting for statefulset spec update to be observed...\n", false, nil
	}
	if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas {
		return fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas), false, nil
	}
	// Partitioned rolling update: only replicas with ordinal >= partition are
	// updated; completion is judged against that subset.
	if sts.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil {
		if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
			if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) {
				return fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n",
					sts.Status.UpdatedReplicas, *sts.Spec.Replicas-*sts.Spec.UpdateStrategy.RollingUpdate.Partition), false, nil
			}
		}
		return fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...\n",
			sts.Status.UpdatedReplicas), true, nil
	}
	if sts.Status.UpdateRevision != sts.Status.CurrentRevision {
		return fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...\n",
			sts.Status.UpdatedReplicas, sts.Status.UpdateRevision), false, nil
	}
	return fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...\n", sts.Status.CurrentReplicas, sts.Status.CurrentRevision), true, nil

}

diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/statusviewer.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/statusviewer.go
new file mode 100644
index 000000000..0d6dd39f4
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/statusviewer.go
@@ -0,0 +1,26 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package polymorphichelpers

import (
	"k8s.io/apimachinery/pkg/api/meta"
)

// statusViewer returns a StatusViewer for printing rollout status.
func statusViewer(mapping *meta.RESTMapping) (StatusViewer, error) {
	return StatusViewerFor(mapping.GroupVersionKind.GroupKind())
}

diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/updatepodspec.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/updatepodspec.go
new file mode 100644
index 000000000..f386447c1
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/updatepodspec.go
@@ -0,0 +1,90 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package polymorphichelpers

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	appsv1beta1 "k8s.io/api/apps/v1beta1"
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	batchv1 "k8s.io/api/batch/v1"
	batchv1beta1 "k8s.io/api/batch/v1beta1"
	"k8s.io/api/core/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

// updatePodSpecForObject applies fn to the PodSpec embedded in any known
// workload type. It returns true when obj carries a pod template (even if fn
// returned an error), and false with an error for unsupported types.
func updatePodSpecForObject(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) {
	switch t := obj.(type) {
	case *v1.Pod:
		return true, fn(&t.Spec)
	// ReplicationController
	case *v1.ReplicationController:
		// The template is optional on RC; materialize it so fn has a target.
		if t.Spec.Template == nil {
			t.Spec.Template = &v1.PodTemplateSpec{}
		}
		return true, fn(&t.Spec.Template.Spec)

	// Deployment
	case *extensionsv1beta1.Deployment:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1beta1.Deployment:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1beta2.Deployment:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1.Deployment:
		return true, fn(&t.Spec.Template.Spec)

	// DaemonSet
	case *extensionsv1beta1.DaemonSet:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1beta2.DaemonSet:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1.DaemonSet:
		return true, fn(&t.Spec.Template.Spec)

	// ReplicaSet
	case *extensionsv1beta1.ReplicaSet:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1beta2.ReplicaSet:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1.ReplicaSet:
		return true, fn(&t.Spec.Template.Spec)

	// StatefulSet
	case *appsv1beta1.StatefulSet:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1beta2.StatefulSet:
		return true, fn(&t.Spec.Template.Spec)
	case *appsv1.StatefulSet:
		return true, fn(&t.Spec.Template.Spec)

	// Job
	case *batchv1.Job:
		return true, fn(&t.Spec.Template.Spec)

	// CronJob
	case *batchv1beta1.CronJob:
		return true, fn(&t.Spec.JobTemplate.Spec.Template.Spec)
	case *batchv1.CronJob:
		return true, fn(&t.Spec.JobTemplate.Spec.Template.Spec)

	default:
		return false, fmt.Errorf("the object is not a pod or does not have a pod template: %T", t)
	}
}

diff --git a/vendor/k8s.io/kubectl/pkg/util/certificate/certificate.go b/vendor/k8s.io/kubectl/pkg/util/certificate/certificate.go
new file mode 100644
index 000000000..c55e5963f
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/certificate/certificate.go
@@ -0,0 +1,38 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package certificate

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
)

// TODO(yue9944882): Remove this helper package once it's copied to k/api

// ParseCSR extracts the CSR from the API object and decodes it.
func ParseCSR(pemBytes []byte) (*x509.CertificateRequest, error) {
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "CERTIFICATE REQUEST" {
		return nil, errors.New("PEM block type must be CERTIFICATE REQUEST")
	}
	csr, err := x509.ParseCertificateRequest(block.Bytes)
	if err != nil {
		return nil, err
	}
	return csr, nil
}

diff --git a/vendor/k8s.io/kubectl/pkg/util/deployment/deployment.go b/vendor/k8s.io/kubectl/pkg/util/deployment/deployment.go
new file mode 100644
index 000000000..38e90c37c
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/deployment/deployment.go
@@ -0,0 +1,257 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package deployment

import (
	"context"
	"sort"
	"strconv"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
	runtimeresource "k8s.io/cli-runtime/pkg/resource"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
)

const (
	// RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence
	RevisionAnnotation = "deployment.kubernetes.io/revision"
	// RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
	RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
	// DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
	// in its replica sets. Helps in separating scaling events from the rollout process and for
	// determining if the new replica set for a deployment is really saturated.
	DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"
	// MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which
	// is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their
	// proportions in case the deployment has surge replicas.
	MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"
	// RollbackRevisionNotFound is not found rollback event reason
	RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
	// RollbackTemplateUnchanged is the template unchanged rollback event reason
	RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
	// RollbackDone is the done rollback event reason
	RollbackDone = "DeploymentRollback"
	// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
	// within the given deadline (progressDeadlineSeconds).
	TimedOutReason = "ProgressDeadlineExceeded"
)

// GetDeploymentCondition returns the condition with the provided type.
func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
	for i := range status.Conditions {
		// Copy the element so the returned pointer does not alias the slice.
		c := status.Conditions[i]
		if c.Type == condType {
			return &c
		}
	}
	return nil
}

// Revision returns the revision number of the input object.
// Objects without the revision annotation report revision 0 with no error.
func Revision(obj runtime.Object) (int64, error) {
	acc, err := meta.Accessor(obj)
	if err != nil {
		return 0, err
	}
	v, ok := acc.GetAnnotations()[RevisionAnnotation]
	if !ok {
		return 0, nil
	}
	return strconv.ParseInt(v, 10, 64)
}

// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and
// ReplicaSetList from client interface. Note that the first set of old replica sets doesn't include the ones
// with no pods, and the second set of old replica sets include all old replica sets. The third returned value
// is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interface) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) {
	rsList, err := listReplicaSets(deployment, rsListFromClient(c), nil)
	if err != nil {
		return nil, nil, nil, err
	}
	newRS := findNewReplicaSet(deployment, rsList)
	oldRSes, allOldRSes := findOldReplicaSets(deployment, rsList, newRS)
	return oldRSes, allOldRSes, newRS, nil
}

// GetAllReplicaSetsInChunks is the same as GetAllReplicaSets, but accepts a chunk size argument.
// It returns the old and new replica sets targeted by the given Deployment. It gets PodList and
// ReplicaSetList from client interface. Note that the first set of old replica sets doesn't include the ones
// with no pods, and the second set of old replica sets include all old replica sets. The third returned value
// is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSetsInChunks(deployment *appsv1.Deployment, c appsclient.AppsV1Interface, chunkSize int64) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) {
	rsList, err := listReplicaSets(deployment, rsListFromClient(c), &chunkSize)
	if err != nil {
		return nil, nil, nil, err
	}
	newRS := findNewReplicaSet(deployment, rsList)
	oldRSes, allOldRSes := findOldReplicaSets(deployment, rsList, newRS)
	return oldRSes, allOldRSes, newRS, nil
}

// rsListFromClient returns an rsListFunc that wraps the given client.
func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc {
	return func(namespace string, initialOpts metav1.ListOptions) ([]*appsv1.ReplicaSet, error) {
		rsList := &appsv1.ReplicaSetList{}
		// Follow pagination "continue" tokens so chunked list options still
		// return the full set.
		err := runtimeresource.FollowContinue(&initialOpts,
			func(opts metav1.ListOptions) (runtime.Object, error) {
				newRs, err := c.ReplicaSets(namespace).List(context.TODO(), opts)
				if err != nil {
					return nil, runtimeresource.EnhanceListError(err, opts, "replicasets")
				}
				rsList.Items = append(rsList.Items, newRs.Items...)
				return newRs, nil
			})
		if err != nil {
			return nil, err
		}
		var ret []*appsv1.ReplicaSet
		for i := range rsList.Items {
			ret = append(ret, &rsList.Items[i])
		}
		return ret, err
	}
}

// TODO: switch this to full namespacers
type rsListFunc func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error)

// listReplicaSets returns a slice of RSes the given deployment targets.
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc, chunkSize *int64) ([]*appsv1.ReplicaSet, error) {
	// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
	// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
	namespace := deployment.Namespace
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return nil, err
	}
	options := metav1.ListOptions{LabelSelector: selector.String()}
	if chunkSize != nil {
		options.Limit = *chunkSize
	}
	all, err := getRSList(namespace, options)
	if err != nil {
		return nil, err
	}
	// Only include those whose ControllerRef matches the Deployment.
	owned := make([]*appsv1.ReplicaSet, 0, len(all))
	for _, rs := range all {
		if metav1.IsControlledBy(rs, deployment) {
			owned = append(owned, rs)
		}
	}
	return owned, nil
}

// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
// We ignore pod-template-hash because:
//  1. The hash result would be different upon podTemplateSpec API changes
//     (e.g. the addition of a new field will cause the hash code to change)
//  2. The deployment template won't have hash labels
func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
	t1Copy := template1.DeepCopy()
	t2Copy := template2.DeepCopy()
	// Remove hash labels from template.Labels before comparing
	delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
	delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}

// findNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
func findNewReplicaSet(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) *appsv1.ReplicaSet {
	sort.Sort(replicaSetsByCreationTimestamp(rsList))
	for i := range rsList {
		if equalIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
			// In rare cases, such as after cluster upgrades, Deployment may end up with
			// having more than one new ReplicaSets that have the same template as its template,
			// see https://github.com/kubernetes/kubernetes/issues/40415
			// We deterministically choose the oldest new ReplicaSet.
			return rsList[i]
		}
	}
	// new ReplicaSet does not exist.
	return nil
}

// replicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
type replicaSetsByCreationTimestamp []*appsv1.ReplicaSet

func (o replicaSetsByCreationTimestamp) Len() int      { return len(o) }
func (o replicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o replicaSetsByCreationTimestamp) Less(i, j int) bool {
	// Names break ties so the order is fully deterministic.
	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}

// findOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
func findOldReplicaSets(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet, newRS *appsv1.ReplicaSet) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet) {
	var requiredRSs []*appsv1.ReplicaSet
	var allRSs []*appsv1.ReplicaSet
	for _, rs := range rsList {
		// Filter out new replica set
		if newRS != nil && rs.UID == newRS.UID {
			continue
		}
		allRSs = append(allRSs, rs)
		if *(rs.Spec.Replicas) != 0 {
			requiredRSs = append(requiredRSs, rs)
		}
	}
	return requiredRSs, allRSs
}

// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
	// Percentages round up for surge, down for unavailability.
	surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
	if err != nil {
		return 0, 0, err
	}
	unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
	if err != nil {
		return 0, 0, err
	}

	if surge == 0 && unavailable == 0 {
		// Validation should never allow the user to explicitly use zero values for both maxSurge
		// maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
		// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
		// theory that surge might not work due to quota.
		unavailable = 1
	}

	return int32(surge), int32(unavailable), nil
}

diff --git a/vendor/k8s.io/kubectl/pkg/util/event/sorted_event_list.go b/vendor/k8s.io/kubectl/pkg/util/event/sorted_event_list.go
new file mode 100644
index 000000000..9967f953e
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/event/sorted_event_list.go
@@ -0,0 +1,36 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package event

import (
	corev1 "k8s.io/api/core/v1"
)

// SortableEvents implements sort.Interface for []api.Event based on the Timestamp field
type SortableEvents []corev1.Event

func (list SortableEvents) Len() int {
	return len(list)
}

func (list SortableEvents) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

// Less orders events by ascending LastTimestamp (oldest first).
func (list SortableEvents) Less(i, j int) bool {
	return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time)
}

diff --git a/vendor/k8s.io/kubectl/pkg/util/fieldpath/fieldpath.go b/vendor/k8s.io/kubectl/pkg/util/fieldpath/fieldpath.go
new file mode 100644
index 000000000..efd08cde0
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/fieldpath/fieldpath.go
@@ -0,0 +1,111 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/ + +package fieldpath + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" +) + +// TODO(yue9944882): Remove this helper package once it's copied to k/apimachinery + +// FormatMap formats map[string]string to a string. +func FormatMap(m map[string]string) (fmtStr string) { + // output with keys in sorted order to provide stable output + keys := sets.NewString() + for key := range m { + keys.Insert(key) + } + for _, key := range keys.List() { + fmtStr += fmt.Sprintf("%v=%q\n", key, m[key]) + } + fmtStr = strings.TrimSuffix(fmtStr, "\n") + + return +} + +// ExtractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. +func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return 
string(accessor.GetUID()), nil + } + + return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). +// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} diff --git a/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go b/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go new file mode 100644 index 000000000..847eb7e88 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go @@ -0,0 +1,188 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podutils + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/integer" +) + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := getPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *corev1.Pod) bool { + return isPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func isPodReadyConditionTrue(status corev1.PodStatus) bool { + condition := getPodReadyCondition(status) + return condition != nil && condition.Status == corev1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func getPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition { + _, condition := getPodCondition(&status, corev1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. 
+func getPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if status == nil { + return -1, nil + } + return getPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. +func getPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} + +// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. +type ByLogging []*corev1.Pod + +func (s ByLogging) Len() int { return len(s) } +func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ByLogging) Less(i, j int) bool { + // 1. assigned < unassigned + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) > 0 + } + // 2. PodRunning < PodUnknown < PodPending + m := map[corev1.PodPhase]int{corev1.PodRunning: 0, corev1.PodUnknown: 1, corev1.PodPending: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. ready < not ready + if IsPodReady(s[i]) != IsPodReady(s[j]) { + return IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for more time < less time < empty time + if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) + } + // 5. 
Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. older pods < newer pods < empty timestamp pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp) + } + return false +} + +// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. +type ActivePods []*corev1.Pod + +func (s ActivePods) Len() int { return len(s) } +func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ActivePods) Less(i, j int) bool { + // 1. Unassigned < assigned + // If only one of the pods is unassigned, the unassigned one is smaller + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) == 0 + } + // 2. PodPending < PodUnknown < PodRunning + m := map[corev1.PodPhase]int{corev1.PodPending: 0, corev1.PodUnknown: 1, corev1.PodRunning: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. Not ready < ready + // If only one of the pods is not ready, the not ready one is smaller + if IsPodReady(s[i]) != IsPodReady(s[j]) { + return !IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for empty time < less time < more time + // If both pods are ready, the latest ready one is smaller + if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) + } + // 5. 
Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. Empty creation time pods < newer pods < older pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp) + } + return false +} + +// afterOrZero checks if time t1 is after time t2; if one of them +// is zero, the zero time is seen as after non-zero time. +func afterOrZero(t1, t2 *metav1.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return t1.Time.IsZero() + } + return t1.After(t2.Time) +} + +func podReadyTime(pod *corev1.Pod) *metav1.Time { + for _, c := range pod.Status.Conditions { + // we only care about pod ready conditions + if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { + return &c.LastTransitionTime + } + } + return &metav1.Time{} +} + +func maxContainerRestarts(pod *corev1.Pod) int { + maxRestarts := 0 + for _, c := range pod.Status.ContainerStatuses { + maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) + } + return maxRestarts +} diff --git a/vendor/k8s.io/kubectl/pkg/util/qos/qos.go b/vendor/k8s.io/kubectl/pkg/util/qos/qos.go new file mode 100644 index 000000000..2715e6375 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/qos/qos.go @@ -0,0 +1,98 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package qos + +import ( + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" +) + +var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) + +func isSupportedQoSComputeResource(name core.ResourceName) bool { + return supportedQoSComputeResources.Has(string(name)) +} + +// GetPodQOS returns the QoS class of a pod. +// A pod is besteffort if none of its containers have specified any requests or limits. +// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. +// A pod is burstable if limits and requests do not match across all containers. +func GetPodQOS(pod *core.Pod) core.PodQOSClass { + requests := core.ResourceList{} + limits := core.ResourceList{} + zeroQuantity := resource.MustParse("0") + isGuaranteed := true + allContainers := []core.Container{} + allContainers = append(allContainers, pod.Spec.Containers...) + allContainers = append(allContainers, pod.Spec.InitContainers...) 
+ for _, container := range allContainers { + // process requests + for name, quantity := range container.Resources.Requests { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.DeepCopy() + if _, exists := requests[name]; !exists { + requests[name] = delta + } else { + delta.Add(requests[name]) + requests[name] = delta + } + } + } + // process limits + qosLimitsFound := sets.NewString() + for name, quantity := range container.Resources.Limits { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + qosLimitsFound.Insert(string(name)) + delta := quantity.DeepCopy() + if _, exists := limits[name]; !exists { + limits[name] = delta + } else { + delta.Add(limits[name]) + limits[name] = delta + } + } + } + + if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) { + isGuaranteed = false + } + } + if len(requests) == 0 && len(limits) == 0 { + return core.PodQOSBestEffort + } + // Check is requests match limits for all resources. + if isGuaranteed { + for name, req := range requests { + if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { + isGuaranteed = false + break + } + } + } + if isGuaranteed && + len(requests) == len(limits) { + return core.PodQOSGuaranteed + } + return core.PodQOSBurstable +} diff --git a/vendor/k8s.io/kubectl/pkg/util/rbac/rbac.go b/vendor/k8s.io/kubectl/pkg/util/rbac/rbac.go new file mode 100644 index 000000000..b5d1f2d52 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/rbac/rbac.go @@ -0,0 +1,135 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/util/sets" + "reflect" + "strings" +) + +type simpleResource struct { + Group string + Resource string + ResourceNameExist bool + ResourceName string +} + +// CompactRules combines rules that contain a single APIGroup/Resource, differ only by verb, and contain no other attributes. +// this is a fast check, and works well with the decomposed "missing rules" list from a Covers check. +func CompactRules(rules []rbacv1.PolicyRule) ([]rbacv1.PolicyRule, error) { + compacted := make([]rbacv1.PolicyRule, 0, len(rules)) + + simpleRules := map[simpleResource]*rbacv1.PolicyRule{} + for _, rule := range rules { + if resource, isSimple := isSimpleResourceRule(&rule); isSimple { + if existingRule, ok := simpleRules[resource]; ok { + // Add the new verbs to the existing simple resource rule + if existingRule.Verbs == nil { + existingRule.Verbs = []string{} + } + existingVerbs := sets.NewString(existingRule.Verbs...) 
+ for _, verb := range rule.Verbs { + if !existingVerbs.Has(verb) { + existingRule.Verbs = append(existingRule.Verbs, verb) + } + } + + } else { + // Copy the rule to accumulate matching simple resource rules into + simpleRules[resource] = rule.DeepCopy() + } + } else { + compacted = append(compacted, rule) + } + } + + // Once we've consolidated the simple resource rules, add them to the compacted list + for _, simpleRule := range simpleRules { + compacted = append(compacted, *simpleRule) + } + + return compacted, nil +} + +// isSimpleResourceRule returns true if the given rule contains verbs, a single resource, a single API group, at most one Resource Name, and no other values +func isSimpleResourceRule(rule *rbacv1.PolicyRule) (simpleResource, bool) { + resource := simpleResource{} + + // If we have "complex" rule attributes, return early without allocations or expensive comparisons + if len(rule.ResourceNames) > 1 || len(rule.NonResourceURLs) > 0 { + return resource, false + } + // If we have multiple api groups or resources, return early + if len(rule.APIGroups) != 1 || len(rule.Resources) != 1 { + return resource, false + } + + // Test if this rule only contains APIGroups/Resources/Verbs/ResourceNames + simpleRule := &rbacv1.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames} + if !reflect.DeepEqual(simpleRule, rule) { + return resource, false + } + + if len(rule.ResourceNames) == 0 { + resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: false} + } else { + resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: true, ResourceName: rule.ResourceNames[0]} + } + + return resource, true +} + +// BreakdownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one +// resource, and one resource name +func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule { + subrules 
:= []rbacv1.PolicyRule{} + for _, group := range rule.APIGroups { + for _, resource := range rule.Resources { + for _, verb := range rule.Verbs { + if len(rule.ResourceNames) > 0 { + for _, resourceName := range rule.ResourceNames { + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) + } + + } else { + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) + } + + } + } + } + + // Non-resource URLs are unique because they only combine with verbs. + for _, nonResourceURL := range rule.NonResourceURLs { + for _, verb := range rule.Verbs { + subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) + } + } + + return subrules +} + +// SortableRuleSlice is used to sort rule slice +type SortableRuleSlice []rbacv1.PolicyRule + +func (s SortableRuleSlice) Len() int { return len(s) } +func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s SortableRuleSlice) Less(i, j int) bool { + return strings.Compare(s[i].String(), s[j].String()) < 0 +} diff --git a/vendor/k8s.io/kubectl/pkg/util/resource/resource.go b/vendor/k8s.io/kubectl/pkg/util/resource/resource.go new file mode 100644 index 000000000..44ddf96ac --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/resource/resource.go @@ -0,0 +1,172 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "math" + "strconv" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" +) + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. If pod overhead is non-nil, the pod overhead is added to the +// total container resource requests and to the total container limits which have a +// non-zero quantity. +func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) { + reqs, limits = corev1.ResourceList{}, corev1.ResourceList{} + for _, container := range pod.Spec.Containers { + addResourceList(reqs, container.Resources.Requests) + addResourceList(limits, container.Resources.Limits) + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + maxResourceList(reqs, container.Resources.Requests) + maxResourceList(limits, container.Resources.Limits) + } + + // Add overhead for running a pod to the sum of requests and to non-zero limits: + if pod.Spec.Overhead != nil { + addResourceList(reqs, pod.Spec.Overhead) + + for name, quantity := range pod.Spec.Overhead { + if value, ok := limits[name]; ok && !value.IsZero() { + value.Add(quantity) + limits[name] = value + } + } + } + return +} + +// addResourceList adds the resources in newList to list +func addResourceList(list, new corev1.ResourceList) { + for name, quantity := range new { + if value, ok := list[name]; !ok { + list[name] = quantity.DeepCopy() + } else { + value.Add(quantity) + list[name] = value + } + } +} + +// maxResourceList sets list to the greater of list/newList for every resource +// either list +func maxResourceList(list, new corev1.ResourceList) { + for name, quantity := range new { + if value, ok := list[name]; !ok { + list[name] = quantity.DeepCopy() + 
continue + } else { + if quantity.Cmp(value) > 0 { + list[name] = quantity.DeepCopy() + } + } + } +} + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func ExtractContainerResourceValue(fs *corev1.ResourceFieldSelector, container *corev1.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "limits.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + case "requests.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) + } + // handle extended standard resources with dynamic names + // example: requests.hugepages- or limits.hugepages- + if strings.HasPrefix(fs.Resource, "requests.") { + resourceName := corev1.ResourceName(strings.TrimPrefix(fs.Resource, "requests.")) + if IsHugePageResourceName(resourceName) { + return convertResourceHugePagesToString(container.Resources.Requests.Name(resourceName, resource.BinarySI), divisor) + } + } + if strings.HasPrefix(fs.Resource, "limits.") { + resourceName := corev1.ResourceName(strings.TrimPrefix(fs.Resource, "limits.")) + if IsHugePageResourceName(resourceName) { + return convertResourceHugePagesToString(container.Resources.Limits.Name(resourceName, resource.BinarySI), divisor) + } + } + return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) 
+} + +// convertResourceCPUToString converts cpu value to the format of divisor and returns +// ceiling of the value. +func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. +func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +// convertResourceHugePagesToString converts hugepages value to the format of divisor and returns +// ceiling of the value. +func convertResourceHugePagesToString(hugePages *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(hugePages.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns +// ceiling of the value. +func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +var standardContainerResources = sets.NewString( + string(corev1.ResourceCPU), + string(corev1.ResourceMemory), + string(corev1.ResourceEphemeralStorage), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) || IsHugePageResourceName(corev1.ResourceName(str)) +} + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. 
+func IsHugePageResourceName(name corev1.ResourceName) bool { + return strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/slice/slice.go b/vendor/k8s.io/kubectl/pkg/util/slice/slice.go new file mode 100644 index 000000000..f997d5cb4 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/slice/slice.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slice + +import ( + "sort" +) + +// SortInts64 sorts []int64 in increasing order +func SortInts64(a []int64) { sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) } + +// ContainsString checks if a given slice of strings contains the provided string. +// If a modifier func is provided, it is called with the slice item before the comparation. +func ContainsString(slice []string, s string, modifier func(s string) string) bool { + for _, item := range slice { + if item == s { + return true + } + if modifier != nil && modifier(item) == s { + return true + } + } + return false +} diff --git a/vendor/k8s.io/kubectl/pkg/util/storage/storage.go b/vendor/k8s.io/kubectl/pkg/util/storage/storage.go new file mode 100644 index 000000000..1f25cf1ab --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/storage/storage.go @@ -0,0 +1,110 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" +) + +// TODO(yue9944882): Remove this helper package once it's copied to k/api + +// IsDefaultStorageClassAnnotation represents a StorageClass annotation that +// marks a class as the default StorageClass +const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" + +// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation. +const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" + +// IsDefaultAnnotationText returns a pretty Yes/No String if +// the annotation is set +func IsDefaultAnnotationText(obj metav1.ObjectMeta) string { + if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + + return "No" +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX,RWOP. 
+func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if ContainsAccessMode(modes, v1.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if ContainsAccessMode(modes, v1.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if ContainsAccessMode(modes, v1.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + if ContainsAccessMode(modes, v1.ReadWriteOncePod) { + modesStr = append(modesStr, "RWOP") + } + return strings.Join(modesStr, ",") +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { + accessModes := []v1.PersistentVolumeAccessMode{} + for _, m := range modes { + if !ContainsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func ContainsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". 
+func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b687891c8..21168f12f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -67,6 +67,9 @@ github.com/evanphx/json-patch/v5 # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d ## explicit github.com/exponent-io/jsonpath +# github.com/fatih/camelcase v1.0.0 +## explicit +github.com/fatih/camelcase # github.com/felixge/httpsnoop v1.0.1 ## explicit; go 1.13 github.com/felixge/httpsnoop @@ -1259,6 +1262,7 @@ k8s.io/client-go/tools/watch k8s.io/client-go/transport k8s.io/client-go/transport/spdy k8s.io/client-go/util/cert +k8s.io/client-go/util/certificate/csr k8s.io/client-go/util/connrotation k8s.io/client-go/util/exec k8s.io/client-go/util/flowcontrol @@ -1373,15 +1377,29 @@ k8s.io/kube-openapi/pkg/util/sets k8s.io/kube-openapi/pkg/validation/spec # k8s.io/kubectl v0.23.4 ## explicit; go 1.16 +k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/apiresources k8s.io/kubectl/pkg/cmd/get k8s.io/kubectl/pkg/cmd/util +k8s.io/kubectl/pkg/cmd/util/podcmd +k8s.io/kubectl/pkg/describe +k8s.io/kubectl/pkg/polymorphichelpers k8s.io/kubectl/pkg/rawhttp k8s.io/kubectl/pkg/scheme +k8s.io/kubectl/pkg/util/certificate +k8s.io/kubectl/pkg/util/deployment +k8s.io/kubectl/pkg/util/event +k8s.io/kubectl/pkg/util/fieldpath k8s.io/kubectl/pkg/util/i18n k8s.io/kubectl/pkg/util/interrupt k8s.io/kubectl/pkg/util/openapi k8s.io/kubectl/pkg/util/openapi/validation +k8s.io/kubectl/pkg/util/podutils +k8s.io/kubectl/pkg/util/qos +k8s.io/kubectl/pkg/util/rbac +k8s.io/kubectl/pkg/util/resource +k8s.io/kubectl/pkg/util/slice +k8s.io/kubectl/pkg/util/storage k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term 
k8s.io/kubectl/pkg/validation