From 07870d830c4c787670f147c2972c085dd02172b7 Mon Sep 17 00:00:00 2001
From: Aleksandra Malinowska
Date: Thu, 13 Feb 2020 15:26:48 +0100
Subject: [PATCH] Updating vendor against
 https://github.com/kubernetes/kubernetes.git:master
 (d88304507d3b3ff8a15f0948c21be5892693a12f)

---
 cluster-autoscaler/go.mod | 69 +-
 cluster-autoscaler/go.sum | 38 +-
 .../vendor/github.com/cilium/ebpf/.gitignore | 12 +
 .../github.com/cilium/ebpf/CODE_OF_CONDUCT.md | 46 ++
 .../vendor/github.com/cilium/ebpf/LICENSE | 23 +
 .../vendor/github.com/cilium/ebpf/abi.go | 183 ++++
 .../vendor/github.com/cilium/ebpf/asm/alu.go | 149 +++++
 .../github.com/cilium/ebpf/asm/alu_string.go | 107 ++++
 .../vendor/github.com/cilium/ebpf/asm/doc.go | 2 +
 .../vendor/github.com/cilium/ebpf/asm/func.go | 143 +++++
 .../github.com/cilium/ebpf/asm/func_string.go | 133 ++++
 .../github.com/cilium/ebpf/asm/instruction.go | 416 ++++++++++++
 .../vendor/github.com/cilium/ebpf/asm/jump.go | 109 ++++
 .../github.com/cilium/ebpf/asm/jump_string.go | 53 ++
 .../github.com/cilium/ebpf/asm/load_store.go | 189 ++++++
 .../cilium/ebpf/asm/load_store_string.go | 80 +++
 .../github.com/cilium/ebpf/asm/opcode.go | 237 +++++++
 .../cilium/ebpf/asm/opcode_string.go | 38 ++
 .../github.com/cilium/ebpf/asm/register.go | 42 ++
 .../github.com/cilium/ebpf/collection.go | 148 +++++
 .../vendor/github.com/cilium/ebpf/doc.go | 17 +
 .../github.com/cilium/ebpf/elf_reader.go | 392 ++++++++++++
 .../vendor/github.com/cilium/ebpf/feature.go | 19 +
 .../vendor/github.com/cilium/ebpf/go.mod | 8 +
 .../vendor/github.com/cilium/ebpf/go.sum | 4 +
 .../github.com/cilium/ebpf/internal/cpu.go | 64 ++
 .../github.com/cilium/ebpf/internal/endian.go | 24 +
 .../cilium/ebpf/internal/unix/types_linux.go | 118 ++++
 .../cilium/ebpf/internal/unix/types_other.go | 183 ++++++
 .../vendor/github.com/cilium/ebpf/linker.go | 58 ++
 .../vendor/github.com/cilium/ebpf/map.go | 595 ++++++++++++++++++
 .../github.com/cilium/ebpf/marshalers.go | 192 ++++++
 .../vendor/github.com/cilium/ebpf/prog.go | 523 +++++++++++++++
 .../github.com/cilium/ebpf/ptr_32_be.go | 14 +
 .../github.com/cilium/ebpf/ptr_32_le.go | 14 +
 .../vendor/github.com/cilium/ebpf/ptr_64.go | 14 +
 .../vendor/github.com/cilium/ebpf/readme.md | 20 +
 .../github.com/cilium/ebpf/run-tests.sh | 67 ++
 .../vendor/github.com/cilium/ebpf/syscalls.go | 420 +++++++++++++
 .../vendor/github.com/cilium/ebpf/types.go | 189 ++++++
 .../github.com/cilium/ebpf/types_string.go | 78 +++
 .../docker/libnetwork/ipvs/constants.go | 11 +
 .../docker/libnetwork/ipvs/netlink.go | 39 +-
 .../vendor/github.com/google/gofuzz/README.md | 2 +-
 .../vendor/github.com/google/gofuzz/fuzz.go | 35 +-
 .../runc/libcontainer/cgroups/cgroups.go | 10 +
 .../cgroups/ebpf/devicefilter/devicefilter.go | 180 ++++++
 .../runc/libcontainer/cgroups/ebpf/ebpf.go | 45 ++
 .../runc/libcontainer/cgroups/fs/apply_raw.go | 37 +-
 .../runc/libcontainer/cgroups/fs/blkio.go | 17 +-
 .../runc/libcontainer/cgroups/fs/cpu.go | 13 +-
 .../runc/libcontainer/cgroups/fs/cpu_v2.go | 92 ---
 .../runc/libcontainer/cgroups/fs/cpuacct.go | 7 +-
 .../runc/libcontainer/cgroups/fs/cpuset.go | 9 +-
 .../runc/libcontainer/cgroups/fs/cpuset_v2.go | 159 -----
 .../runc/libcontainer/cgroups/fs/devices.go | 11 +-
 .../runc/libcontainer/cgroups/fs/freezer.go | 5 +-
 .../libcontainer/cgroups/fs/freezer_v2.go | 74 ---
 .../runc/libcontainer/cgroups/fs/hugetlb.go | 9 +-
 .../runc/libcontainer/cgroups/fs/io_v2.go | 191 ------
 .../runc/libcontainer/cgroups/fs/memory.go | 33 +-
 .../runc/libcontainer/cgroups/fs/memory_v2.go | 164 -----
 .../runc/libcontainer/cgroups/fs/net_cls.go | 3 +-
 .../runc/libcontainer/cgroups/fs/net_prio.go | 3 +-
 .../runc/libcontainer/cgroups/fs/pids.go | 9 +-
 .../runc/libcontainer/cgroups/fs/pids_v2.go | 107 ----
 .../runc/libcontainer/cgroups/fs2/cpu.go | 56 ++
 .../runc/libcontainer/cgroups/fs2/cpuset.go | 22 +
 .../libcontainer/cgroups/fs2/defaultpath.go | 99 +++
 .../runc/libcontainer/cgroups/fs2/devices.go | 73 +++
 .../runc/libcontainer/cgroups/fs2/freezer.go | 53 ++
 .../runc/libcontainer/cgroups/fs2/fs2.go | 214 +++++++
 .../runc/libcontainer/cgroups/fs2/io.go | 124 ++++
 .../runc/libcontainer/cgroups/fs2/memory.go | 103 +++
 .../runc/libcontainer/cgroups/fs2/pids.go | 90 +++
 .../libcontainer/cgroups/fscommon/fscommon.go | 36 ++
 .../cgroups/{fs => fscommon}/utils.go | 14 +-
 .../cgroups/systemd/apply_nosystemd.go | 10 +-
 .../cgroups/systemd/apply_systemd.go | 10 +-
 .../cgroups/systemd/unified_hierarchy.go | 131 ++--
 .../runc/libcontainer/cgroups/utils.go | 29 +-
 .../runc/libcontainer/container_linux.go | 91 +--
 .../runc/libcontainer/factory_linux.go | 32 +
 .../runc/libcontainer/init_linux.go | 11 +-
 .../runc/libcontainer/network_linux.go | 5 +-
 .../runc/libcontainer/rootfs_linux.go | 22 +-
 .../opencontainers/runc/libcontainer/stats.go | 15 -
 .../runc/libcontainer/stats_linux.go | 9 +-
 .../opencontainers/runc/types/events.go | 130 ++++
 .../prometheus/client_model/go/metrics.pb.go | 266 +++++---
 .../k8s.io/api/authentication/v1/types.go | 2 +-
 .../api/authentication/v1beta1/types.go | 2 +-
 .../k8s.io/api/authorization/v1/types.go | 8 +-
 .../k8s.io/api/authorization/v1beta1/types.go | 8 +-
 .../vendor/k8s.io/api/core/v1/types.go | 1 +
 .../api/flowcontrol/v1alpha1/generated.proto | 12 +-
 .../k8s.io/api/flowcontrol/v1alpha1/types.go | 12 +-
 .../apimachinery/pkg/api/resource/math.go | 8 +-
 .../apimachinery/pkg/api/resource/quantity.go | 5 +
 .../apimachinery/pkg/apis/meta/v1/duration.go | 5 +
 .../apimachinery/pkg/apis/meta/v1/time.go | 10 +
 .../k8s.io/apimachinery/pkg/runtime/BUILD | 1 +
 .../apimachinery/pkg/runtime/converter.go | 101 +--
 .../apimachinery/pkg/runtime/serializer/BUILD | 1 +
 .../k8s.io/apimachinery/pkg/util/wait/BUILD | 10 +-
 .../k8s.io/apimachinery/pkg/util/wait/wait.go | 123 +++-
 .../plugin/namespace/lifecycle/admission.go | 2 +-
 .../plugin/webhook/mutating/dispatcher.go | 5 +-
 .../plugin/webhook/namespace/matcher.go | 3 +-
 .../plugin/webhook/validating/dispatcher.go | 6 +-
 .../pkg/authentication/token/cache/stats.go | 25 +-
 .../pkg/endpoints/handlers/create.go | 39 +-
 .../pkg/endpoints/handlers/fieldmanager/BUILD | 4 +
 .../handlers/fieldmanager/capmanagers.go | 15 +-
 .../handlers/fieldmanager/fieldmanager.go | 2 +-
 .../fieldmanager/internal/managedfields.go | 15 +-
 .../fieldmanager/internal/typeconverter.go | 20 +-
 .../handlers/fieldmanager/structuredmerge.go | 4 +-
 .../apiserver/pkg/endpoints/installer.go | 15 +-
 .../apiserver/pkg/endpoints/metrics/BUILD | 8 -
 .../pkg/endpoints/metrics/metrics.go | 21 +-
 .../apiserver/pkg/features/kube_features.go | 7 +
 .../apiserver/pkg/registry/generic/BUILD | 1 +
 .../apiserver/pkg/registry/generic/options.go | 2 +
 .../pkg/registry/generic/storage_decorator.go | 7 +-
 .../apiserver/pkg/registry/rest/rest.go | 5 +
 .../apiserver/pkg/storage/interfaces.go | 6 +
 .../pkg/storage/selection_predicate.go | 29 +
 .../pkg/authenticator/token/webhook/BUILD | 1 +
 .../authenticator/token/webhook/webhook.go | 13 +-
 .../plugin/pkg/authorizer/webhook/BUILD | 1 +
 .../plugin/pkg/authorizer/webhook/webhook.go | 13 +-
 .../client-go/discovery/discovery_client.go | 13 +-
 .../vendor/k8s.io/client-go/dynamic/simple.go | 19 +-
 .../v1/mutatingwebhookconfiguration.go | 5 +-
 .../v1/validatingwebhookconfiguration.go | 5 +-
 .../v1beta1/mutatingwebhookconfiguration.go | 5 +-
 .../v1beta1/validatingwebhookconfiguration.go | 5 +-
 .../informers/apps/v1/controllerrevision.go | 5 +-
 .../client-go/informers/apps/v1/daemonset.go | 5 +-
 .../client-go/informers/apps/v1/deployment.go | 5 +-
 .../client-go/informers/apps/v1/replicaset.go | 5 +-
 .../informers/apps/v1/statefulset.go | 5 +-
 .../apps/v1beta1/controllerrevision.go | 5 +-
 .../informers/apps/v1beta1/deployment.go | 5 +-
 .../informers/apps/v1beta1/statefulset.go | 5 +-
 .../apps/v1beta2/controllerrevision.go | 5 +-
 .../informers/apps/v1beta2/daemonset.go | 5 +-
 .../informers/apps/v1beta2/deployment.go | 5 +-
 .../informers/apps/v1beta2/replicaset.go | 5 +-
 .../informers/apps/v1beta2/statefulset.go | 5 +-
 .../auditregistration/v1alpha1/auditsink.go | 5 +-
 .../autoscaling/v1/horizontalpodautoscaler.go | 5 +-
 .../v2beta1/horizontalpodautoscaler.go | 5 +-
 .../v2beta2/horizontalpodautoscaler.go | 5 +-
 .../client-go/informers/batch/v1/job.go | 5 +-
 .../informers/batch/v1beta1/cronjob.go | 5 +-
 .../informers/batch/v2alpha1/cronjob.go | 5 +-
 .../v1beta1/certificatesigningrequest.go | 5 +-
 .../informers/coordination/v1/lease.go | 5 +-
 .../informers/coordination/v1beta1/lease.go | 5 +-
 .../informers/core/v1/componentstatus.go | 5 +-
 .../client-go/informers/core/v1/configmap.go | 5 +-
 .../client-go/informers/core/v1/endpoints.go | 5 +-
 .../client-go/informers/core/v1/event.go | 5 +-
 .../client-go/informers/core/v1/limitrange.go | 5 +-
 .../client-go/informers/core/v1/namespace.go | 5 +-
 .../client-go/informers/core/v1/node.go | 5 +-
 .../informers/core/v1/persistentvolume.go | 5 +-
 .../core/v1/persistentvolumeclaim.go | 5 +-
 .../k8s.io/client-go/informers/core/v1/pod.go | 5 +-
 .../informers/core/v1/podtemplate.go | 5 +-
 .../core/v1/replicationcontroller.go | 5 +-
 .../informers/core/v1/resourcequota.go | 5 +-
 .../client-go/informers/core/v1/secret.go | 5 +-
 .../client-go/informers/core/v1/service.go | 5 +-
 .../informers/core/v1/serviceaccount.go | 5 +-
 .../discovery/v1alpha1/endpointslice.go | 5 +-
 .../discovery/v1beta1/endpointslice.go | 5 +-
 .../informers/events/v1beta1/event.go | 5 +-
 .../informers/extensions/v1beta1/daemonset.go | 5 +-
 .../extensions/v1beta1/deployment.go | 5 +-
 .../informers/extensions/v1beta1/ingress.go | 5 +-
 .../extensions/v1beta1/networkpolicy.go | 5 +-
 .../extensions/v1beta1/podsecuritypolicy.go | 5 +-
 .../extensions/v1beta1/replicaset.go | 5 +-
 .../flowcontrol/v1alpha1/flowschema.go | 5 +-
 .../v1alpha1/prioritylevelconfiguration.go | 5 +-
 .../informers/networking/v1/networkpolicy.go | 5 +-
 .../informers/networking/v1beta1/ingress.go | 5 +-
 .../informers/node/v1alpha1/runtimeclass.go | 5 +-
 .../informers/node/v1beta1/runtimeclass.go | 5 +-
 .../policy/v1beta1/poddisruptionbudget.go | 5 +-
 .../policy/v1beta1/podsecuritypolicy.go | 5 +-
 .../informers/rbac/v1/clusterrole.go | 5 +-
 .../informers/rbac/v1/clusterrolebinding.go | 5 +-
 .../client-go/informers/rbac/v1/role.go | 5 +-
 .../informers/rbac/v1/rolebinding.go | 5 +-
 .../informers/rbac/v1alpha1/clusterrole.go | 5 +-
 .../rbac/v1alpha1/clusterrolebinding.go | 5 +-
 .../client-go/informers/rbac/v1alpha1/role.go | 5 +-
 .../informers/rbac/v1alpha1/rolebinding.go | 5 +-
 .../informers/rbac/v1beta1/clusterrole.go | 5 +-
 .../rbac/v1beta1/clusterrolebinding.go | 5 +-
 .../client-go/informers/rbac/v1beta1/role.go | 5 +-
 .../informers/rbac/v1beta1/rolebinding.go | 5 +-
 .../informers/scheduling/v1/priorityclass.go | 5 +-
 .../scheduling/v1alpha1/priorityclass.go | 5 +-
 .../scheduling/v1beta1/priorityclass.go | 5 +-
 .../informers/settings/v1alpha1/podpreset.go | 5 +-
 .../client-go/informers/storage/v1/csinode.go | 5 +-
 .../informers/storage/v1/storageclass.go | 5 +-
 .../informers/storage/v1/volumeattachment.go | 5 +-
 .../storage/v1alpha1/volumeattachment.go | 5 +-
 .../informers/storage/v1beta1/csidriver.go | 5 +-
 .../informers/storage/v1beta1/csinode.go | 5 +-
 .../informers/storage/v1beta1/storageclass.go | 5 +-
 .../storage/v1beta1/volumeattachment.go | 5 +-
 .../fake/fake_mutatingwebhookconfiguration.go | 18 +-
 .../fake_validatingwebhookconfiguration.go | 18 +-
 .../v1/mutatingwebhookconfiguration.go | 54 +-
 .../v1/validatingwebhookconfiguration.go | 54 +-
 .../fake/fake_mutatingwebhookconfiguration.go | 18 +-
 .../fake_validatingwebhookconfiguration.go | 18 +-
 .../v1beta1/mutatingwebhookconfiguration.go | 54 +-
 .../v1beta1/validatingwebhookconfiguration.go | 54 +-
 .../typed/apps/v1/controllerrevision.go | 54 +-
 .../kubernetes/typed/apps/v1/daemonset.go | 62 +-
 .../kubernetes/typed/apps/v1/deployment.go | 75 +--
 .../apps/v1/fake/fake_controllerrevision.go | 18 +-
 .../typed/apps/v1/fake/fake_daemonset.go | 20 +-
 .../typed/apps/v1/fake/fake_deployment.go | 24 +-
 .../typed/apps/v1/fake/fake_replicaset.go | 24 +-
 .../typed/apps/v1/fake/fake_statefulset.go | 24 +-
 .../kubernetes/typed/apps/v1/replicaset.go | 75 +--
 .../kubernetes/typed/apps/v1/statefulset.go | 75 +--
 .../typed/apps/v1beta1/controllerrevision.go | 54 +-
 .../typed/apps/v1beta1/deployment.go | 62 +-
 .../v1beta1/fake/fake_controllerrevision.go | 18 +-
 .../apps/v1beta1/fake/fake_deployment.go | 20 +-
 .../apps/v1beta1/fake/fake_statefulset.go | 20 +-
 .../typed/apps/v1beta1/statefulset.go | 62 +-
 .../typed/apps/v1beta2/controllerrevision.go | 54 +-
 .../typed/apps/v1beta2/daemonset.go | 62 +-
 .../typed/apps/v1beta2/deployment.go | 62 +-
 .../v1beta2/fake/fake_controllerrevision.go | 18 +-
 .../typed/apps/v1beta2/fake/fake_daemonset.go | 20 +-
 .../apps/v1beta2/fake/fake_deployment.go | 20 +-
 .../apps/v1beta2/fake/fake_replicaset.go | 20 +-
 .../apps/v1beta2/fake/fake_statefulset.go | 24 +-
 .../typed/apps/v1beta2/replicaset.go | 62 +-
 .../typed/apps/v1beta2/statefulset.go | 75 +--
 .../auditregistration/v1alpha1/auditsink.go | 54 +-
 .../v1alpha1/fake/fake_auditsink.go | 18 +-
 .../kubernetes/typed/authentication/v1/BUILD | 2 +-
 .../typed/authentication/v1/fake/BUILD | 3 +-
 .../v1/fake/fake_tokenreview.go | 23 +
 .../v1/fake/fake_tokenreview_expansion.go | 33 -
 .../authentication/v1/generated_expansion.go | 2 +
 .../typed/authentication/v1/tokenreview.go | 18 +
 .../v1/tokenreview_expansion.go | 43 --
 .../typed/authentication/v1beta1/BUILD | 2 +-
 .../typed/authentication/v1beta1/fake/BUILD | 3 +-
 .../v1beta1/fake/fake_tokenreview.go | 23 +
 .../fake/fake_tokenreview_expansion.go | 33 -
 .../v1beta1/generated_expansion.go | 2 +
 .../authentication/v1beta1/tokenreview.go | 18 +
 .../v1beta1/tokenreview_expansion.go | 43 --
 .../kubernetes/typed/authorization/v1/BUILD | 5 +-
 .../typed/authorization/v1/fake/BUILD | 6 +-
 .../v1/fake/fake_localsubjectaccessreview.go | 24 +
 ...fake_localsubjectaccessreview_expansion.go | 33 -
 .../v1/fake/fake_selfsubjectaccessreview.go | 23 +
 .../fake_selfsubjectaccessreview_expansion.go | 33 -
 .../v1/fake/fake_selfsubjectrulesreview.go | 23 +
 .../fake_selfsubjectrulesreview_expansion.go | 33 -
 .../v1/fake/fake_subjectaccessreview.go | 23 +
 .../fake_subjectaccessreview_expansion.go | 36 --
 .../authorization/v1/generated_expansion.go | 8 +
 .../v1/localsubjectaccessreview.go | 19 +
 .../v1/localsubjectaccessreview_expansion.go | 44 --
 .../v1/selfsubjectaccessreview.go | 18 +
 .../v1/selfsubjectaccessreview_expansion.go | 43 --
 .../v1/selfsubjectrulesreview.go | 18 +
 .../v1/selfsubjectrulesreview_expansion.go | 43 --
 .../authorization/v1/subjectaccessreview.go | 18 +
 .../v1/subjectaccessreview_expansion.go | 44 --
 .../typed/authorization/v1beta1/BUILD | 5 +-
 .../typed/authorization/v1beta1/fake/BUILD | 7 +-
 .../v1beta1/fake/fake_generated_expansion.go | 17 -
 .../fake/fake_localsubjectaccessreview.go | 24 +
 ...fake_localsubjectaccessreview_expansion.go | 33 -
 .../fake/fake_selfsubjectaccessreview.go | 23 +
 .../fake_selfsubjectaccessreview_expansion.go | 33 -
 .../fake/fake_selfsubjectrulesreview.go | 23 +
 .../fake_selfsubjectrulesreview_expansion.go | 33 -
 .../v1beta1/fake/fake_subjectaccessreview.go | 23 +
 .../fake_subjectaccessreview_expansion.go | 33 -
 .../v1beta1/generated_expansion.go | 8 +
 .../v1beta1/localsubjectaccessreview.go | 19 +
 .../localsubjectaccessreview_expansion.go | 44 --
 .../v1beta1/selfsubjectaccessreview.go | 18 +
 .../selfsubjectaccessreview_expansion.go | 43 --
 .../v1beta1/selfsubjectrulesreview.go | 18 +
 .../selfsubjectrulesreview_expansion.go | 43 --
 .../v1beta1/subjectaccessreview.go | 18 +
 .../v1beta1/subjectaccessreview_expansion.go | 44 --
 .../v1/fake/fake_horizontalpodautoscaler.go | 20 +-
 .../autoscaling/v1/horizontalpodautoscaler.go | 62 +-
 .../fake/fake_horizontalpodautoscaler.go | 20 +-
 .../v2beta1/horizontalpodautoscaler.go | 62 +-
 .../fake/fake_horizontalpodautoscaler.go | 20 +-
 .../v2beta2/horizontalpodautoscaler.go | 62 +-
 .../typed/batch/v1/fake/fake_job.go | 20 +-
 .../kubernetes/typed/batch/v1/job.go | 62 +-
 .../kubernetes/typed/batch/v1beta1/cronjob.go | 62 +-
 .../typed/batch/v1beta1/fake/fake_cronjob.go | 20 +-
 .../typed/batch/v2alpha1/cronjob.go | 62 +-
 .../typed/batch/v2alpha1/fake/fake_cronjob.go | 20 +-
 .../v1beta1/certificatesigningrequest.go | 62 +-
 .../certificatesigningrequest_expansion.go | 4 +-
 .../fake/fake_certificatesigningrequest.go | 20 +-
 .../typed/coordination/v1/fake/fake_lease.go | 18 +-
 .../kubernetes/typed/coordination/v1/lease.go | 54 +-
 .../coordination/v1beta1/fake/fake_lease.go | 18 +-
 .../typed/coordination/v1beta1/lease.go | 54 +-
 .../client-go/kubernetes/typed/core/v1/BUILD | 1 -
 .../typed/core/v1/componentstatus.go | 54 +-
 .../kubernetes/typed/core/v1/configmap.go | 54 +-
 .../kubernetes/typed/core/v1/endpoints.go | 54 +-
 .../kubernetes/typed/core/v1/event.go | 54 +-
 .../typed/core/v1/event_expansion.go | 11 +-
 .../kubernetes/typed/core/v1/fake/BUILD | 1 -
 .../core/v1/fake/fake_componentstatus.go | 18 +-
 .../typed/core/v1/fake/fake_configmap.go | 18 +-
 .../typed/core/v1/fake/fake_endpoints.go | 18 +-
 .../typed/core/v1/fake/fake_event.go | 18 +-
 .../typed/core/v1/fake/fake_limitrange.go | 18 +-
 .../typed/core/v1/fake/fake_namespace.go | 18 +-
 .../typed/core/v1/fake/fake_node.go | 20 +-
 .../typed/core/v1/fake/fake_node_expansion.go | 6 +-
 .../core/v1/fake/fake_persistentvolume.go | 20 +-
 .../v1/fake/fake_persistentvolumeclaim.go | 20 +-
 .../kubernetes/typed/core/v1/fake/fake_pod.go | 24 +-
 .../typed/core/v1/fake/fake_podtemplate.go | 18 +-
 .../v1/fake/fake_replicationcontroller.go | 24 +-
 .../typed/core/v1/fake/fake_resourcequota.go | 20 +-
 .../typed/core/v1/fake/fake_secret.go | 18 +-
 .../typed/core/v1/fake/fake_service.go | 18 +-
 .../typed/core/v1/fake/fake_serviceaccount.go | 30 +-
 .../v1/fake/fake_serviceaccount_expansion.go | 31 -
 .../typed/core/v1/generated_expansion.go | 2 +
 .../kubernetes/typed/core/v1/limitrange.go | 54 +-
 .../kubernetes/typed/core/v1/namespace.go | 56 +-
 .../typed/core/v1/namespace_expansion.go | 8 +-
 .../kubernetes/typed/core/v1/node.go | 62 +-
 .../typed/core/v1/node_expansion.go | 10 +-
 .../typed/core/v1/persistentvolume.go | 62 +-
 .../typed/core/v1/persistentvolumeclaim.go | 62 +-
 .../client-go/kubernetes/typed/core/v1/pod.go | 75 +--
 .../kubernetes/typed/core/v1/pod_expansion.go | 8 +-
 .../kubernetes/typed/core/v1/podtemplate.go | 54 +-
 .../typed/core/v1/replicationcontroller.go | 75 +--
 .../kubernetes/typed/core/v1/resourcequota.go | 62 +-
 .../kubernetes/typed/core/v1/secret.go | 54 +-
 .../kubernetes/typed/core/v1/service.go | 56 +-
 .../typed/core/v1/serviceaccount.go | 72 ++-
 .../typed/core/v1/serviceaccount_expansion.go | 41 --
 .../typed/discovery/v1alpha1/endpointslice.go | 54 +-
 .../v1alpha1/fake/fake_endpointslice.go | 18 +-
 .../typed/discovery/v1beta1/endpointslice.go | 54 +-
 .../v1beta1/fake/fake_endpointslice.go | 18 +-
 .../kubernetes/typed/events/v1beta1/event.go | 54 +-
 .../typed/events/v1beta1/event_expansion.go | 7 +-
 .../typed/events/v1beta1/fake/fake_event.go | 18 +-
 .../typed/extensions/v1beta1/daemonset.go | 62 +-
 .../typed/extensions/v1beta1/deployment.go | 75 +--
 .../v1beta1/deployment_expansion.go | 8 +-
 .../extensions/v1beta1/fake/fake_daemonset.go | 20 +-
 .../v1beta1/fake/fake_deployment.go | 24 +-
 .../extensions/v1beta1/fake/fake_ingress.go | 20 +-
 .../v1beta1/fake/fake_networkpolicy.go | 18 +-
 .../v1beta1/fake/fake_podsecuritypolicy.go | 18 +-
 .../v1beta1/fake/fake_replicaset.go | 24 +-
 .../typed/extensions/v1beta1/ingress.go | 62 +-
 .../typed/extensions/v1beta1/networkpolicy.go | 54 +-
 .../extensions/v1beta1/podsecuritypolicy.go | 54 +-
 .../typed/extensions/v1beta1/replicaset.go | 75 +--
 .../v1alpha1/fake/fake_flowschema.go | 20 +-
 .../fake/fake_prioritylevelconfiguration.go | 20 +-
 .../typed/flowcontrol/v1alpha1/flowschema.go | 62 +-
 .../v1alpha1/prioritylevelconfiguration.go | 62 +-
 .../networking/v1/fake/fake_networkpolicy.go | 18 +-
 .../typed/networking/v1/networkpolicy.go | 54 +-
 .../networking/v1beta1/fake/fake_ingress.go | 20 +-
 .../typed/networking/v1beta1/ingress.go | 62 +-
 .../node/v1alpha1/fake/fake_runtimeclass.go | 18 +-
 .../typed/node/v1alpha1/runtimeclass.go | 54 +-
 .../node/v1beta1/fake/fake_runtimeclass.go | 18 +-
 .../typed/node/v1beta1/runtimeclass.go | 54 +-
 .../policy/v1beta1/eviction_expansion.go | 4 +-
 .../v1beta1/fake/fake_poddisruptionbudget.go | 20 +-
 .../v1beta1/fake/fake_podsecuritypolicy.go | 18 +-
 .../policy/v1beta1/poddisruptionbudget.go | 62 +-
 .../typed/policy/v1beta1/podsecuritypolicy.go | 54 +-
 .../kubernetes/typed/rbac/v1/clusterrole.go | 54 +-
 .../typed/rbac/v1/clusterrolebinding.go | 54 +-
 .../typed/rbac/v1/fake/fake_clusterrole.go | 18 +-
 .../rbac/v1/fake/fake_clusterrolebinding.go | 18 +-
 .../typed/rbac/v1/fake/fake_role.go | 18 +-
 .../typed/rbac/v1/fake/fake_rolebinding.go | 18 +-
 .../kubernetes/typed/rbac/v1/role.go | 54 +-
 .../kubernetes/typed/rbac/v1/rolebinding.go | 54 +-
 .../typed/rbac/v1alpha1/clusterrole.go | 54 +-
 .../typed/rbac/v1alpha1/clusterrolebinding.go | 54 +-
 .../rbac/v1alpha1/fake/fake_clusterrole.go | 18 +-
 .../v1alpha1/fake/fake_clusterrolebinding.go | 18 +-
 .../typed/rbac/v1alpha1/fake/fake_role.go | 18 +-
 .../rbac/v1alpha1/fake/fake_rolebinding.go | 18 +-
 .../kubernetes/typed/rbac/v1alpha1/role.go | 54 +-
 .../typed/rbac/v1alpha1/rolebinding.go | 54 +-
 .../typed/rbac/v1beta1/clusterrole.go | 54 +-
 .../typed/rbac/v1beta1/clusterrolebinding.go | 54 +-
 .../rbac/v1beta1/fake/fake_clusterrole.go | 18 +-
 .../v1beta1/fake/fake_clusterrolebinding.go | 18 +-
 .../typed/rbac/v1beta1/fake/fake_role.go | 18 +-
 .../rbac/v1beta1/fake/fake_rolebinding.go | 18 +-
 .../kubernetes/typed/rbac/v1beta1/role.go | 54 +-
 .../typed/rbac/v1beta1/rolebinding.go | 54 +-
 .../scheduling/v1/fake/fake_priorityclass.go | 18 +-
 .../typed/scheduling/v1/priorityclass.go | 54 +-
 .../v1alpha1/fake/fake_priorityclass.go | 18 +-
 .../scheduling/v1alpha1/priorityclass.go | 54 +-
 .../v1beta1/fake/fake_priorityclass.go | 18 +-
 .../typed/scheduling/v1beta1/priorityclass.go | 54 +-
 .../settings/v1alpha1/fake/fake_podpreset.go | 18 +-
 .../typed/settings/v1alpha1/podpreset.go | 54 +-
 .../kubernetes/typed/storage/v1/csinode.go | 54 +-
 .../typed/storage/v1/fake/fake_csinode.go | 18 +-
 .../storage/v1/fake/fake_storageclass.go | 18 +-
 .../storage/v1/fake/fake_volumeattachment.go | 20 +-
 .../typed/storage/v1/storageclass.go | 54 +-
 .../typed/storage/v1/volumeattachment.go | 62 +-
 .../v1alpha1/fake/fake_volumeattachment.go | 20 +-
 .../storage/v1alpha1/volumeattachment.go | 62 +-
 .../typed/storage/v1beta1/csidriver.go | 54 +-
 .../typed/storage/v1beta1/csinode.go | 54 +-
 .../storage/v1beta1/fake/fake_csidriver.go | 18 +-
 .../storage/v1beta1/fake/fake_csinode.go | 18 +-
 .../storage/v1beta1/fake/fake_storageclass.go | 18 +-
 .../v1beta1/fake/fake_volumeattachment.go | 20 +-
 .../typed/storage/v1beta1/storageclass.go | 54 +-
 .../typed/storage/v1beta1/volumeattachment.go | 62 +-
 .../k8s.io/client-go/metadata/metadata.go | 13 +-
 .../vendor/k8s.io/client-go/rest/request.go | 121 ++--
 .../vendor/k8s.io/client-go/scale/client.go | 7 +-
 .../k8s.io/client-go/testing/actions.go | 12 +-
 .../k8s.io/client-go/tools/cache/listwatch.go | 6 +-
 .../k8s.io/client-go/tools/cache/reflector.go | 86 ++-
 .../client-go/tools/cache/shared_informer.go | 17 +-
 .../resourcelock/configmaplock.go | 9 +-
 .../resourcelock/endpointslock.go | 9 +-
 .../leaderelection/resourcelock/leaselock.go | 9 +-
 .../k8s.io/client-go/tools/pager/pager.go | 21 +-
 .../k8s.io/client-go/tools/record/fake.go | 2 +-
 .../client-go/util/certificate/csr/csr.go | 8 +-
 .../vendor/k8s.io/cloud-provider/go.mod | 2 +-
 .../vendor/k8s.io/cloud-provider/go.sum | 17 +-
 .../cloud-provider/node/helpers/conditions.go | 3 +-
 .../cloud-provider/node/helpers/labels.go | 7 +-
 .../cloud-provider/node/helpers/taints.go | 7 +-
 .../cloud-provider/service/helpers/BUILD | 1 +
 .../cloud-provider/service/helpers/helper.go | 4 +-
 .../k8s.io/component-base/metrics/BUILD | 2 -
 .../vendor/k8s.io/csi-translation-lib/go.sum | 12 +-
 .../kubernetes/pkg/apis/core/v1/conversion.go | 20 +
 .../pkg/apis/core/v1/helper/helpers.go | 18 +
 .../apis/core/v1/zz_generated.conversion.go | 60 +-
 .../pkg/apis/core/validation/validation.go | 44 +-
 .../providers/.import-restrictions | 13 -
 .../pkg/controller/.import-restrictions | 303 ---------
 .../pkg/controller/client_builder.go | 12 +-
 .../pkg/controller/client_builder_dynamic.go | 6 +-
 .../pkg/controller/controller_utils.go | 37 +-
 .../deployment/util/deployment_util.go | 3 +-
 .../volume/persistentvolume/util/util.go | 7 +
 .../volume/scheduling/scheduler_binder.go | 5 +-
 .../kubernetes/pkg/features/kube_features.go | 13 +-
 .../pkg/kubelet/apis/podresources/server.go | 2 +
 .../certificate/bootstrap/bootstrap.go | 3 +-
 .../pkg/kubelet/cm/cgroup_manager_linux.go | 8 +-
 .../pkg/kubelet/cm/container_manager.go | 3 +
 .../pkg/kubelet/cm/container_manager_linux.go | 4 +
 .../pkg/kubelet/cm/container_manager_stub.go | 4 +
 .../kubelet/cm/container_manager_windows.go | 4 +
 .../pkg/kubelet/cm/cpumanager/cpu_manager.go | 4 +-
 .../kubelet/cm/cpumanager/fake_cpu_manager.go | 2 +-
 .../pkg/kubelet/cm/cpumanager/policy.go | 2 +-
 .../pkg/kubelet/cm/cpumanager/policy_none.go | 2 +-
 .../kubelet/cm/cpumanager/policy_static.go | 4 +-
 .../pkg/kubelet/cm/devicemanager/manager.go | 10 +-
 .../kubelet/cm/devicemanager/manager_stub.go | 7 +-
 .../cm/devicemanager/topology_hints.go | 4 +-
 .../pkg/kubelet/cm/devicemanager/types.go | 5 +-
 .../pkg/kubelet/cm/topologymanager/policy.go | 3 +-
 .../cm/topologymanager/policy_best_effort.go | 12 +-
 .../kubelet/cm/topologymanager/policy_none.go | 12 +-
 .../cm/topologymanager/policy_restricted.go | 18 +-
 .../policy_single_numa_node.go | 15 +-
 .../cm/topologymanager/topology_manager.go | 51 +-
 .../kubelet/configmap/configmap_manager.go | 17 +-
 .../kubernetes/pkg/kubelet/dockershim/BUILD | 2 +-
 .../pkg/kubelet/dockershim/cm/BUILD | 2 -
 .../dockershim/cm/container_manager_linux.go | 7 +-
 .../pkg/kubelet/dockershim/docker_sandbox.go | 11 +-
 .../dockershim/docker_stats_windows.go | 23 +-
 .../k8s.io/kubernetes/pkg/kubelet/kubelet.go | 7 -
 .../pkg/kubelet/kubelet_node_status.go | 6 +-
 .../kubernetes/pkg/kubelet/kubelet_pods.go | 4 +-
 .../kubeletconfig/checkpoint/download.go | 3 +-
 .../pkg/kubelet/kubeletconfig/configsync.go | 3 +-
 .../kubelet/kubeletconfig/status/status.go | 3 +-
 .../kubernetes/pkg/kubelet/kuberuntime/BUILD | 16 +-
 .../kuberuntime_container_linux.go | 48 +-
 .../kuberuntime_container_windows.go | 24 +
 .../pkg/kubelet/kuberuntime/logs/logs.go | 9 -
 .../kubernetes/pkg/kubelet/lifecycle/BUILD | 1 -
 .../pkg/kubelet/lifecycle/predicate.go | 12 +-
 .../kubernetes/pkg/kubelet/metrics/metrics.go | 10 +-
 .../pkg/kubelet/nodelease/controller.go | 9 +-
 .../kubelet/pluginmanager/pluginwatcher/BUILD | 1 -
 .../pluginwatcher/example_handler.go | 2 +-
 .../pluginwatcher/example_plugin.go | 2 +-
 .../pkg/kubelet/pod/mirror_client.go | 5 +-
 .../kubernetes/pkg/kubelet/qos/policy.go | 7 -
 .../pkg/kubelet/secret/secret_manager.go | 17 +-
 .../kubernetes/pkg/kubelet/server/server.go | 56 +-
 .../pkg/kubelet/stats/cri_stats_provider.go | 2 +-
 .../pkg/kubelet/status/status_manager.go | 9 +-
 .../kubernetes/pkg/kubelet/sysctl/BUILD | 2 -
 .../kubernetes/pkg/kubelet/sysctl/runtime.go | 95 ---
 .../k8s.io/kubernetes/pkg/kubelet/token/BUILD | 1 +
 .../pkg/kubelet/token/token_manager.go | 4 +-
 .../kubernetes/pkg/kubelet/util/manager/BUILD | 7 +
 .../util/manager/watch_based_manager.go | 66 +-
 .../desired_state_of_world_populator.go | 5 +-
 .../volumemanager/reconciler/reconciler.go | 3 +-
 .../kubernetes/pkg/kubemark/controller.go | 8 +-
 .../pkg/proxy/apis/config/validation/BUILD | 2 +
 .../apis/config/validation/validation.go | 33 +-
 .../kubernetes/pkg/proxy/iptables/proxier.go | 16 +-
 .../kubernetes/pkg/proxy/ipvs/proxier.go | 43 +-
 .../kubernetes/pkg/proxy/userspace/BUILD | 1 +
 .../kubernetes/pkg/proxy/userspace/proxier.go | 22 +-
 .../k8s.io/kubernetes/pkg/proxy/util/utils.go | 24 +-
 .../kubernetes/pkg/proxy/winkernel/hnsV1.go | 3 +
 .../kubernetes/pkg/proxy/winkernel/hnsV2.go | 3 +
 .../pkg/scheduler/algorithmprovider/BUILD | 2 -
 .../scheduler/algorithmprovider/registry.go | 163 +++--
 .../pkg/scheduler/apis/config/BUILD | 1 +
 .../pkg/scheduler/apis/config/types.go | 8 +-
 .../apis/config/zz_generated.deepcopy.go | 4 +-
 .../default_pod_topology_spread.go | 41 +-
 .../plugins/interpodaffinity/filtering.go | 150 +----
 .../plugins/interpodaffinity/plugin.go | 41 +-
 .../plugins/interpodaffinity/scoring.go | 38 +-
 .../framework/plugins/legacy_registry.go | 10 +-
 .../framework/plugins/nodeports/node_ports.go | 12 +-
 .../framework/plugins/noderesources/fit.go | 47 +-
 .../plugins/noderesources/resource_limits.go | 30 +-
 .../framework/plugins/podtopologyspread/BUILD | 1 +
 .../plugins/podtopologyspread/filtering.go | 14 +-
 .../plugins/podtopologyspread/plugin.go | 2 +-
 .../plugins/podtopologyspread/scoring.go | 40 +-
 .../framework/plugins/serviceaffinity/BUILD | 1 -
 .../serviceaffinity/service_affinity.go | 20 +-
 .../tainttoleration/taint_toleration.go | 32 +-
 .../scheduler/framework/v1alpha1/framework.go | 28 +-
 .../scheduler/framework/v1alpha1/interface.go | 24 +-
 .../framework/v1alpha1/waiting_pods_map.go | 18 +-
 .../pkg/scheduler/metrics/metrics.go | 6 +
 .../kubernetes/pkg/scheduler/util/utils.go | 13 +-
 .../kubernetes/pkg/serviceaccount/BUILD | 6 +
 .../kubernetes/pkg/serviceaccount/jwt.go | 4 +
 .../pkg/serviceaccount/openidmetadata.go | 295 +++++++++
 .../k8s.io/kubernetes/pkg/util/node/node.go | 11 +-
 .../k8s.io/kubernetes/pkg/util/pod/pod.go | 3 +-
 .../kubernetes/pkg/volume/awsebs/aws_util.go | 2 +-
 .../pkg/volume/azure_dd/azure_provision.go | 6 +
 .../pkg/volume/azure_file/azure_util.go | 5 +-
 .../kubernetes/pkg/volume/cephfs/cephfs.go | 3 +-
 .../pkg/volume/cinder/cinder_util.go | 3 +-
 .../kubernetes/pkg/volume/csi/csi_attacher.go | 15 +-
 .../kubernetes/pkg/volume/csi/csi_block.go | 4 +-
 .../kubernetes/pkg/volume/csi/csi_client.go | 25 +-
 .../kubernetes/pkg/volume/csi/csi_mounter.go | 3 +-
 .../kubernetes/pkg/volume/csi/csi_plugin.go | 23 +-
 .../kubernetes/pkg/volume/csi/csi_util.go | 3 +-
 .../csi/nodeinfomanager/nodeinfomanager.go | 19 +-
 .../kubernetes/pkg/volume/fc/attacher.go | 24 +-
 .../k8s.io/kubernetes/pkg/volume/fc/fc.go | 5 +-
 .../kubernetes/pkg/volume/fc/fc_util.go | 2 +-
 .../kubernetes/pkg/volume/flocker/flocker.go | 1 -
 .../pkg/volume/glusterfs/glusterfs.go | 15 +-
 .../kubernetes/pkg/volume/iscsi/iscsi.go | 3 +-
 .../pkg/volume/portworx/portworx.go | 3 +-
 .../pkg/volume/portworx/portworx_util.go | 3 +-
 .../k8s.io/kubernetes/pkg/volume/rbd/rbd.go | 7 +-
 .../operationexecutor/operation_generator.go | 7 +-
 .../util/recyclerclient/recycler_client.go | 11 +-
 .../kubernetes/pkg/volume/util/resize_util.go | 5 +-
 .../k8s.io/kubernetes/pkg/volume/util/util.go | 7 +-
 .../volume/vsphere_volume/vsphere_volume.go | 8 +-
 .../kubernetes/test/utils/create_resources.go | 26 +-
 .../kubernetes/test/utils/delete_resources.go | 19 +-
 .../kubernetes/test/utils/density_utils.go | 9 +-
 .../kubernetes/test/utils/deployment.go | 21 +-
 .../k8s.io/kubernetes/test/utils/pod_store.go | 5 +-
 .../kubernetes/test/utils/replicaset.go | 11 +-
 .../k8s.io/kubernetes/test/utils/runners.go | 22 +-
 .../k8s.io/legacy-cloud-providers/aws/aws.go | 2 +-
 .../legacy-cloud-providers/aws/aws_fakes.go | 6 -
 .../azure/auth/azure_auth.go | 2 +-
 .../legacy-cloud-providers/azure/azure.go | 19 +-
 .../azure/azure_cache.go | 15 +-
 .../azure/azure_config.go | 3 +-
 .../azure/azure_controller_common.go | 13 +-
 .../azure/azure_controller_standard.go | 14 +-
 .../azure/azure_controller_vmss.go | 14 +-
 .../azure/azure_fakes.go | 2 +-
 .../azure/azure_file.go | 32 +-
 .../azure/azure_vmsets.go | 2 +-
 .../azure/azure_vmss.go | 66 +-
 .../azure/azure_vmss_cache.go | 67 +-
 .../azure/azure_wrap.go | 7 +
 .../azure/retry/azure_error.go | 42 +-
 .../azure/retry/azure_retry.go | 11 +-
 .../k8s.io/legacy-cloud-providers/gce/BUILD | 2 +
 .../k8s.io/legacy-cloud-providers/gce/gce.go | 5 +-
 .../gce/gce_clusterid.go | 3 +-
 .../legacy-cloud-providers/gce/gce_fake.go | 7 +
 .../gce/gce_loadbalancer_external.go | 24 +-
 .../gce/gce_loadbalancer_internal.go | 25 +-
 .../legacy-cloud-providers/gce/gce_util.go | 118 ++++
 .../openstack/openstack.go | 2 +-
 .../vendor/k8s.io/utils/mount/OWNERS | 1 +
 .../vendor/k8s.io/utils/mount/mount.go | 31 +
 .../vendor/k8s.io/utils/mount/mount_linux.go | 164 +++--
 .../vendor/k8s.io/utils/net/ipnet.go | 100 +++
 cluster-autoscaler/vendor/modules.txt | 52 +-
 .../v3/fieldpath/fromvalue.go | 34 +-
 .../v3/fieldpath/serialize.go | 20 +-
 .../structured-merge-diff/v3/typed/helpers.go | 18 +-
 .../structured-merge-diff/v3/typed/merge.go | 86 ++-
 .../structured-merge-diff/v3/typed/remove.go | 36 +-
 .../v3/typed/tofieldset.go | 16 +-
 .../structured-merge-diff/v3/typed/typed.go | 3 +
 .../v3/typed/validate.go | 19 +-
 .../v3/value/allocator.go | 203 ++++++
 .../structured-merge-diff/v3/value/fields.go | 2 +-
 .../structured-merge-diff/v3/value/list.go | 95 ++-
 .../v3/value/listreflect.go | 60 +-
 .../v3/value/listunstructured.go | 47 ++
 .../structured-merge-diff/v3/value/map.go | 253 ++++++--
 .../v3/value/mapreflect.go | 134 +++-
 .../v3/value/mapunstructured.go | 129 ++--
 .../v3/value/reflectcache.go | 463 ++++++++++++++
 .../v3/value/structreflect.go | 237 +++----
 .../structured-merge-diff/v3/value/value.go | 52 +-
 .../v3/value/valuereflect.go | 279 ++++----
 .../v3/value/valueunstructured.go | 29 +-
 .../vendor/sigs.k8s.io/yaml/.travis.yml | 15 +-
 .../vendor/sigs.k8s.io/yaml/OWNERS | 2 +
 .../vendor/sigs.k8s.io/yaml/README.md | 14 +-
 .../vendor/sigs.k8s.io/yaml/go.mod | 8 +
 .../vendor/sigs.k8s.io/yaml/go.sum | 9 +
 .../vendor/sigs.k8s.io/yaml/yaml.go | 61 ++
 677 files changed, 15680 insertions(+), 7757 deletions(-)
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/.gitignore
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/LICENSE
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/abi.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/alu.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/alu_string.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/doc.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func_string.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/instruction.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump_string.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/load_store.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/load_store_string.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode_string.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/register.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/collection.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/doc.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/feature.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/go.mod
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/go.sum
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/cpu.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/linker.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/map.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/marshalers.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/prog.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_32_be.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_32_le.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_64.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/readme.md
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/run-tests.sh
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/syscalls.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/types.go
 create mode 100644 cluster-autoscaler/vendor/github.com/cilium/ebpf/types_string.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf.go
 delete mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_v2.go
 delete mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_v2.go
 delete mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_v2.go
 delete mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/io_v2.go
 delete mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_v2.go
 delete mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_v2.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go
 rename cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/{fs => fscommon}/utils.go (83%)
 delete mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/stats.go
 create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/types/events.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go
 delete mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/runtime.go
 create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/openidmetadata.go
 create mode 100644 cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go
 create mode 100644 cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go
 create mode 100644 cluster-autoscaler/vendor/sigs.k8s.io/yaml/go.mod
 create mode 100644 cluster-autoscaler/vendor/sigs.k8s.io/yaml/go.sum

diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod
index 527c926c3c..4dcff6e59a 100644
--- a/cluster-autoscaler/go.mod
+++ b/cluster-autoscaler/go.mod
@@ -28,6 +28,7 @@ require (
 	gopkg.in/yaml.v2 v2.2.8
 	k8s.io/api v0.0.0
 	k8s.io/apimachinery v0.0.0
+	k8s.io/apiserver v0.0.0
 	k8s.io/client-go v0.0.0
 	k8s.io/cloud-provider v0.0.0
 	k8s.io/component-base v0.0.0
@@ -89,6 +90,7 @@ replace (
 	github.com/chai2010/gettext-go => github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5
 	github.com/checkpoint-restore/go-criu => github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48 // 17b0214f6c48 is the SHA for git tag 3.11
 	github.com/cheekybits/genny => github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9
+	github.com/cilium/ebpf => github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed
 	github.com/client9/misspell => github.com/client9/misspell v0.3.4
 	github.com/clusterhq/flocker-go => github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313
 	github.com/cockroachdb/datadriven => github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa
@@ -116,7 +118,7 @@ replace (
 	github.com/docker/docker => github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0
 	github.com/docker/go-connections => github.com/docker/go-connections v0.3.0
 	github.com/docker/go-units => github.com/docker/go-units v0.4.0
-	github.com/docker/libnetwork => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34
+	github.com/docker/libnetwork => github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652
 	github.com/docker/spdystream => github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
 	github.com/dustin/go-humanize => github.com/dustin/go-humanize v1.0.0
 	github.com/elazarl/goproxy => github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 // 947c36da3153 is the SHA for git tag v1.11
@@ -195,7 +197,7 @@ replace (
 	github.com/google/go-cmp => github.com/google/go-cmp v0.3.0
 	github.com/google/go-github => github.com/google/go-github v17.0.0+incompatible
 	github.com/google/go-querystring => github.com/google/go-querystring v1.0.0
-	github.com/google/gofuzz => github.com/google/gofuzz v1.0.0
+	github.com/google/gofuzz => github.com/google/gofuzz v1.1.0
 	github.com/google/martian => github.com/google/martian v2.1.0+incompatible
 	github.com/google/pprof => github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57
 	github.com/google/renameio => github.com/google/renameio v0.1.0
@@ -285,7 +287,7 @@ replace (
 	github.com/onsi/gomega => github.com/onsi/gomega v1.7.0
 	github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.0-rc1
 	github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.1
-	github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc9
+	github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc10
 	github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.0.0
 	github.com/opencontainers/selinux => github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52
 	github.com/pelletier/go-toml => github.com/pelletier/go-toml v1.2.0
@@ -295,7 +297,7 @@ replace (
 	github.com/pquerna/cachecontrol => github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021
 	github.com/pquerna/ffjson => github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20
 	github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.0.0
-	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4
+	github.com/prometheus/client_model => github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common => github.com/prometheus/common v0.4.1
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.2
 	github.com/quasilyte/go-consistent => github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c
@@ -350,7 +352,7 @@ replace (
 	github.com/xlab/handysort => github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1
 	github.com/xordataexchange/crypt => github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77
 	go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.3
-	go.etcd.io/etcd => go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738
+	go.etcd.io/etcd => go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 // 3cf2f69b5738 is the SHA for git tag v3.4.3
 	go.mongodb.org/mongo-driver => go.mongodb.org/mongo-driver v1.1.2
 	go.opencensus.io => go.opencensus.io v0.21.0
 	go.uber.org/atomic => go.uber.org/atomic v1.3.2
@@ -400,37 +402,36 @@ replace (
 	gotest.tools/gotestsum => gotest.tools/gotestsum v0.3.5
 	grpc.go4.org => grpc.go4.org v0.0.0-20170609214715-11d0a25b4919
 	honnef.co/go/tools => honnef.co/go/tools v0.0.1-2019.2.2
-	k8s.io/api => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/api
-	k8s.io/apiextensions-apiserver => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/apiextensions-apiserver
-	k8s.io/apimachinery => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/apimachinery
-	k8s.io/apiserver => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/apiserver
-	k8s.io/cli-runtime => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/cli-runtime
-	k8s.io/client-go => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/client-go
-	k8s.io/cloud-provider => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/cloud-provider
-	k8s.io/cluster-bootstrap => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/cluster-bootstrap
-	k8s.io/code-generator => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/code-generator
-	k8s.io/component-base => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/component-base
-	k8s.io/cri-api => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/cri-api
-	k8s.io/csi-translation-lib => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/csi-translation-lib
-	k8s.io/gengo => k8s.io/gengo v0.0.0-20190822140433-26a664648505
+	k8s.io/api => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/api
+	k8s.io/apiextensions-apiserver => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/apiextensions-apiserver
+	k8s.io/apimachinery => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/apimachinery
+	k8s.io/apiserver => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/apiserver
+	k8s.io/cli-runtime => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/cli-runtime
+	k8s.io/client-go => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/client-go
+	k8s.io/cloud-provider => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/cloud-provider
+	k8s.io/cluster-bootstrap => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/cluster-bootstrap
+	k8s.io/code-generator => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/code-generator
+	k8s.io/component-base => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/component-base
+	k8s.io/cri-api => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/cri-api
+	k8s.io/csi-translation-lib => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/csi-translation-lib
+	k8s.io/gengo => k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
 	k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1
 	k8s.io/klog => k8s.io/klog v1.0.0
-	k8s.io/kube-aggregator => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kube-aggregator
-	k8s.io/kube-controller-manager => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kube-controller-manager
+	k8s.io/kube-aggregator => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kube-aggregator
+	k8s.io/kube-controller-manager => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kube-controller-manager
 	k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
-	k8s.io/kube-proxy => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kube-proxy
-	k8s.io/kube-scheduler => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kube-scheduler
-	k8s.io/kubectl => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kubectl
-	k8s.io/kubelet => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kubelet
-	k8s.io/legacy-cloud-providers => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/legacy-cloud-providers
-	k8s.io/metrics => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/metrics
-	k8s.io/node-api => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/node-api
+	k8s.io/kube-proxy => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kube-proxy
+	k8s.io/kube-scheduler => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kube-scheduler
+	k8s.io/kubectl => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kubectl
+	k8s.io/kubelet => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kubelet
+	k8s.io/legacy-cloud-providers => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/legacy-cloud-providers
+	k8s.io/metrics => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/metrics
 	k8s.io/repo-infra => k8s.io/repo-infra v0.0.1-alpha.1
-	k8s.io/sample-apiserver => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/sample-apiserver
-	k8s.io/sample-cli-plugin => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/sample-cli-plugin
-	k8s.io/sample-controller => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/sample-controller
+	k8s.io/sample-apiserver => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/sample-apiserver
+	k8s.io/sample-cli-plugin => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/sample-cli-plugin
+	k8s.io/sample-controller => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/sample-controller
 	k8s.io/system-validators => k8s.io/system-validators v1.0.4
-	k8s.io/utils => k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc
+	k8s.io/utils => k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31
 	modernc.org/cc => modernc.org/cc v1.0.0
 	modernc.org/golex => modernc.org/golex v1.0.0
 	modernc.org/mathutil => modernc.org/mathutil v1.0.0
@@ -441,8 +442,8 @@ replace (
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 	rsc.io/pdf => rsc.io/pdf v0.1.1
 	sigs.k8s.io/kustomize => sigs.k8s.io/kustomize v2.0.3+incompatible
-	sigs.k8s.io/structured-merge-diff/v3 => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd
-	sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.1.0
+	sigs.k8s.io/structured-merge-diff/v3 => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c
+	sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.2.0
 	sourcegraph.com/sqs/pbtypes => sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4
 	vbom.ml/util => vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc
 )
@@ -451,4 +452,4 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0
 
 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0
 
-replace k8s.io/kubernetes => /tmp/ca-update-vendor.RRRk/kubernetes
+replace k8s.io/kubernetes => /tmp/ca-update-vendor.CTcs/kubernetes
diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum
index 3bfbc3a21c..83da9b40ec 100644
--- a/cluster-autoscaler/go.sum
+++ b/cluster-autoscaler/go.sum
@@ -77,6 +77,8 @@ github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1
 github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48 h1:AQMF0Xixllgf29MKlx/TGEhRk7bEDX5kxz8Ui8lOvEs=
 github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
+github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed h1:/UgmF+cZTm9kp4uJ122y/9cVhczNJCgAgAeH2FfzPeg=
+github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 h1:eIHD9GNM3Hp7kcRW5mvcz7WTR3ETeoYYKwpgA04kaXE=
 github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
@@ -119,14 +121,14 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 h1:sLjcXL0ozEc1CVv4OhvrLFbieIL8OxgtFTMykm8idvo=
+github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 h1:w3NnFcKR5241cfmQU5ZZAsf0xcpId6mWOupTvJlUX2U=
 github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
 github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 h1:8GFZB1KesbMy2X2zTiJyAuwCow+U1GT0ueD42p59y4k=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
+github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 h1:alzR0hpQ/vaeYQAiqpCzrcbDbGMBAghmjT8nYe0To3I=
+github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
@@ -228,8 +230,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
@@ -283,7 +285,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/karrick/godirwalk v1.7.5 h1:JQFiMR65pT543bkWP46+k194gS999qo/OYccos9cOXg=
+github.com/karrick/godirwalk v1.7.5 h1:VbzFqwXwNbAZoA6W5odrLr+hKK197CcENcPh6E/gJ0M=
 github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -365,8 +367,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc10 h1:AbmCEuSZXVflng0/cboQkpdEOeBsPMjz6tmq4Pv8MZw=
+github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg=
 github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52 h1:B8hYj3NxHmjsC3T+tnlZ1UhInqUgnyF1zlGPmzNg2Qk=
@@ -382,8 +384,8 @@ github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20 h1:7sBb9iOkeq+O7AXl
 github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
 github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
@@ -533,12 +535,12 @@ gopkg.in/warnings.v0 v0.1.1 h1:XM28wIgFzaBmeZ5dNHIpWLQpt/9DGKxk+rCg/22nnYE=
 gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
 gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gotest.tools v2.2.0+incompatible h1:y0IMTfclpMdsdIbr6uwmJn5/WZ7vFuObxDMdrylFM3A=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY=
 grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
 honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
@@ -546,8 +548,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDN
 k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8=
 k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI=
-k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
-k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31 h1:KCcLuc/HD1RogJgEbZi9ObRuLv1bgiRCfAbidLKrUpg=
+k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
@@ -558,9 +560,9 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jC
 mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
-sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd h1:RtLwJkS5lCLrbjdIqQdKiwZ6LrlGuLVdIcVhkUfsaIA= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/.gitignore b/cluster-autoscaler/vendor/github.com/cilium/ebpf/.gitignore new file mode 100644 index 0000000000..f1c181ec9c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/cluster-autoscaler/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..8e42838c5a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/LICENSE b/cluster-autoscaler/vendor/github.com/cilium/ebpf/LICENSE new file mode 100644 index 0000000000..c637ae99c2 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/LICENSE @@ -0,0 +1,23 @@ +MIT License + +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/abi.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/abi.go new file mode 100644 index 0000000000..999b8cc859 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/abi.go @@ -0,0 +1,183 @@ +package ebpf + +import ( + "github.com/pkg/errors" +) + +// CollectionABI describes the interface of an eBPF collection. +type CollectionABI struct { + Maps map[string]*MapABI + Programs map[string]*ProgramABI +} + +// CheckSpec verifies that all maps and programs mentioned +// in the ABI are present in the spec. +func (abi *CollectionABI) CheckSpec(cs *CollectionSpec) error { + for name := range abi.Maps { + if cs.Maps[name] == nil { + return errors.Errorf("missing map %s", name) + } + } + + for name := range abi.Programs { + if cs.Programs[name] == nil { + return errors.Errorf("missing program %s", name) + } + } + + return nil +} + +// Check verifies that all items in a collection conform to this ABI. +func (abi *CollectionABI) Check(coll *Collection) error { + for name, mapABI := range abi.Maps { + m := coll.Maps[name] + if m == nil { + return errors.Errorf("missing map %s", name) + } + if err := mapABI.Check(m); err != nil { + return errors.Wrapf(err, "map %s", name) + } + } + + for name, progABI := range abi.Programs { + p := coll.Programs[name] + if p == nil { + return errors.Errorf("missing program %s", name) + } + if err := progABI.Check(p); err != nil { + return errors.Wrapf(err, "program %s", name) + } + } + + return nil +} + +// MapABI describes a Map. +// +// Use it to assert that a Map matches what your code expects. +type MapABI struct { + Type MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + InnerMap *MapABI +} + +func newMapABIFromSpec(spec *MapSpec) *MapABI { + var inner *MapABI + if spec.InnerMap != nil { + inner = newMapABIFromSpec(spec.InnerMap) + } + + return &MapABI{ + spec.Type, + spec.KeySize, + spec.ValueSize, + spec.MaxEntries, + inner, + } +} + +func newMapABIFromFd(fd *bpfFD) (*MapABI, error) { + info, err := bpfGetMapInfoByFD(fd) + if err != nil { + return nil, err + } + + mapType := MapType(info.mapType) + if mapType == ArrayOfMaps || mapType == HashOfMaps { + return nil, errors.New("can't get map info for nested maps") + } + + return &MapABI{ + mapType, + info.keySize, + info.valueSize, + info.maxEntries, + nil, + }, nil +} + +// Check verifies that a Map conforms to the ABI. +// +// Members of ABI which have the zero value of their type are not checked. +func (abi *MapABI) Check(m *Map) error { + return abi.check(&m.abi) +} + +func (abi *MapABI) check(other *MapABI) error { + if abi.Type != UnspecifiedMap && other.Type != abi.Type { + return errors.Errorf("expected map type %s, have %s", abi.Type, other.Type) + } + if err := checkUint32("key size", abi.KeySize, other.KeySize); err != nil { + return err + } + if err := checkUint32("value size", abi.ValueSize, other.ValueSize); err != nil { + return err + } + if err := checkUint32("max entries", abi.MaxEntries, other.MaxEntries); err != nil { + return err + } + + if abi.InnerMap == nil { + if abi.Type == ArrayOfMaps || abi.Type == HashOfMaps { + return errors.New("missing inner map ABI") + } + + return nil + } + + if other.InnerMap == nil { + return errors.New("missing inner map") + } + + return errors.Wrap(abi.InnerMap.check(other.InnerMap), "inner map") +} + +// ProgramABI describes a Program. +// +// Use it to assert that a Program matches what your code expects. 
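+//
+// Editor's note: the following sketch is illustrative and not part of the
+// vendored upstream source. Assuming a *Program obtained elsewhere, an ABI
+// assertion could look like:
+//
+//	abi := ebpf.ProgramABI{Type: ebpf.XDP}
+//	if err := abi.Check(prog); err != nil {
+//		// prog is not an XDP program
+//	}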
+type ProgramABI struct {
+	Type ProgramType
+}
+
+func newProgramABIFromSpec(spec *ProgramSpec) *ProgramABI {
+	return &ProgramABI{
+		spec.Type,
+	}
+}
+
+func newProgramABIFromFd(fd *bpfFD) (*ProgramABI, error) {
+	info, err := bpfGetProgInfoByFD(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	return newProgramABIFromInfo(info), nil
+}
+
+func newProgramABIFromInfo(info *bpfProgInfo) *ProgramABI {
+	return &ProgramABI{
+		Type: ProgramType(info.progType),
+	}
+}
+
+// Check verifies that a Program conforms to the ABI.
+//
+// Members which have the zero value of their type
+// are not checked.
+func (abi *ProgramABI) Check(prog *Program) error {
+	if abi.Type != UnspecifiedProgram && prog.abi.Type != abi.Type {
+		return errors.Errorf("expected program type %s, have %s", abi.Type, prog.abi.Type)
+	}
+
+	return nil
+}
+
+func checkUint32(name string, want, have uint32) error {
+	if want != 0 && have != want {
+		return errors.Errorf("expected %s to be %d, have %d", name, want, have)
+	}
+	return nil
+}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/alu.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/alu.go
new file mode 100644
index 0000000000..70ccc4d151
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/alu.go
@@ -0,0 +1,149 @@
+package asm
+
+//go:generate stringer -output alu_string.go -type=Source,Endianness,ALUOp
+
+// Source of ALU / ALU64 / Branch operations
+//
+// msb      lsb
+// +----+-+---+
+// |op  |S|cls|
+// +----+-+---+
+type Source uint8
+
+const sourceMask OpCode = 0x08
+
+// Source bitmask
+const (
+	// InvalidSource is returned by getters when invoked
+	// on non ALU / branch OpCodes.
+	InvalidSource Source = 0xff
+	// ImmSource src is from constant
+	ImmSource Source = 0x00
+	// RegSource src is from register
+	RegSource Source = 0x08
+)
+
+// The Endianness of a byte swap instruction.
+type Endianness uint8
+
+const endianMask = sourceMask
+
+// Endian flags
+const (
+	InvalidEndian Endianness = 0xff
+	// Convert to little endian
+	LE Endianness = 0x00
+	// Convert to big endian
+	BE Endianness = 0x08
+)
+
+// ALUOp are ALU / ALU64 operations
+//
+// msb      lsb
+// +----+-+---+
+// |OP  |s|cls|
+// +----+-+---+
+type ALUOp uint8
+
+const aluMask OpCode = 0xf0
+
+const (
+	// InvalidALUOp is returned by getters when invoked
+	// on non ALU OpCodes
+	InvalidALUOp ALUOp = 0xff
+	// Add - addition
+	Add ALUOp = 0x00
+	// Sub - subtraction
+	Sub ALUOp = 0x10
+	// Mul - multiplication
+	Mul ALUOp = 0x20
+	// Div - division
+	Div ALUOp = 0x30
+	// Or - bitwise or
+	Or ALUOp = 0x40
+	// And - bitwise and
+	And ALUOp = 0x50
+	// LSh - bitwise shift left
+	LSh ALUOp = 0x60
+	// RSh - bitwise shift right
+	RSh ALUOp = 0x70
+	// Neg - arithmetic negation (dst = -dst)
+	Neg ALUOp = 0x80
+	// Mod - modulo
+	Mod ALUOp = 0x90
+	// Xor - bitwise xor
+	Xor ALUOp = 0xa0
+	// Mov - move value from one place to another
+	Mov ALUOp = 0xb0
+	// ArSh - arithmetic shift
+	ArSh ALUOp = 0xc0
+	// Swap - endian conversions
+	Swap ALUOp = 0xd0
+)
+
+// HostTo converts from host to another endianness.
+func HostTo(endian Endianness, dst Register, size Size) Instruction {
+	var imm int64
+	switch size {
+	case Half:
+		imm = 16
+	case Word:
+		imm = 32
+	case DWord:
+		imm = 64
+	default:
+		return Instruction{OpCode: InvalidOpCode}
+	}
+
+	return Instruction{
+		OpCode:   OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)),
+		Dst:      dst,
+		Constant: imm,
+	}
+}
+
+// Op returns the OpCode for an ALU operation with a given source.
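+// For example (editor's note, illustrative): Add.Op(ImmSource) encodes
+// BPF_ALU64|BPF_K|BPF_ADD, i.e. opcode 0x07.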
+func (op ALUOp) Op(source Source) OpCode { + return OpCode(ALU64Class).SetALUOp(op).SetSource(source) +} + +// Reg emits `dst (op) src`. +func (op ALUOp) Reg(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm emits `dst (op) value`. +func (op ALUOp) Imm(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op(ImmSource), + Dst: dst, + Constant: int64(value), + } +} + +// Op32 returns the OpCode for a 32-bit ALU operation with a given source. +func (op ALUOp) Op32(source Source) OpCode { + return OpCode(ALUClass).SetALUOp(op).SetSource(source) +} + +// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst. +func (op ALUOp) Reg32(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op32(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst. +func (op ALUOp) Imm32(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op32(ImmSource), + Dst: dst, + Constant: int64(value), + } +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/alu_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/alu_string.go new file mode 100644 index 0000000000..72d3fe6292 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/alu_string.go @@ -0,0 +1,107 @@ +// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidSource-255] + _ = x[ImmSource-0] + _ = x[RegSource-8] +} + +const ( + _Source_name_0 = "ImmSource" + _Source_name_1 = "RegSource" + _Source_name_2 = "InvalidSource" +) + +func (i Source) String() string { + switch { + case i == 0: + return _Source_name_0 + case i == 8: + return _Source_name_1 + case i == 255: + return _Source_name_2 + default: + return "Source(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidEndian-255] + _ = x[LE-0] + _ = x[BE-8] +} + +const ( + _Endianness_name_0 = "LE" + _Endianness_name_1 = "BE" + _Endianness_name_2 = "InvalidEndian" +) + +func (i Endianness) String() string { + switch { + case i == 0: + return _Endianness_name_0 + case i == 8: + return _Endianness_name_1 + case i == 255: + return _Endianness_name_2 + default: + return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+	var x [1]struct{}
+	_ = x[InvalidALUOp-255]
+	_ = x[Add-0]
+	_ = x[Sub-16]
+	_ = x[Mul-32]
+	_ = x[Div-48]
+	_ = x[Or-64]
+	_ = x[And-80]
+	_ = x[LSh-96]
+	_ = x[RSh-112]
+	_ = x[Neg-128]
+	_ = x[Mod-144]
+	_ = x[Xor-160]
+	_ = x[Mov-176]
+	_ = x[ArSh-192]
+	_ = x[Swap-208]
+}
+
+const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp"
+
+var _ALUOp_map = map[ALUOp]string{
+	0:   _ALUOp_name[0:3],
+	16:  _ALUOp_name[3:6],
+	32:  _ALUOp_name[6:9],
+	48:  _ALUOp_name[9:12],
+	64:  _ALUOp_name[12:14],
+	80:  _ALUOp_name[14:17],
+	96:  _ALUOp_name[17:20],
+	112: _ALUOp_name[20:23],
+	128: _ALUOp_name[23:26],
+	144: _ALUOp_name[26:29],
+	160: _ALUOp_name[29:32],
+	176: _ALUOp_name[32:35],
+	192: _ALUOp_name[35:39],
+	208: _ALUOp_name[39:43],
+	255: _ALUOp_name[43:55],
+}
+
+func (i ALUOp) String() string {
+	if str, ok := _ALUOp_map[i]; ok {
+		return str
+	}
+	return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/doc.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/doc.go
new file mode 100644
index 0000000000..7031bdc276
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/doc.go
@@ -0,0 +1,2 @@
+// Package asm is an assembler for eBPF bytecode.
+package asm
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func.go
new file mode 100644
index 0000000000..97f794cdb2
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func.go
@@ -0,0 +1,143 @@
+package asm
+
+//go:generate stringer -output func_string.go -type=BuiltinFunc
+
+// BuiltinFunc is a built-in eBPF function.
+type BuiltinFunc int32
+
+// eBPF built-in functions
+//
+// You can regenerate this list using the following gawk script:
+//
+//	/FN\(.+\),/ {
+//	  match($1, /\((.+)\)/, r)
+//	  split(r[1], p, "_")
+//	  printf "Fn"
+//	  for (i in p) {
+//	    printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2)
+//	  }
+//	  print ""
+//	}
+//
+// The script expects include/uapi/linux/bpf.h as its input.
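+//
+// Editor's note (illustrative): saved as builtin.awk (a hypothetical file
+// name), the script would be invoked roughly as
+//
+//	gawk -f builtin.awk include/uapi/linux/bpf.h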
+const ( + FnUnspec BuiltinFunc = iota + FnMapLookupElem + FnMapUpdateElem + FnMapDeleteElem + FnProbeRead + FnKtimeGetNs + FnTracePrintk + FnGetPrandomU32 + FnGetSmpProcessorId + FnSkbStoreBytes + FnL3CsumReplace + FnL4CsumReplace + FnTailCall + FnCloneRedirect + FnGetCurrentPidTgid + FnGetCurrentUidGid + FnGetCurrentComm + FnGetCgroupClassid + FnSkbVlanPush + FnSkbVlanPop + FnSkbGetTunnelKey + FnSkbSetTunnelKey + FnPerfEventRead + FnRedirect + FnGetRouteRealm + FnPerfEventOutput + FnSkbLoadBytes + FnGetStackid + FnCsumDiff + FnSkbGetTunnelOpt + FnSkbSetTunnelOpt + FnSkbChangeProto + FnSkbChangeType + FnSkbUnderCgroup + FnGetHashRecalc + FnGetCurrentTask + FnProbeWriteUser + FnCurrentTaskUnderCgroup + FnSkbChangeTail + FnSkbPullData + FnCsumUpdate + FnSetHashInvalid + FnGetNumaNodeId + FnSkbChangeHead + FnXdpAdjustHead + FnProbeReadStr + FnGetSocketCookie + FnGetSocketUid + FnSetHash + FnSetsockopt + FnSkbAdjustRoom + FnRedirectMap + FnSkRedirectMap + FnSockMapUpdate + FnXdpAdjustMeta + FnPerfEventReadValue + FnPerfProgReadValue + FnGetsockopt + FnOverrideReturn + FnSockOpsCbFlagsSet + FnMsgRedirectMap + FnMsgApplyBytes + FnMsgCorkBytes + FnMsgPullData + FnBind + FnXdpAdjustTail + FnSkbGetXfrmState + FnGetStack + FnSkbLoadBytesRelative + FnFibLookup + FnSockHashUpdate + FnMsgRedirectHash + FnSkRedirectHash + FnLwtPushEncap + FnLwtSeg6StoreBytes + FnLwtSeg6AdjustSrh + FnLwtSeg6Action + FnRcRepeat + FnRcKeydown + FnSkbCgroupId + FnGetCurrentCgroupId + FnGetLocalStorage + FnSkSelectReuseport + FnSkbAncestorCgroupId + FnSkLookupTcp + FnSkLookupUdp + FnSkRelease + FnMapPushElem + FnMapPopElem + FnMapPeekElem + FnMsgPushData + FnMsgPopData + FnRcPointerRel + FnSpinLock + FnSpinUnlock + FnSkFullsock + FnTcpSock + FnSkbEcnSetCe + FnGetListenerSock + FnSkcLookupTcp + FnTcpCheckSyncookie + FnSysctlGetName + FnSysctlGetCurrentValue + FnSysctlGetNewValue + FnSysctlSetNewValue + FnStrtol + FnStrtoul + FnSkStorageGet + FnSkStorageDelete + FnSendSignal + FnTcpGenSyncookie +) + +// Call emits a function call. +func (fn BuiltinFunc) Call() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Constant: int64(fn), + } +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func_string.go new file mode 100644 index 0000000000..8860b9fdb4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -0,0 +1,133 @@ +// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[FnUnspec-0] + _ = x[FnMapLookupElem-1] + _ = x[FnMapUpdateElem-2] + _ = x[FnMapDeleteElem-3] + _ = x[FnProbeRead-4] + _ = x[FnKtimeGetNs-5] + _ = x[FnTracePrintk-6] + _ = x[FnGetPrandomU32-7] + _ = x[FnGetSmpProcessorId-8] + _ = x[FnSkbStoreBytes-9] + _ = x[FnL3CsumReplace-10] + _ = x[FnL4CsumReplace-11] + _ = x[FnTailCall-12] + _ = x[FnCloneRedirect-13] + _ = x[FnGetCurrentPidTgid-14] + _ = x[FnGetCurrentUidGid-15] + _ = x[FnGetCurrentComm-16] + _ = x[FnGetCgroupClassid-17] + _ = x[FnSkbVlanPush-18] + _ = x[FnSkbVlanPop-19] + _ = x[FnSkbGetTunnelKey-20] + _ = x[FnSkbSetTunnelKey-21] + _ = x[FnPerfEventRead-22] + _ = x[FnRedirect-23] + _ = x[FnGetRouteRealm-24] + _ = x[FnPerfEventOutput-25] + _ = x[FnSkbLoadBytes-26] + _ = x[FnGetStackid-27] + _ = x[FnCsumDiff-28] + _ = x[FnSkbGetTunnelOpt-29] + _ = x[FnSkbSetTunnelOpt-30] + _ = x[FnSkbChangeProto-31] + _ = x[FnSkbChangeType-32] + _ = x[FnSkbUnderCgroup-33] + _ = x[FnGetHashRecalc-34] + _ = x[FnGetCurrentTask-35] + _ = x[FnProbeWriteUser-36] + _ = x[FnCurrentTaskUnderCgroup-37] + _ = x[FnSkbChangeTail-38] + _ = x[FnSkbPullData-39] + _ = x[FnCsumUpdate-40] + _ = x[FnSetHashInvalid-41] + _ = x[FnGetNumaNodeId-42] + _ = x[FnSkbChangeHead-43] + _ = x[FnXdpAdjustHead-44] + _ = x[FnProbeReadStr-45] + _ = x[FnGetSocketCookie-46] + _ = x[FnGetSocketUid-47] + _ = x[FnSetHash-48] + _ = x[FnSetsockopt-49] + _ = x[FnSkbAdjustRoom-50] + _ = x[FnRedirectMap-51] + _ = x[FnSkRedirectMap-52] + _ = x[FnSockMapUpdate-53] + _ = x[FnXdpAdjustMeta-54] + _ = x[FnPerfEventReadValue-55] + _ = x[FnPerfProgReadValue-56] + _ = x[FnGetsockopt-57] + _ = x[FnOverrideReturn-58] + _ = x[FnSockOpsCbFlagsSet-59] + _ = x[FnMsgRedirectMap-60] + _ = x[FnMsgApplyBytes-61] + _ = x[FnMsgCorkBytes-62] + _ = x[FnMsgPullData-63] + _ = x[FnBind-64] + _ = x[FnXdpAdjustTail-65] + _ = x[FnSkbGetXfrmState-66] + _ = x[FnGetStack-67] + _ = x[FnSkbLoadBytesRelative-68] + _ = x[FnFibLookup-69] + _ = x[FnSockHashUpdate-70] + _ = x[FnMsgRedirectHash-71] + _ = x[FnSkRedirectHash-72] + _ = x[FnLwtPushEncap-73] + _ = x[FnLwtSeg6StoreBytes-74] + _ = x[FnLwtSeg6AdjustSrh-75] + _ = x[FnLwtSeg6Action-76] + _ = x[FnRcRepeat-77] + _ = x[FnRcKeydown-78] + _ = x[FnSkbCgroupId-79] + _ = x[FnGetCurrentCgroupId-80] + _ = x[FnGetLocalStorage-81] + _ = x[FnSkSelectReuseport-82] + _ = x[FnSkbAncestorCgroupId-83] + _ = x[FnSkLookupTcp-84] + _ = x[FnSkLookupUdp-85] + _ = x[FnSkRelease-86] + _ = x[FnMapPushElem-87] + _ = x[FnMapPopElem-88] + _ = x[FnMapPeekElem-89] + _ = x[FnMsgPushData-90] + _ = x[FnMsgPopData-91] + _ = x[FnRcPointerRel-92] + _ = x[FnSpinLock-93] + _ = x[FnSpinUnlock-94] + _ = x[FnSkFullsock-95] + _ = x[FnTcpSock-96] + _ = x[FnSkbEcnSetCe-97] + _ = x[FnGetListenerSock-98] + _ = x[FnSkcLookupTcp-99] + _ = x[FnTcpCheckSyncookie-100] + _ = x[FnSysctlGetName-101] + _ = x[FnSysctlGetCurrentValue-102] + _ = x[FnSysctlGetNewValue-103] + _ = x[FnSysctlSetNewValue-104] + _ = x[FnStrtol-105] + _ = x[FnStrtoul-106] + _ = x[FnSkStorageGet-107] + _ = x[FnSkStorageDelete-108] + _ = x[FnSendSignal-109] + _ = x[FnTcpGenSyncookie-110] +} + +const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookie" + +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632} + +func (i BuiltinFunc) String() string { + if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { + return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]] +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/instruction.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/instruction.go new file mode 100644 index 0000000000..c8ed6cfb49 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -0,0 +1,416 @@ +package asm + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "strings" + + "github.com/pkg/errors" +) + +// InstructionSize is the size of a BPF instruction in bytes +const InstructionSize = 8 + +// Instruction is a single eBPF instruction. +type Instruction struct { + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + Reference string + Symbol string +} + +// Sym creates a symbol. +func (ins Instruction) Sym(name string) Instruction { + ins.Symbol = name + return ins +} + +// Unmarshal decodes a BPF instruction. 
+func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { + var bi bpfInstruction + err := binary.Read(r, bo, &bi) + if err != nil { + return 0, err + } + + ins.OpCode = bi.OpCode + ins.Dst = bi.Registers.Dst() + ins.Src = bi.Registers.Src() + ins.Offset = bi.Offset + ins.Constant = int64(bi.Constant) + + if !bi.OpCode.isDWordLoad() { + return InstructionSize, nil + } + + var bi2 bpfInstruction + if err := binary.Read(r, bo, &bi2); err != nil { + // No Wrap, to avoid io.EOF clash + return 0, errors.New("64bit immediate is missing second half") + } + if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 { + return 0, errors.New("64bit immediate has non-zero fields") + } + ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant))) + + return 2 * InstructionSize, nil +} + +// Marshal encodes a BPF instruction. +func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) { + if ins.OpCode == InvalidOpCode { + return 0, errors.New("invalid opcode") + } + + isDWordLoad := ins.OpCode.isDWordLoad() + + cons := int32(ins.Constant) + if isDWordLoad { + // Encode least significant 32bit first for 64bit operations. + cons = int32(uint32(ins.Constant)) + } + + bpfi := bpfInstruction{ + ins.OpCode, + newBPFRegisters(ins.Dst, ins.Src), + ins.Offset, + cons, + } + + if err := binary.Write(w, bo, &bpfi); err != nil { + return 0, err + } + + if !isDWordLoad { + return InstructionSize, nil + } + + bpfi = bpfInstruction{ + Constant: int32(ins.Constant >> 32), + } + + if err := binary.Write(w, bo, &bpfi); err != nil { + return 0, err + } + + return 2 * InstructionSize, nil +} + +// RewriteMapPtr changes an instruction to use a new map fd. +// +// Returns an error if the fd is invalid, or the instruction +// is incorrect. +func (ins *Instruction) RewriteMapPtr(fd int) error { + if !ins.OpCode.isDWordLoad() { + return errors.Errorf("%s is not a 64 bit load", ins.OpCode) + } + + if fd < 0 { + return errors.New("invalid fd") + } + + ins.Src = R1 + ins.Constant = int64(fd) + return nil +} + +// Format implements fmt.Formatter. 
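+//
+// Editor's note (illustrative): asm.Mov.Imm(asm.R0, 0) prints via %v as
+//
+//	MovImm dst: r0 imm: 0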
+func (ins Instruction) Format(f fmt.State, c rune) { + if c != 'v' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c) + return + } + + op := ins.OpCode + + if op == InvalidOpCode { + fmt.Fprint(f, "INVALID") + return + } + + // Omit trailing space for Exit + if op.JumpOp() == Exit { + fmt.Fprint(f, op) + return + } + + fmt.Fprintf(f, "%v ", op) + switch cls := op.Class(); cls { + case LdClass, LdXClass, StClass, StXClass: + switch op.Mode() { + case ImmMode: + fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) + case AbsMode: + fmt.Fprintf(f, "imm: %d", ins.Constant) + case IndMode: + fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) + case MemMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) + case XAddMode: + fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) + } + + case ALU64Class, ALUClass: + fmt.Fprintf(f, "dst: %s ", ins.Dst) + if op.ALUOp() == Swap || op.Source() == ImmSource { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "src: %s", ins.Src) + } + + case JumpClass: + switch jop := op.JumpOp(); jop { + case Call: + if ins.Src == R1 { + // bpf-to-bpf call + fmt.Fprint(f, ins.Constant) + } else { + fmt.Fprint(f, BuiltinFunc(ins.Constant)) + } + + default: + fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) + if op.Source() == ImmSource { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "src: %s", ins.Src) + } + } + } + + if ins.Reference != "" { + fmt.Fprintf(f, " <%s>", ins.Reference) + } +} + +// Instructions is an eBPF program. +type Instructions []Instruction + +func (insns Instructions) String() string { + return fmt.Sprint(insns) +} + +// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. +// +// Returns an error if the symbol isn't used, see IsUnreferencedSymbol. +func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { + if symbol == "" { + return errors.New("empty symbol") + } + + found := false + for i := range insns { + ins := &insns[i] + if ins.Reference != symbol { + continue + } + + if err := ins.RewriteMapPtr(fd); err != nil { + return err + } + + found = true + } + + if !found { + return &unreferencedSymbolError{symbol} + } + + return nil +} + +// SymbolOffsets returns the set of symbols and their offset in +// the instructions. +func (insns Instructions) SymbolOffsets() (map[string]int, error) { + offsets := make(map[string]int) + + for i, ins := range insns { + if ins.Symbol == "" { + continue + } + + if _, ok := offsets[ins.Symbol]; ok { + return nil, errors.Errorf("duplicate symbol %s", ins.Symbol) + } + + offsets[ins.Symbol] = i + } + + return offsets, nil +} + +// ReferenceOffsets returns the set of references and their offset in +// the instructions. 
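+//
+// Editor's note (illustrative): references are what RewriteMapPtr above
+// resolves; a typical call is
+//
+//	if err := insns.RewriteMapPtr("my_map", m.FD()); err != nil {
+//		// asm.IsUnreferencedSymbol(err) reports an unused map
+//	}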
+func (insns Instructions) ReferenceOffsets() map[string][]int {
+	offsets := make(map[string][]int)
+
+	for i, ins := range insns {
+		if ins.Reference == "" {
+			continue
+		}
+
+		offsets[ins.Reference] = append(offsets[ins.Reference], i)
+	}
+
+	return offsets
+}
+
+func (insns Instructions) marshalledOffsets() (map[string]int, error) {
+	symbols := make(map[string]int)
+
+	marshalledPos := 0
+	for _, ins := range insns {
+		currentPos := marshalledPos
+		marshalledPos += ins.OpCode.marshalledInstructions()
+
+		if ins.Symbol == "" {
+			continue
+		}
+
+		if _, ok := symbols[ins.Symbol]; ok {
+			return nil, errors.Errorf("duplicate symbol %s", ins.Symbol)
+		}
+
+		symbols[ins.Symbol] = currentPos
+	}
+
+	return symbols, nil
+}
+
+// Format implements fmt.Formatter.
+//
+// You can control indentation of symbols by
+// specifying a width. Setting a precision controls the indentation of
+// instructions.
+// The default character is a tab, which can be overridden by specifying
+// the ' ' space flag.
+func (insns Instructions) Format(f fmt.State, c rune) {
+	if c != 's' && c != 'v' {
+		fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c)
+		return
+	}
+
+	// Precision is better in this case, because it allows
+	// specifying 0 padding easily.
+	padding, ok := f.Precision()
+	if !ok {
+		padding = 1
+	}
+
+	indent := strings.Repeat("\t", padding)
+	if f.Flag(' ') {
+		indent = strings.Repeat(" ", padding)
+	}
+
+	symPadding, ok := f.Width()
+	if !ok {
+		symPadding = padding - 1
+	}
+	if symPadding < 0 {
+		symPadding = 0
+	}
+
+	symIndent := strings.Repeat("\t", symPadding)
+	if f.Flag(' ') {
+		symIndent = strings.Repeat(" ", symPadding)
+	}
+
+	// Figure out how many digits we need to represent the highest
+	// offset.
+	highestOffset := 0
+	for _, ins := range insns {
+		highestOffset += ins.OpCode.marshalledInstructions()
+	}
+	offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset))))
+
+	offset := 0
+	for _, ins := range insns {
+		if ins.Symbol != "" {
+			fmt.Fprintf(f, "%s%s:\n", symIndent, ins.Symbol)
+		}
+		fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, offset, ins)
+		offset += ins.OpCode.marshalledInstructions()
+	}
+
+	return
+}
+
+// Marshal encodes a BPF program into the kernel format.
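+//
+// Editor's note: an illustrative sketch, not upstream code. The kernel
+// expects host byte order; little-endian is assumed here:
+//
+//	var buf bytes.Buffer
+//	if err := insns.Marshal(&buf, binary.LittleEndian); err != nil {
+//		return err
+//	}
+//	bytecode := buf.Bytes() // a multiple of asm.InstructionSize in length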
+func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
+	absoluteOffsets, err := insns.marshalledOffsets()
+	if err != nil {
+		return err
+	}
+
+	num := 0
+	for i, ins := range insns {
+		switch {
+		case ins.OpCode.JumpOp() == Call && ins.Constant == -1:
+			// Rewrite bpf to bpf call
+			offset, ok := absoluteOffsets[ins.Reference]
+			if !ok {
+				return errors.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
+			}
+
+			ins.Constant = int64(offset - num - 1)
+
+		case ins.OpCode.Class() == JumpClass && ins.Offset == -1:
+			// Rewrite jump to label
+			offset, ok := absoluteOffsets[ins.Reference]
+			if !ok {
+				return errors.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
+			}
+
+			ins.Offset = int16(offset - num - 1)
+		}
+
+		n, err := ins.Marshal(w, bo)
+		if err != nil {
+			return errors.Wrapf(err, "instruction %d", i)
+		}
+
+		num += int(n / InstructionSize)
+	}
+	return nil
+}
+
+type bpfInstruction struct {
+	OpCode    OpCode
+	Registers bpfRegisters
+	Offset    int16
+	Constant  int32
+}
+
+type bpfRegisters uint8
+
+func newBPFRegisters(dst, src Register) bpfRegisters {
+	return bpfRegisters((src << 4) | (dst & 0xF))
+}
+
+func (r bpfRegisters) Dst() Register {
+	return Register(r & 0xF)
+}
+
+func (r bpfRegisters) Src() Register {
+	return Register(r >> 4)
+}
+
+type unreferencedSymbolError struct {
+	symbol string
+}
+
+func (use *unreferencedSymbolError) Error() string {
+	return fmt.Sprintf("unreferenced symbol %s", use.symbol)
+}
+
+// IsUnreferencedSymbol returns true if err was caused by
+// an unreferenced symbol.
+func IsUnreferencedSymbol(err error) bool {
+	_, ok := err.(*unreferencedSymbolError)
+	return ok
+}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump.go
new file mode 100644
index 0000000000..33c9b56562
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump.go
@@ -0,0 +1,109 @@
+package asm
+
+//go:generate stringer -output jump_string.go -type=JumpOp
+
+// JumpOp affects control flow.
+//
+// msb      lsb
+// +----+-+---+
+// |OP  |s|cls|
+// +----+-+---+
+type JumpOp uint8
+
+const jumpMask OpCode = aluMask
+
+const (
+	// InvalidJumpOp is returned by getters when invoked
+	// on non branch OpCodes
+	InvalidJumpOp JumpOp = 0xff
+	// Ja jumps by offset unconditionally
+	Ja JumpOp = 0x00
+	// JEq jumps by offset if r == imm
+	JEq JumpOp = 0x10
+	// JGT jumps by offset if r > imm
+	JGT JumpOp = 0x20
+	// JGE jumps by offset if r >= imm
+	JGE JumpOp = 0x30
+	// JSet jumps by offset if r & imm
+	JSet JumpOp = 0x40
+	// JNE jumps by offset if r != imm
+	JNE JumpOp = 0x50
+	// JSGT jumps by offset if signed r > signed imm
+	JSGT JumpOp = 0x60
+	// JSGE jumps by offset if signed r >= signed imm
+	JSGE JumpOp = 0x70
+	// Call builtin or user defined function from imm
+	Call JumpOp = 0x80
+	// Exit ends execution, with value in r0
+	Exit JumpOp = 0x90
+	// JLT jumps by offset if r < imm
+	JLT JumpOp = 0xa0
+	// JLE jumps by offset if r <= imm
+	JLE JumpOp = 0xb0
+	// JSLT jumps by offset if signed r < signed imm
+	JSLT JumpOp = 0xc0
+	// JSLE jumps by offset if signed r <= signed imm
+	JSLE JumpOp = 0xd0
+)
+
+// Return emits an exit instruction.
+//
+// Requires a return value in R0.
+func Return() Instruction {
+	return Instruction{
+		OpCode: OpCode(JumpClass).SetJumpOp(Exit),
+	}
+}
+
+// Op returns the OpCode for a given jump source.
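+// For example (editor's note, illustrative): JEq.Op(ImmSource) encodes
+// BPF_JMP|BPF_K|BPF_JEQ, i.e. opcode 0x15. A label-based branch can be
+// assembled as:
+//
+//	insns := asm.Instructions{
+//		asm.Mov.Imm(asm.R0, 0),
+//		asm.JEq.Imm(asm.R1, 0, "exit"), // jump to "exit" when r1 == 0
+//		asm.Mov.Imm(asm.R0, 1),
+//		asm.Return().Sym("exit"),
+//	}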
+func (op JumpOp) Op(source Source) OpCode { + return OpCode(JumpClass).SetJumpOp(op).SetSource(source) +} + +// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { + if op == Exit || op == Call || op == Ja { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + Reference: label, + } +} + +// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Reg(dst, src Register, label string) Instruction { + if op == Exit || op == Call || op == Ja { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource), + Dst: dst, + Src: src, + Offset: -1, + Reference: label, + } +} + +// Label adjusts PC to the address of the label. +func (op JumpOp) Label(label string) Instruction { + if op == Call { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: R1, + Constant: -1, + Reference: label, + } + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + Reference: label, + } +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump_string.go new file mode 100644 index 0000000000..85a4aaffa5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidJumpOp-255] + _ = x[Ja-0] + _ = x[JEq-16] + _ = x[JGT-32] + _ = x[JGE-48] + _ = x[JSet-64] + _ = x[JNE-80] + _ = x[JSGT-96] + _ = x[JSGE-112] + _ = x[Call-128] + _ = x[Exit-144] + _ = x[JLT-160] + _ = x[JLE-176] + _ = x[JSLT-192] + _ = x[JSLE-208] +} + +const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp" + +var _JumpOp_map = map[JumpOp]string{ + 0: _JumpOp_name[0:2], + 16: _JumpOp_name[2:5], + 32: _JumpOp_name[5:8], + 48: _JumpOp_name[8:11], + 64: _JumpOp_name[11:15], + 80: _JumpOp_name[15:18], + 96: _JumpOp_name[18:22], + 112: _JumpOp_name[22:26], + 128: _JumpOp_name[26:30], + 144: _JumpOp_name[30:34], + 160: _JumpOp_name[34:37], + 176: _JumpOp_name[37:40], + 192: _JumpOp_name[40:44], + 208: _JumpOp_name[44:48], + 255: _JumpOp_name[48:61], +} + +func (i JumpOp) String() string { + if str, ok := _JumpOp_map[i]; ok { + return str + } + return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/load_store.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/load_store.go new file mode 100644 index 0000000000..ab0e92fc3c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -0,0 +1,189 @@ +package asm + +//go:generate stringer -output load_store_string.go -type=Mode,Size + +// Mode for load and store operations +// +// msb lsb +// +---+--+---+ +// |MDE|sz|cls| +// +---+--+---+ +type Mode uint8 + +const modeMask OpCode = 0xe0 + +const ( + // InvalidMode is returned by getters when invoked + // on non load / store OpCodes + InvalidMode Mode = 0xff + // ImmMode - immediate value + ImmMode Mode = 0x00 + // AbsMode - immediate value + offset + AbsMode Mode = 0x20 + // IndMode - indirect (imm+src) + IndMode Mode = 0x40 + // MemMode - load from memory + MemMode Mode = 0x60 + // XAddMode - add atomically across processors. + XAddMode Mode = 0xc0 +) + +// Size of load and store operations +// +// msb lsb +// +---+--+---+ +// |mde|SZ|cls| +// +---+--+---+ +type Size uint8 + +const sizeMask OpCode = 0x18 + +const ( + // InvalidSize is returned by getters when invoked + // on non load / store OpCodes + InvalidSize Size = 0xff + // DWord - double word; 64 bits + DWord Size = 0x18 + // Word - word; 32 bits + Word Size = 0x00 + // Half - half-word; 16 bits + Half Size = 0x08 + // Byte - byte; 8 bits + Byte Size = 0x10 +) + +// Sizeof returns the size in bytes. +func (s Size) Sizeof() int { + switch s { + case DWord: + return 8 + case Word: + return 4 + case Half: + return 2 + case Byte: + return 1 + default: + return -1 + } +} + +// LoadMemOp returns the OpCode to load a value of given size from memory. +func LoadMemOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemMode).SetSize(size) +} + +// LoadMem emits `dst = *(size *)(src + offset)`. +func LoadMem(dst, src Register, offset int16, size Size) Instruction { + return Instruction{ + OpCode: LoadMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadImmOp returns the OpCode to load an immediate of given size. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImmOp(size Size) OpCode { + return OpCode(LdClass).SetMode(ImmMode).SetSize(size) +} + +// LoadImm emits `dst = (size)value`. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImm(dst Register, value int64, size Size) Instruction { + return Instruction{ + OpCode: LoadImmOp(size), + Dst: dst, + Constant: value, + } +} + +// LoadMapPtr stores a pointer to a map in dst. 
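+//
+// Editor's note (illustrative sketch): combined with FnMapLookupElem this
+// yields the canonical lookup sequence, assuming a 4-byte key was already
+// stored at RFP-4 and m is an *ebpf.Map; the instructions belong in an
+// asm.Instructions literal:
+//
+//	asm.LoadMapPtr(asm.R1, m.FD()),
+//	asm.Mov.Reg(asm.R2, asm.RFP),
+//	asm.Add.Imm(asm.R2, -4),
+//	asm.FnMapLookupElem.Call(),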
+func LoadMapPtr(dst Register, fd int) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: R1, + Constant: int64(fd), + } +} + +// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadIndOp(size Size) OpCode { + return OpCode(LdClass).SetMode(IndMode).SetSize(size) +} + +// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`. +func LoadInd(dst, src Register, offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadIndOp(size), + Dst: dst, + Src: src, + Constant: int64(offset), + } +} + +// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadAbsOp(size Size) OpCode { + return OpCode(LdClass).SetMode(AbsMode).SetSize(size) +} + +// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`. +func LoadAbs(offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadAbsOp(size), + Dst: R0, + Constant: int64(offset), + } +} + +// StoreMemOp returns the OpCode for storing a register of given size in memory. +func StoreMemOp(size Size) OpCode { + return OpCode(StXClass).SetMode(MemMode).SetSize(size) +} + +// StoreMem emits `*(size *)(dst + offset) = src` +func StoreMem(dst Register, offset int16, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// StoreImmOp returns the OpCode for storing an immediate of given size in memory. +func StoreImmOp(size Size) OpCode { + return OpCode(StClass).SetMode(MemMode).SetSize(size) +} + +// StoreImm emits `*(size *)(dst + offset) = value`. +func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { + return Instruction{ + OpCode: StoreImmOp(size), + Dst: dst, + Offset: offset, + Constant: value, + } +} + +// StoreXAddOp returns the OpCode to atomically add a register to a value in memory. +func StoreXAddOp(size Size) OpCode { + return OpCode(StXClass).SetMode(XAddMode).SetSize(size) +} + +// StoreXAdd atomically adds src to *dst. +func StoreXAdd(dst, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreXAddOp(size), + Dst: dst, + Src: src, + } +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/load_store_string.go new file mode 100644 index 0000000000..76d29a0756 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -0,0 +1,80 @@ +// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+	var x [1]struct{}
+	_ = x[InvalidMode-255]
+	_ = x[ImmMode-0]
+	_ = x[AbsMode-32]
+	_ = x[IndMode-64]
+	_ = x[MemMode-96]
+	_ = x[XAddMode-192]
+}
+
+const (
+	_Mode_name_0 = "ImmMode"
+	_Mode_name_1 = "AbsMode"
+	_Mode_name_2 = "IndMode"
+	_Mode_name_3 = "MemMode"
+	_Mode_name_4 = "XAddMode"
+	_Mode_name_5 = "InvalidMode"
)

func (i Mode) String() string {
	switch {
	case i == 0:
		return _Mode_name_0
	case i == 32:
		return _Mode_name_1
	case i == 64:
		return _Mode_name_2
	case i == 96:
		return _Mode_name_3
	case i == 192:
		return _Mode_name_4
	case i == 255:
		return _Mode_name_5
	default:
		return "Mode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[InvalidSize-255]
	_ = x[DWord-24]
	_ = x[Word-0]
	_ = x[Half-8]
	_ = x[Byte-16]
}

const (
	_Size_name_0 = "Word"
	_Size_name_1 = "Half"
	_Size_name_2 = "Byte"
	_Size_name_3 = "DWord"
	_Size_name_4 = "InvalidSize"
)

func (i Size) String() string {
	switch {
	case i == 0:
		return _Size_name_0
	case i == 8:
		return _Size_name_1
	case i == 16:
		return _Size_name_2
	case i == 24:
		return _Size_name_3
	case i == 255:
		return _Size_name_4
	default:
		return "Size(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode.go
new file mode 100644
index 0000000000..d796de3fe0
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode.go
@@ -0,0 +1,237 @@
+package asm
+
+import (
+	"fmt"
+	"strings"
+)
+
+//go:generate stringer -output opcode_string.go -type=Class
+
+type encoding int
+
+const (
+	unknownEncoding encoding = iota
+	loadOrStore
+	jumpOrALU
+)
+
+// Class of operations
+//
+// msb      lsb
+// +---+--+---+
+// |  ??  |CLS|
+// +---+--+---+
+type Class uint8
+
+const classMask OpCode = 0x07
+
+const (
+	// LdClass loads immediate values and packet data (imm, abs and ind modes)
+	LdClass Class = 0x00
+	// LdXClass loads from memory into a register
+	LdXClass Class = 0x01
+	// StClass stores an immediate value to memory
+	StClass Class = 0x02
+	// StXClass stores a register to memory
+	StXClass Class = 0x03
+	// ALUClass arithmetic operators
+	ALUClass Class = 0x04
+	// JumpClass jump operators
+	JumpClass Class = 0x05
+	// ALU64Class arithmetic in 64 bit mode
+	ALU64Class Class = 0x07
+)
+
+func (cls Class) encoding() encoding {
+	switch cls {
+	case LdClass, LdXClass, StClass, StXClass:
+		return loadOrStore
+	case ALU64Class, ALUClass, JumpClass:
+		return jumpOrALU
+	default:
+		return unknownEncoding
+	}
+}
+
+// OpCode is a packed eBPF opcode.
+//
+// Its encoding is defined by a Class value:
+//
+// msb      lsb
+// +----+-+---+
+// | ????  |CLS|
+// +----+-+---+
+type OpCode uint8
+
+// InvalidOpCode is returned by setters on OpCode
+const InvalidOpCode OpCode = 0xff
+
+// marshalledInstructions returns the number of BPF instructions required
+// to encode this opcode.
+func (op OpCode) marshalledInstructions() int {
+	if op == LoadImmOp(DWord) {
+		return 2
+	}
+	return 1
+}
+
+func (op OpCode) isDWordLoad() bool {
+	return op == LoadImmOp(DWord)
+}
+
+// Class returns the class of operation.
+func (op OpCode) Class() Class {
+	return Class(op & classMask)
+}
+
+// Mode returns the mode for load and store operations.
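+//
+// Editor's note (illustrative): asm.LoadMemOp(asm.Word) is opcode 0x61,
+// which this family of getters decomposes into LdXClass, MemMode and Word.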
+func (op OpCode) Mode() Mode { + if op.Class().encoding() != loadOrStore { + return InvalidMode + } + return Mode(op & modeMask) +} + +// Size returns the size for load and store operations. +func (op OpCode) Size() Size { + if op.Class().encoding() != loadOrStore { + return InvalidSize + } + return Size(op & sizeMask) +} + +// Source returns the source for branch and ALU operations. +func (op OpCode) Source() Source { + if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap { + return InvalidSource + } + return Source(op & sourceMask) +} + +// ALUOp returns the ALUOp. +func (op OpCode) ALUOp() ALUOp { + if op.Class().encoding() != jumpOrALU { + return InvalidALUOp + } + return ALUOp(op & aluMask) +} + +// Endianness returns the Endianness for a byte swap instruction. +func (op OpCode) Endianness() Endianness { + if op.ALUOp() != Swap { + return InvalidEndian + } + return Endianness(op & endianMask) +} + +// JumpOp returns the JumpOp. +func (op OpCode) JumpOp() JumpOp { + if op.Class().encoding() != jumpOrALU { + return InvalidJumpOp + } + return JumpOp(op & jumpMask) +} + +// SetMode sets the mode on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetMode(mode Mode) OpCode { + if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) { + return InvalidOpCode + } + return (op & ^modeMask) | OpCode(mode) +} + +// SetSize sets the size on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetSize(size Size) OpCode { + if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) { + return InvalidOpCode + } + return (op & ^sizeMask) | OpCode(size) +} + +// SetSource sets the source on jump and ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetSource(source Source) OpCode { + if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) { + return InvalidOpCode + } + return (op & ^sourceMask) | OpCode(source) +} + +// SetALUOp sets the ALUOp on ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetALUOp(alu ALUOp) OpCode { + class := op.Class() + if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) { + return InvalidOpCode + } + return (op & ^aluMask) | OpCode(alu) +} + +// SetJumpOp sets the JumpOp on jump operations. +// +// Returns InvalidOpCode if op is of the wrong class. 
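+//
+// Editor's note (illustrative): the setters chain, so a conditional jump
+// opcode can be built as
+//
+//	op := asm.OpCode(asm.JumpClass).SetJumpOp(asm.JEq).SetSource(asm.ImmSource)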
+func (op OpCode) SetJumpOp(jump JumpOp) OpCode { + if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) { + return InvalidOpCode + } + return (op & ^jumpMask) | OpCode(jump) +} + +func (op OpCode) String() string { + var f strings.Builder + + switch class := op.Class(); class { + case LdClass, LdXClass, StClass, StXClass: + f.WriteString(strings.TrimSuffix(class.String(), "Class")) + + mode := op.Mode() + f.WriteString(strings.TrimSuffix(mode.String(), "Mode")) + + switch op.Size() { + case DWord: + f.WriteString("DW") + case Word: + f.WriteString("W") + case Half: + f.WriteString("H") + case Byte: + f.WriteString("B") + } + + case ALU64Class, ALUClass: + f.WriteString(op.ALUOp().String()) + + if op.ALUOp() == Swap { + // Width for Endian is controlled by Constant + f.WriteString(op.Endianness().String()) + } else { + if class == ALUClass { + f.WriteString("32") + } + + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + } + + case JumpClass: + f.WriteString(op.JumpOp().String()) + if jop := op.JumpOp(); jop != Exit && jop != Call { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + } + + default: + fmt.Fprintf(&f, "%#x", op) + } + + return f.String() +} + +// valid returns true if all bits in value are covered by mask. +func valid(value, mask OpCode) bool { + return value & ^mask == 0 +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode_string.go new file mode 100644 index 0000000000..079ce1db0b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -0,0 +1,38 @@ +// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[LdClass-0] + _ = x[LdXClass-1] + _ = x[StClass-2] + _ = x[StXClass-3] + _ = x[ALUClass-4] + _ = x[JumpClass-5] + _ = x[ALU64Class-7] +} + +const ( + _Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass" + _Class_name_1 = "ALU64Class" +) + +var ( + _Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47} +) + +func (i Class) String() string { + switch { + case 0 <= i && i <= 5: + return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]] + case i == 7: + return _Class_name_1 + default: + return "Class(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/register.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/register.go new file mode 100644 index 0000000000..4f284fbe7d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/register.go @@ -0,0 +1,42 @@ +package asm + +import ( + "fmt" +) + +// Register is the source or destination of most operations. +type Register uint8 + +// R0 contains return values. +const R0 Register = 0 + +// Registers for function arguments. +const ( + R1 Register = R0 + 1 + iota + R2 + R3 + R4 + R5 +) + +// Callee saved registers preserved by function calls. +const ( + R6 Register = R5 + 1 + iota + R7 + R8 + R9 +) + +// Read-only frame pointer to access stack. 
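+//
+// Editor's note (illustrative): stack slots sit at negative offsets from
+// RFP, e.g. asm.StoreMem(asm.RFP, -8, asm.R0, asm.DWord) spills R0.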
+const (
+	R10 Register = R9 + 1
+	RFP          = R10
+)
+
+func (r Register) String() string {
+	v := uint8(r)
+	if v == 10 {
+		return "rfp"
+	}
+	return fmt.Sprintf("r%d", v)
+}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/collection.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/collection.go
new file mode 100644
index 0000000000..5ad1a5ec4b
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/collection.go
@@ -0,0 +1,148 @@
+package ebpf
+
+import (
+	"github.com/cilium/ebpf/asm"
+	"github.com/pkg/errors"
+)
+
+// CollectionOptions control loading a collection into the kernel.
+type CollectionOptions struct {
+	Programs ProgramOptions
+}
+
+// CollectionSpec describes a collection.
+type CollectionSpec struct {
+	Maps     map[string]*MapSpec
+	Programs map[string]*ProgramSpec
+}
+
+// Copy returns a recursive copy of the spec.
+func (cs *CollectionSpec) Copy() *CollectionSpec {
+	if cs == nil {
+		return nil
+	}
+
+	cpy := CollectionSpec{
+		Maps:     make(map[string]*MapSpec, len(cs.Maps)),
+		Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
+	}
+
+	for name, spec := range cs.Maps {
+		cpy.Maps[name] = spec.Copy()
+	}
+
+	for name, spec := range cs.Programs {
+		cpy.Programs[name] = spec.Copy()
+	}
+
+	return &cpy
+}
+
+// Collection is a collection of Programs and Maps associated
+// with their symbols.
type Collection struct {
+	Programs map[string]*Program
+	Maps     map[string]*Map
+}
+
+// NewCollection creates a Collection from a specification.
+//
+// Only maps referenced by at least one of the programs are initialized.
+func NewCollection(spec *CollectionSpec) (*Collection, error) {
+	return NewCollectionWithOptions(spec, CollectionOptions{})
+}
+
+// NewCollectionWithOptions creates a Collection from a specification.
+//
+// Only maps referenced by at least one of the programs are initialized.
+func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
+	maps := make(map[string]*Map)
+	for mapName, mapSpec := range spec.Maps {
+		m, err := NewMap(mapSpec)
+		if err != nil {
+			return nil, errors.Wrapf(err, "map %s", mapName)
+		}
+		maps[mapName] = m
+	}
+
+	progs := make(map[string]*Program)
+	for progName, origProgSpec := range spec.Programs {
+		progSpec := origProgSpec.Copy()
+
+		// Rewrite any reference to a valid map.
+		for i := range progSpec.Instructions {
+			var (
+				ins = &progSpec.Instructions[i]
+				m   = maps[ins.Reference]
+			)
+
+			if ins.Reference == "" || m == nil {
+				continue
+			}
+
+			if ins.Src == asm.R1 {
+				// Don't overwrite maps already rewritten, users can
+				// rewrite programs in the spec themselves.
+				continue
+			}
+
+			if err := ins.RewriteMapPtr(m.FD()); err != nil {
+				return nil, errors.Wrapf(err, "program %s: map %s", progName, ins.Reference)
+			}
+		}
+
+		prog, err := NewProgramWithOptions(progSpec, opts.Programs)
+		if err != nil {
+			return nil, errors.Wrapf(err, "program %s", progName)
+		}
+		progs[progName] = prog
+	}
+
+	return &Collection{
+		progs,
+		maps,
+	}, nil
+}
+
+// LoadCollection parses an object file and converts it to a collection.
+func LoadCollection(file string) (*Collection, error) {
+	spec, err := LoadCollectionSpec(file)
+	if err != nil {
+		return nil, err
+	}
+	return NewCollection(spec)
+}
+
+// Close frees all maps and programs associated with the collection.
+//
+// The collection mustn't be used afterwards.
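+//
+// A minimal lifecycle sketch; the object path "filter.o" is illustrative:
+//
+//	spec, err := LoadCollectionSpec("filter.o")
+//	if err != nil {
+//		return err
+//	}
+//	coll, err := NewCollection(spec)
+//	if err != nil {
+//		return err
+//	}
+//	defer coll.Close()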
+func (coll *Collection) Close() { + for _, prog := range coll.Programs { + prog.Close() + } + for _, m := range coll.Maps { + m.Close() + } +} + +// DetachMap removes the named map from the Collection. +// +// This means that a later call to Close() will not affect this map. +// +// Returns nil if no map of that name exists. +func (coll *Collection) DetachMap(name string) *Map { + m := coll.Maps[name] + delete(coll.Maps, name) + return m +} + +// DetachProgram removes the named program from the Collection. +// +// This means that a later call to Close() will not affect this program. +// +// Returns nil if no program of that name exists. +func (coll *Collection) DetachProgram(name string) *Program { + p := coll.Programs[name] + delete(coll.Programs, name) + return p +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/doc.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/doc.go new file mode 100644 index 0000000000..d96e6b1e6d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/doc.go @@ -0,0 +1,17 @@ +// Package ebpf is a toolkit for working with eBPF programs. +// +// eBPF programs are small snippets of code which are executed directly +// in a VM in the Linux kernel, which makes them very fast and flexible. +// Many Linux subsystems now accept eBPF programs. This makes it possible +// to implement highly application specific logic inside the kernel, +// without having to modify the actual kernel itself. +// +// This package is designed for long-running processes which +// want to use eBPF to implement part of their application logic. It has no +// run-time dependencies outside of the library and the Linux kernel itself. +// eBPF code should be compiled ahead of time using clang, and shipped with +// your application as any other resource. +// +// This package doesn't include code required to attach eBPF to Linux +// subsystems, since this varies per subsystem. +package ebpf diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader.go new file mode 100644 index 0000000000..3bdc0849bd --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader.go @@ -0,0 +1,392 @@ +package ebpf + +import ( + "bytes" + "debug/elf" + "encoding/binary" + "fmt" + "io" + "os" + "strings" + + "github.com/cilium/ebpf/asm" + + "github.com/pkg/errors" +) + +type elfCode struct { + *elf.File + symbols []elf.Symbol + symbolsPerSection map[elf.SectionIndex]map[uint64]string +} + +// LoadCollectionSpec parses an ELF file into a CollectionSpec. +func LoadCollectionSpec(file string) (*CollectionSpec, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + spec, err := LoadCollectionSpecFromReader(f) + return spec, errors.Wrapf(err, "file %s", file) +} + +// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec. 
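+//
+// A sketch for objects that are already in memory; bytes.NewReader
+// provides the io.ReaderAt, and objBytes is illustrative:
+//
+//	spec, err := LoadCollectionSpecFromReader(bytes.NewReader(objBytes))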
+func LoadCollectionSpecFromReader(code io.ReaderAt) (*CollectionSpec, error) { + f, err := elf.NewFile(code) + if err != nil { + return nil, err + } + defer f.Close() + + symbols, err := f.Symbols() + if err != nil { + return nil, errors.Wrap(err, "load symbols") + } + + ec := &elfCode{f, symbols, symbolsPerSection(symbols)} + + var licenseSection, versionSection *elf.Section + progSections := make(map[elf.SectionIndex]*elf.Section) + relSections := make(map[elf.SectionIndex]*elf.Section) + mapSections := make(map[elf.SectionIndex]*elf.Section) + for i, sec := range ec.Sections { + switch { + case strings.HasPrefix(sec.Name, "license"): + licenseSection = sec + case strings.HasPrefix(sec.Name, "version"): + versionSection = sec + case strings.HasPrefix(sec.Name, "maps"): + mapSections[elf.SectionIndex(i)] = sec + case sec.Type == elf.SHT_REL: + if int(sec.Info) >= len(ec.Sections) { + return nil, errors.Errorf("found relocation section %v for missing section %v", i, sec.Info) + } + + // Store relocations under the section index of the target + idx := elf.SectionIndex(sec.Info) + if relSections[idx] != nil { + return nil, errors.Errorf("section %d has multiple relocation sections", idx) + } + relSections[idx] = sec + case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: + progSections[elf.SectionIndex(i)] = sec + } + } + + license, err := loadLicense(licenseSection) + if err != nil { + return nil, errors.Wrap(err, "load license") + } + + version, err := loadVersion(versionSection, ec.ByteOrder) + if err != nil { + return nil, errors.Wrap(err, "load version") + } + + maps, err := ec.loadMaps(mapSections) + if err != nil { + return nil, errors.Wrap(err, "load maps") + } + + progs, libs, err := ec.loadPrograms(progSections, relSections, license, version) + if err != nil { + return nil, errors.Wrap(err, "load programs") + } + + if len(libs) > 0 { + for name, prog := range progs { + prog.Instructions, err = link(prog.Instructions, libs...) 
+ if err != nil { + return nil, errors.Wrapf(err, "program %s", name) + } + } + } + + return &CollectionSpec{maps, progs}, nil +} + +func loadLicense(sec *elf.Section) (string, error) { + if sec == nil { + return "", errors.Errorf("missing license section") + } + data, err := sec.Data() + if err != nil { + return "", errors.Wrapf(err, "section %s", sec.Name) + } + return string(bytes.TrimRight(data, "\000")), nil +} + +func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { + if sec == nil { + return 0, nil + } + + var version uint32 + err := binary.Read(sec.Open(), bo, &version) + return version, errors.Wrapf(err, "section %s", sec.Name) +} + +func (ec *elfCode) loadPrograms(progSections, relSections map[elf.SectionIndex]*elf.Section, license string, version uint32) (map[string]*ProgramSpec, []asm.Instructions, error) { + var ( + progs = make(map[string]*ProgramSpec) + libs []asm.Instructions + ) + for idx, prog := range progSections { + syms := ec.symbolsPerSection[idx] + if len(syms) == 0 { + return nil, nil, errors.Errorf("section %v: missing symbols", prog.Name) + } + + funcSym := syms[0] + if funcSym == "" { + return nil, nil, errors.Errorf("section %v: no label at start", prog.Name) + } + + rels, err := ec.loadRelocations(relSections[idx]) + if err != nil { + return nil, nil, errors.Wrapf(err, "program %s: can't load relocations", funcSym) + } + + insns, err := ec.loadInstructions(prog, syms, rels) + if err != nil { + return nil, nil, errors.Wrapf(err, "program %s: can't unmarshal instructions", funcSym) + } + + if progType, attachType := getProgType(prog.Name); progType == UnspecifiedProgram { + // There is no single name we can use for "library" sections, + // since they may contain multiple functions. We'll decode the + // labels they contain later on, and then link sections that way. 
+ libs = append(libs, insns) + } else { + progs[funcSym] = &ProgramSpec{ + Name: funcSym, + Type: progType, + AttachType: attachType, + License: license, + KernelVersion: version, + Instructions: insns, + } + } + } + return progs, libs, nil +} + +func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations map[uint64]string) (asm.Instructions, error) { + var ( + r = section.Open() + insns asm.Instructions + ins asm.Instruction + offset uint64 + ) + for { + n, err := ins.Unmarshal(r, ec.ByteOrder) + if err == io.EOF { + return insns, nil + } + if err != nil { + return nil, errors.Wrapf(err, "offset %d", offset) + } + + ins.Symbol = symbols[offset] + ins.Reference = relocations[offset] + + insns = append(insns, ins) + offset += n + } +} + +func (ec *elfCode) loadMaps(mapSections map[elf.SectionIndex]*elf.Section) (map[string]*MapSpec, error) { + var ( + maps = make(map[string]*MapSpec) + b = make([]byte, 1) + ) + for idx, sec := range mapSections { + syms := ec.symbolsPerSection[idx] + if len(syms) == 0 { + return nil, errors.Errorf("section %v: no symbols", sec.Name) + } + + if sec.Size%uint64(len(syms)) != 0 { + return nil, errors.Errorf("section %v: map descriptors are not of equal size", sec.Name) + } + + var ( + r = sec.Open() + size = sec.Size / uint64(len(syms)) + ) + for i, offset := 0, uint64(0); i < len(syms); i, offset = i+1, offset+size { + mapSym := syms[offset] + if mapSym == "" { + fmt.Println(syms) + return nil, errors.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset) + } + + if maps[mapSym] != nil { + return nil, errors.Errorf("section %v: map %v already exists", sec.Name, mapSym) + } + + lr := io.LimitReader(r, int64(size)) + + var spec MapSpec + switch { + case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: + return nil, errors.Errorf("map %v: missing type", mapSym) + case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: + return nil, errors.Errorf("map %v: missing key size", mapSym) + case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: + return nil, errors.Errorf("map %v: missing value size", mapSym) + case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil: + return nil, errors.Errorf("map %v: missing max entries", mapSym) + case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: + return nil, errors.Errorf("map %v: missing flags", mapSym) + } + + for { + _, err := lr.Read(b) + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if b[0] != 0 { + return nil, errors.Errorf("map %v: unknown and non-zero fields in definition", mapSym) + } + } + + maps[mapSym] = &spec + } + } + return maps, nil +} + +func getProgType(v string) (ProgramType, AttachType) { + types := map[string]ProgramType{ + // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c#n3568 + "socket": SocketFilter, + "seccomp": SocketFilter, + "kprobe/": Kprobe, + "kretprobe/": Kprobe, + "tracepoint/": TracePoint, + "xdp": XDP, + "perf_event": PerfEvent, + "sockops": SockOps, + "sk_skb": SkSKB, + "sk_msg": SkMsg, + "lirc_mode2": LircMode2, + "flow_dissector": FlowDissector, + + "cgroup_skb/": CGroupSKB, + "cgroup/dev": CGroupDevice, + "cgroup/skb": CGroupSKB, + "cgroup/sock": CGroupSock, + "cgroup/post_bind": CGroupSock, + "cgroup/bind": CGroupSockAddr, + "cgroup/connect": CGroupSockAddr, + "cgroup/sendmsg": CGroupSockAddr, + "cgroup/recvmsg": CGroupSockAddr, + "cgroup/sysctl": CGroupSysctl, + "cgroup/getsockopt": CGroupSockopt, + "cgroup/setsockopt": CGroupSockopt, + "classifier": 
SchedCLS,
+		"action":     SchedACT,
+	}
+	attachTypes := map[string]AttachType{
+		"cgroup_skb/ingress":    AttachCGroupInetIngress,
+		"cgroup_skb/egress":     AttachCGroupInetEgress,
+		"cgroup/sock":           AttachCGroupInetSockCreate,
+		"cgroup/post_bind4":     AttachCGroupInet4PostBind,
+		"cgroup/post_bind6":     AttachCGroupInet6PostBind,
+		"cgroup/dev":            AttachCGroupDevice,
+		"sockops":               AttachCGroupSockOps,
+		"sk_skb/stream_parser":  AttachSkSKBStreamParser,
+		"sk_skb/stream_verdict": AttachSkSKBStreamVerdict,
+		"sk_msg":                AttachSkSKBStreamVerdict,
+		"lirc_mode2":            AttachLircMode2,
+		"flow_dissector":        AttachFlowDissector,
+		"cgroup/bind4":          AttachCGroupInet4Bind,
+		"cgroup/bind6":          AttachCGroupInet6Bind,
+		"cgroup/connect4":       AttachCGroupInet4Connect,
+		"cgroup/connect6":       AttachCGroupInet6Connect,
+		"cgroup/sendmsg4":       AttachCGroupUDP4Sendmsg,
+		"cgroup/sendmsg6":       AttachCGroupUDP6Sendmsg,
+		"cgroup/recvmsg4":       AttachCGroupUDP4Recvmsg,
+		"cgroup/recvmsg6":       AttachCGroupUDP6Recvmsg,
+		"cgroup/sysctl":         AttachCGroupSysctl,
+		"cgroup/getsockopt":     AttachCGroupGetsockopt,
+		"cgroup/setsockopt":     AttachCGroupSetsockopt,
+	}
+	attachType := AttachNone
+	for k, t := range attachTypes {
+		if strings.HasPrefix(v, k) {
+			attachType = t
+		}
+	}
+
+	for k, t := range types {
+		if strings.HasPrefix(v, k) {
+			return t, attachType
+		}
+	}
+	return UnspecifiedProgram, AttachNone
+}
+
+func (ec *elfCode) loadRelocations(sec *elf.Section) (map[uint64]string, error) {
+	rels := make(map[uint64]string)
+	if sec == nil {
+		return rels, nil
+	}
+
+	if sec.Entsize < 16 {
+		return nil, errors.New("rels are less than 16 bytes")
+	}
+
+	r := sec.Open()
+	for off := uint64(0); off < sec.Size; off += sec.Entsize {
+		ent := io.LimitReader(r, int64(sec.Entsize))
+
+		var rel elf.Rel64
+		if binary.Read(ent, ec.ByteOrder, &rel) != nil {
+			return nil, errors.Errorf("can't parse relocation at offset %v", off)
+		}
+
+		symNo := int(elf.R_SYM64(rel.Info) - 1)
+		if symNo >= len(ec.symbols) {
+			return nil, errors.Errorf("relocation at offset %d: symbol %v doesn't exist", off, symNo)
+		}
+
+		rels[rel.Off] = ec.symbols[symNo].Name
+	}
+	return rels, nil
+}
+
+func symbolsPerSection(symbols []elf.Symbol) map[elf.SectionIndex]map[uint64]string {
+	result := make(map[elf.SectionIndex]map[uint64]string)
+	for i, sym := range symbols {
+		switch elf.ST_TYPE(sym.Info) {
+		case elf.STT_NOTYPE:
+			// Older versions of LLVM don't tag
+			// symbols correctly.
+ break + case elf.STT_OBJECT: + break + case elf.STT_FUNC: + break + default: + continue + } + + if sym.Name == "" { + continue + } + + idx := sym.Section + if _, ok := result[idx]; !ok { + result[idx] = make(map[uint64]string) + } + result[idx][sym.Value] = symbols[i].Name + } + return result +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/feature.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/feature.go new file mode 100644 index 0000000000..9104bc9c4c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/feature.go @@ -0,0 +1,19 @@ +package ebpf + +import ( + "sync" +) + +type featureTest struct { + Fn func() bool + + once sync.Once + result bool +} + +func (ft *featureTest) Result() bool { + ft.once.Do(func() { + ft.result = ft.Fn() + }) + return ft.result +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/go.mod b/cluster-autoscaler/vendor/github.com/cilium/ebpf/go.mod new file mode 100644 index 0000000000..687bdec9f6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/go.mod @@ -0,0 +1,8 @@ +module github.com/cilium/ebpf + +go 1.12 + +require ( + github.com/pkg/errors v0.8.1 + golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 +) diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/go.sum b/cluster-autoscaler/vendor/github.com/cilium/ebpf/go.sum new file mode 100644 index 0000000000..9edd97934c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/go.sum @@ -0,0 +1,4 @@ +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/cpu.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/cpu.go new file mode 100644 index 0000000000..ce3cab730a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/cpu.go @@ -0,0 +1,64 @@ +package internal + +import ( + "fmt" + "os" + "sync" + + "github.com/pkg/errors" +) + +var sysCPU struct { + once sync.Once + err error + num int +} + +// PossibleCPUs returns the max number of CPUs a system may possibly have +// Logical CPU numbers must be of the form 0-n +func PossibleCPUs() (int, error) { + sysCPU.once.Do(func() { + sysCPU.num, sysCPU.err = parseCPUs("/sys/devices/system/cpu/possible") + }) + + return sysCPU.num, sysCPU.err +} + +var onlineCPU struct { + once sync.Once + err error + num int +} + +// OnlineCPUs returns the number of currently online CPUs +// Logical CPU numbers must be of the form 0-n +func OnlineCPUs() (int, error) { + onlineCPU.once.Do(func() { + onlineCPU.num, onlineCPU.err = parseCPUs("/sys/devices/system/cpu/online") + }) + + return onlineCPU.num, onlineCPU.err +} + +// parseCPUs parses the number of cpus from sysfs, +// in the format of "/sys/devices/system/cpu/{possible,online,..}. 
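+// Such files typically hold a single range, e.g. "0-7" on an 8-CPU system.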
+// Logical CPU numbers must be of the form 0-n
+func parseCPUs(path string) (int, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return 0, err
+	}
+	defer file.Close()
+
+	var low, high int
+	n, _ := fmt.Fscanf(file, "%d-%d", &low, &high)
+	if n < 1 || low != 0 {
+		// err is nil at this point, so the malformed file must be
+		// reported directly instead of wrapping a nil error.
+		return 0, errors.Errorf("%s has unknown format", path)
+	}
+	if n == 1 {
+		high = low
+	}
+
+	// cpus is 0 indexed
+	return high + 1, nil
+}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian.go
new file mode 100644
index 0000000000..ac8a94e512
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian.go
@@ -0,0 +1,24 @@
+package internal
+
+import (
+	"encoding/binary"
+	"unsafe"
+)
+
+// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
+// depending on the host's endianness.
+var NativeEndian binary.ByteOrder
+
+func init() {
+	if isBigEndian() {
+		NativeEndian = binary.BigEndian
+	} else {
+		NativeEndian = binary.LittleEndian
+	}
+}
+
+func isBigEndian() (ret bool) {
+	i := int(0x1)
+	bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i))
+	return bs[0] == 0
+}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
new file mode 100644
index 0000000000..49c6be5b0e
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
@@ -0,0 +1,118 @@
+// +build linux
+
+package unix
+
+import (
+	"syscall"
+
+	linux "golang.org/x/sys/unix"
+)
+
+const (
+	ENOENT                   = linux.ENOENT
+	EAGAIN                   = linux.EAGAIN
+	ENOSPC                   = linux.ENOSPC
+	EINVAL                   = linux.EINVAL
+	EPOLLIN                  = linux.EPOLLIN
+	BPF_OBJ_NAME_LEN         = linux.BPF_OBJ_NAME_LEN
+	BPF_TAG_SIZE             = linux.BPF_TAG_SIZE
+	SYS_BPF                  = linux.SYS_BPF
+	F_DUPFD_CLOEXEC          = linux.F_DUPFD_CLOEXEC
+	EPOLL_CTL_ADD            = linux.EPOLL_CTL_ADD
+	EPOLL_CLOEXEC            = linux.EPOLL_CLOEXEC
+	O_CLOEXEC                = linux.O_CLOEXEC
+	O_NONBLOCK               = linux.O_NONBLOCK
+	PROT_READ                = linux.PROT_READ
+	PROT_WRITE               = linux.PROT_WRITE
+	MAP_SHARED               = linux.MAP_SHARED
+	PERF_TYPE_SOFTWARE       = linux.PERF_TYPE_SOFTWARE
+	PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
+	PerfBitWatermark         = linux.PerfBitWatermark
+	PERF_SAMPLE_RAW          = linux.PERF_SAMPLE_RAW
+	PERF_FLAG_FD_CLOEXEC     = linux.PERF_FLAG_FD_CLOEXEC
+)
+
+// Statfs_t is a wrapper
+type Statfs_t = linux.Statfs_t
+
+// Rlimit is a wrapper
+type Rlimit = linux.Rlimit
+
+// Setrlimit is a wrapper
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+	return linux.Setrlimit(resource, rlim)
+}
+
+// Syscall is a wrapper
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+	return linux.Syscall(trap, a1, a2, a3)
+}
+
+// FcntlInt is a wrapper
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+	return linux.FcntlInt(fd, cmd, arg)
+}
+
+// Statfs is a wrapper
+func Statfs(path string, buf *Statfs_t) (err error) {
+	return linux.Statfs(path, buf)
+}
+
+// Close is a wrapper
+func Close(fd int) (err error) {
+	return linux.Close(fd)
+}
+
+// EpollEvent is a wrapper
+type EpollEvent = linux.EpollEvent
+
+// EpollWait is a wrapper
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+	return linux.EpollWait(epfd, events, msec)
+}
+
+// EpollCtl is a wrapper
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+	return linux.EpollCtl(epfd, op, fd, event)
+}
+
+// Eventfd is a wrapper
+func Eventfd(initval uint, flags
int) (fd int, err error) { + return linux.Eventfd(initval, flags) +} + +// Write is a wrapper +func Write(fd int, p []byte) (n int, err error) { + return linux.Write(fd, p) +} + +// EpollCreate1 is a wrapper +func EpollCreate1(flag int) (fd int, err error) { + return linux.EpollCreate1(flag) +} + +// PerfEventMmapPage is a wrapper +type PerfEventMmapPage linux.PerfEventMmapPage + +// SetNonblock is a wrapper +func SetNonblock(fd int, nonblocking bool) (err error) { + return linux.SetNonblock(fd, nonblocking) +} + +// Mmap is a wrapper +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return linux.Mmap(fd, offset, length, prot, flags) +} + +// Munmap is a wrapper +func Munmap(b []byte) (err error) { + return linux.Munmap(b) +} + +// PerfEventAttr is a wrapper +type PerfEventAttr = linux.PerfEventAttr + +// PerfEventOpen is a wrapper +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags) +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_other.go new file mode 100644 index 0000000000..a327f2a599 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -0,0 +1,183 @@ +// +build !linux + +package unix + +import ( + "fmt" + "runtime" + "syscall" +) + +var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) + +const ( + ENOENT = syscall.ENOENT + EAGAIN = syscall.EAGAIN + ENOSPC = syscall.ENOSPC + EINVAL = syscall.EINVAL + BPF_OBJ_NAME_LEN = 0x10 + BPF_TAG_SIZE = 0x8 + SYS_BPF = 321 + F_DUPFD_CLOEXEC = 0x406 + EPOLLIN = 0x1 + EPOLL_CTL_ADD = 0x1 + EPOLL_CLOEXEC = 0x80000 + O_CLOEXEC = 0x80000 + O_NONBLOCK = 0x800 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + MAP_SHARED = 0x1 + PERF_TYPE_SOFTWARE = 0x1 + PERF_COUNT_SW_BPF_OUTPUT = 0xa + PerfBitWatermark = 0x4000 + PERF_SAMPLE_RAW = 0x400 + PERF_FLAG_FD_CLOEXEC = 0x8 +) + +// Statfs_t is a wrapper +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid [2]int32 + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +// Rlimit is a wrapper +type Rlimit struct { + Cur uint64 + Max uint64 +} + +// Setrlimit is a wrapper +func Setrlimit(resource int, rlim *Rlimit) (err error) { + return errNonLinux +} + +// Syscall is a wrapper +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return 0, 0, syscall.Errno(1) +} + +// FcntlInt is a wrapper +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return -1, errNonLinux +} + +// Statfs is a wrapper +func Statfs(path string, buf *Statfs_t) error { + return errNonLinux +} + +// Close is a wrapper +func Close(fd int) (err error) { + return errNonLinux +} + +// EpollEvent is a wrapper +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +// EpollWait is a wrapper +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return 0, errNonLinux +} + +// EpollCtl is a wrapper +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return errNonLinux +} + +// Eventfd is a wrapper +func Eventfd(initval uint, flags int) (fd int, err error) { + return 0, errNonLinux +} + +// Write is a wrapper +func Write(fd int, p []byte) (n int, err error) { + return 0, errNonLinux +} + +// EpollCreate1 is a wrapper +func 
EpollCreate1(flag int) (fd int, err error) { + return 0, errNonLinux +} + +// PerfEventMmapPage is a wrapper +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +// SetNonblock is a wrapper +func SetNonblock(fd int, nonblocking bool) (err error) { + return errNonLinux +} + +// Mmap is a wrapper +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return []byte{}, errNonLinux +} + +// Munmap is a wrapper +func Munmap(b []byte) (err error) { + return errNonLinux +} + +// PerfEventAttr is a wrapper +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + Sample_max_stack uint16 +} + +// PerfEventOpen is a wrapper +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return 0, errNonLinux +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/linker.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/linker.go new file mode 100644 index 0000000000..da556c2269 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/linker.go @@ -0,0 +1,58 @@ +package ebpf + +import ( + "github.com/cilium/ebpf/asm" +) + +// link resolves bpf-to-bpf calls. +// +// Each section may contain multiple functions / labels, and is only linked +// if the program being edited references one of these functions. +// +// Sections must not require linking themselves. +func link(insns asm.Instructions, sections ...asm.Instructions) (asm.Instructions, error) { + for _, section := range sections { + var err error + insns, err = linkSection(insns, section) + if err != nil { + return nil, err + } + } + return insns, nil +} + +func linkSection(insns, section asm.Instructions) (asm.Instructions, error) { + // A map of symbols to the libraries which contain them. + symbols, err := section.SymbolOffsets() + if err != nil { + return nil, err + } + + for _, ins := range insns { + if ins.Reference == "" { + continue + } + + if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.R1 { + continue + } + + if ins.Constant != -1 { + // This is already a valid call, no need to link again. + continue + } + + if _, ok := symbols[ins.Reference]; !ok { + // Symbol isn't available in this section + continue + } + + // At this point we know that at least one function in the + // library is called from insns. Merge the two sections. + // The rewrite of ins.Constant happens in asm.Instruction.Marshal. + return append(insns, section...), nil + } + + // None of the functions in the section are called. Do nothing. 
+ return insns, nil +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/map.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/map.go new file mode 100644 index 0000000000..028a913520 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/map.go @@ -0,0 +1,595 @@ +package ebpf + +import ( + "fmt" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" + + "github.com/pkg/errors" +) + +// MapSpec defines a Map. +type MapSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + Type MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + Flags uint32 + // InnerMap is used as a template for ArrayOfMaps and HashOfMaps + InnerMap *MapSpec +} + +func (ms *MapSpec) String() string { + return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags) +} + +// Copy returns a copy of the spec. +func (ms *MapSpec) Copy() *MapSpec { + if ms == nil { + return nil + } + + cpy := *ms + cpy.InnerMap = ms.InnerMap.Copy() + return &cpy +} + +// Map represents a Map file descriptor. +// +// It is not safe to close a map which is used by other goroutines. +// +// Methods which take interface{} arguments by default encode +// them using binary.Read/Write in the machine's native endianness. +// +// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler +// if you require custom encoding. +type Map struct { + fd *bpfFD + abi MapABI + // Per CPU maps return values larger than the size in the spec + fullValueSize int +} + +// NewMapFromFD creates a map from a raw fd. +// +// You should not use fd after calling this function. +func NewMapFromFD(fd int) (*Map, error) { + if fd < 0 { + return nil, errors.New("invalid fd") + } + bpfFd := newBPFFD(uint32(fd)) + + abi, err := newMapABIFromFd(bpfFd) + if err != nil { + bpfFd.forget() + return nil, err + } + return newMap(bpfFd, abi) +} + +// NewMap creates a new Map. +// +// Creating a map for the first time will perform feature detection +// by creating small, temporary maps. 
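+//
+// A minimal sketch; the Hash map type and the sizes are illustrative:
+//
+//	m, err := NewMap(&MapSpec{
+//		Type:       Hash,
+//		KeySize:    4,
+//		ValueSize:  8,
+//		MaxEntries: 64,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer m.Close()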
+func NewMap(spec *MapSpec) (*Map, error) { + if spec.Type != ArrayOfMaps && spec.Type != HashOfMaps { + return createMap(spec, nil) + } + + if spec.InnerMap == nil { + return nil, errors.Errorf("%s requires InnerMap", spec.Type) + } + + template, err := createMap(spec.InnerMap, nil) + if err != nil { + return nil, err + } + defer template.Close() + + return createMap(spec, template.fd) +} + +func createMap(spec *MapSpec, inner *bpfFD) (*Map, error) { + spec = spec.Copy() + + switch spec.Type { + case ArrayOfMaps: + fallthrough + case HashOfMaps: + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.Errorf("ValueSize must be zero or four for map of map") + } + spec.ValueSize = 4 + + case PerfEventArray: + if spec.KeySize != 0 { + return nil, errors.Errorf("KeySize must be zero for perf event array") + } + if spec.ValueSize != 0 { + return nil, errors.Errorf("ValueSize must be zero for perf event array") + } + if spec.MaxEntries == 0 { + n, err := internal.OnlineCPUs() + if err != nil { + return nil, errors.Wrap(err, "perf event array") + } + spec.MaxEntries = uint32(n) + } + + spec.KeySize = 4 + spec.ValueSize = 4 + } + + attr := bpfMapCreateAttr{ + mapType: spec.Type, + keySize: spec.KeySize, + valueSize: spec.ValueSize, + maxEntries: spec.MaxEntries, + flags: spec.Flags, + } + + if inner != nil { + var err error + attr.innerMapFd, err = inner.value() + if err != nil { + return nil, errors.Wrap(err, "map create") + } + } + + name, err := newBPFObjName(spec.Name) + if err != nil { + return nil, errors.Wrap(err, "map create") + } + + if haveObjName.Result() { + attr.mapName = name + } + + fd, err := bpfMapCreate(&attr) + if err != nil { + return nil, errors.Wrap(err, "map create") + } + + return newMap(fd, newMapABIFromSpec(spec)) +} + +func newMap(fd *bpfFD, abi *MapABI) (*Map, error) { + m := &Map{ + fd, + *abi, + int(abi.ValueSize), + } + + if !abi.Type.hasPerCPUValue() { + return m, nil + } + + possibleCPUs, err := internal.PossibleCPUs() + if err != nil { + return nil, err + } + + m.fullValueSize = align(int(abi.ValueSize), 8) * possibleCPUs + return m, nil +} + +func (m *Map) String() string { + return fmt.Sprintf("%s#%d", m.abi.Type, m.fd) +} + +// ABI gets the ABI of the Map +func (m *Map) ABI() MapABI { + return m.abi +} + +// Lookup retrieves a value from a Map. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see IsNotExist. 
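+//
+// A sketch for a map with 4 byte keys and 8 byte values:
+//
+//	var value uint64
+//	err := m.Lookup(uint32(1), &value)
+//	if IsNotExist(err) {
+//		// key 1 is not present
+//	}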
+func (m *Map) Lookup(key, valueOut interface{}) error { + valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) + + if err := m.lookup(key, valuePtr); err != nil { + return err + } + + if valueBytes == nil { + return nil + } + + if m.abi.Type.hasPerCPUValue() { + return unmarshalPerCPUValue(valueOut, int(m.abi.ValueSize), valueBytes) + } + + switch value := valueOut.(type) { + case **Map: + m, err := unmarshalMap(valueBytes) + if err != nil { + return err + } + + (*value).Close() + *value = m + return nil + case *Map: + return errors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil)) + case Map: + return errors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil)) + + case **Program: + p, err := unmarshalProgram(valueBytes) + if err != nil { + return err + } + + (*value).Close() + *value = p + return nil + case *Program: + return errors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil)) + case Program: + return errors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil)) + + default: + return unmarshalBytes(valueOut, valueBytes) + } +} + +// LookupBytes gets a value from Map. +// +// Returns a nil value if a key doesn't exist. +func (m *Map) LookupBytes(key interface{}) ([]byte, error) { + valueBytes := make([]byte, m.fullValueSize) + valuePtr := newPtr(unsafe.Pointer(&valueBytes[0])) + + err := m.lookup(key, valuePtr) + if IsNotExist(err) { + return nil, nil + } + + return valueBytes, err +} + +func (m *Map) lookup(key interface{}, valueOut syscallPtr) error { + keyPtr, err := marshalPtr(key, int(m.abi.KeySize)) + if err != nil { + return errors.WithMessage(err, "can't marshal key") + } + + err = bpfMapLookupElem(m.fd, keyPtr, valueOut) + return errors.WithMessage(err, "lookup failed") +} + +// MapUpdateFlags controls the behaviour of the Map.Update call. +// +// The exact semantics depend on the specific MapType. +type MapUpdateFlags uint64 + +const ( + // UpdateAny creates a new element or update an existing one. + UpdateAny MapUpdateFlags = iota + // UpdateNoExist creates a new element. + UpdateNoExist MapUpdateFlags = 1 << (iota - 1) + // UpdateExist updates an existing element. + UpdateExist +) + +// Put replaces or creates a value in map. +// +// It is equivalent to calling Update with UpdateAny. +func (m *Map) Put(key, value interface{}) error { + return m.Update(key, value, UpdateAny) +} + +// Update changes the value of a key. +func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error { + keyPtr, err := marshalPtr(key, int(m.abi.KeySize)) + if err != nil { + return errors.WithMessage(err, "can't marshal key") + } + + var valuePtr syscallPtr + if m.abi.Type.hasPerCPUValue() { + valuePtr, err = marshalPerCPUValue(value, int(m.abi.ValueSize)) + } else { + valuePtr, err = marshalPtr(value, int(m.abi.ValueSize)) + } + if err != nil { + return errors.WithMessage(err, "can't marshal value") + } + + return bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)) +} + +// Delete removes a value. +// +// Returns an error if the key does not exist, see IsNotExist. +func (m *Map) Delete(key interface{}) error { + keyPtr, err := marshalPtr(key, int(m.abi.KeySize)) + if err != nil { + return errors.WithMessage(err, "can't marshal key") + } + + err = bpfMapDeleteElem(m.fd, keyPtr) + return errors.WithMessage(err, "can't delete key") +} + +// NextKey finds the key following an initial key. +// +// See NextKeyBytes for details. 
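+//
+// A sketch that fetches the first key of a map with uint32 keys;
+// passing nil starts at the beginning:
+//
+//	var first uint32
+//	err := m.NextKey(nil, &first)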
+func (m *Map) NextKey(key, nextKeyOut interface{}) error { + nextKeyPtr, nextKeyBytes := makeBuffer(nextKeyOut, int(m.abi.KeySize)) + + if err := m.nextKey(key, nextKeyPtr); err != nil { + return err + } + + if nextKeyBytes == nil { + return nil + } + + err := unmarshalBytes(nextKeyOut, nextKeyBytes) + return errors.WithMessage(err, "can't unmarshal next key") +} + +// NextKeyBytes returns the key following an initial key as a byte slice. +// +// Passing nil will return the first key. +// +// Use Iterate if you want to traverse all entries in the map. +func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { + nextKey := make([]byte, m.abi.KeySize) + nextKeyPtr := newPtr(unsafe.Pointer(&nextKey[0])) + + err := m.nextKey(key, nextKeyPtr) + if IsNotExist(err) { + return nil, nil + } + + return nextKey, err +} + +func (m *Map) nextKey(key interface{}, nextKeyOut syscallPtr) error { + var ( + keyPtr syscallPtr + err error + ) + + if key != nil { + keyPtr, err = marshalPtr(key, int(m.abi.KeySize)) + if err != nil { + return errors.WithMessage(err, "can't marshal key") + } + } + + err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut) + return errors.WithMessage(err, "can't get next key") +} + +// Iterate traverses a map. +// +// It's safe to create multiple iterators at the same time. +// +// It's not possible to guarantee that all keys in a map will be +// returned if there are concurrent modifications to the map. +func (m *Map) Iterate() *MapIterator { + return newMapIterator(m) +} + +// Close removes a Map +func (m *Map) Close() error { + if m == nil { + // This makes it easier to clean up when iterating maps + // of maps / programs. + return nil + } + + return m.fd.close() +} + +// FD gets the file descriptor of the Map. +// +// Calling this function is invalid after Close has been called. +func (m *Map) FD() int { + fd, err := m.fd.value() + if err != nil { + // Best effort: -1 is the number most likely to be an + // invalid file descriptor. + return -1 + } + + return int(fd) +} + +// Clone creates a duplicate of the Map. +// +// Closing the duplicate does not affect the original, and vice versa. +// Changes made to the map are reflected by both instances however. +// +// Cloning a nil Map returns nil. +func (m *Map) Clone() (*Map, error) { + if m == nil { + return nil, nil + } + + dup, err := m.fd.dup() + if err != nil { + return nil, errors.Wrap(err, "can't clone map") + } + + return newMap(dup, &m.abi) +} + +// Pin persists the map past the lifetime of the process that created it. +// +// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional +func (m *Map) Pin(fileName string) error { + return bpfPinObject(fileName, m.fd) +} + +// LoadPinnedMap load a Map from a BPF file. +// +// Requires at least Linux 4.13, and is not compatible with +// nested maps. Use LoadPinnedMapExplicit in these situations. +func LoadPinnedMap(fileName string) (*Map, error) { + fd, err := bpfGetObject(fileName) + if err != nil { + return nil, err + } + abi, err := newMapABIFromFd(fd) + if err != nil { + _ = fd.close() + return nil, err + } + return newMap(fd, abi) +} + +// LoadPinnedMapExplicit loads a map with explicit parameters. 
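+//
+// A sketch for older kernels; the ABI must match what the map was
+// pinned with, and the path and sizes here are illustrative:
+//
+//	abi := &MapABI{Type: Hash, KeySize: 4, ValueSize: 8, MaxEntries: 64}
+//	m, err := LoadPinnedMapExplicit("/sys/fs/bpf/my_map", abi)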
+func LoadPinnedMapExplicit(fileName string, abi *MapABI) (*Map, error) { + fd, err := bpfGetObject(fileName) + if err != nil { + return nil, err + } + return newMap(fd, abi) +} + +func unmarshalMap(buf []byte) (*Map, error) { + if len(buf) != 4 { + return nil, errors.New("map id requires 4 byte value") + } + + // Looking up an entry in a nested map or prog array returns an id, + // not an fd. + id := internal.NativeEndian.Uint32(buf) + fd, err := bpfGetMapFDByID(id) + if err != nil { + return nil, err + } + + abi, err := newMapABIFromFd(fd) + if err != nil { + _ = fd.close() + return nil, err + } + + return newMap(fd, abi) +} + +// MarshalBinary implements BinaryMarshaler. +func (m *Map) MarshalBinary() ([]byte, error) { + fd, err := m.fd.value() + if err != nil { + return nil, err + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, fd) + return buf, nil +} + +// MapIterator iterates a Map. +// +// See Map.Iterate. +type MapIterator struct { + target *Map + prevKey interface{} + prevBytes []byte + count, maxEntries uint32 + done bool + err error +} + +func newMapIterator(target *Map) *MapIterator { + return &MapIterator{ + target: target, + maxEntries: target.abi.MaxEntries, + prevBytes: make([]byte, int(target.abi.KeySize)), + } +} + +var errIterationAborted = errors.New("iteration aborted") + +// Next decodes the next key and value. +// +// Iterating a hash map from which keys are being deleted is not +// safe. You may see the same key multiple times. Iteration may +// also abort with an error, see IsIterationAborted. +// +// Returns false if there are no more entries. You must check +// the result of Err afterwards. +// +// See Map.Get for further caveats around valueOut. +func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { + if mi.err != nil || mi.done { + return false + } + + for ; mi.count < mi.maxEntries; mi.count++ { + var nextBytes []byte + nextBytes, mi.err = mi.target.NextKeyBytes(mi.prevKey) + if mi.err != nil { + return false + } + + if nextBytes == nil { + mi.done = true + return false + } + + // The user can get access to nextBytes since unmarshalBytes + // does not copy when unmarshaling into a []byte. + // Make a copy to prevent accidental corruption of + // iterator state. + copy(mi.prevBytes, nextBytes) + mi.prevKey = mi.prevBytes + + mi.err = mi.target.Lookup(nextBytes, valueOut) + if IsNotExist(mi.err) { + // Even though the key should be valid, we couldn't look up + // its value. If we're iterating a hash map this is probably + // because a concurrent delete removed the value before we + // could get it. This means that the next call to NextKeyBytes + // is very likely to restart iteration. + // If we're iterating one of the fd maps like + // ProgramArray it means that a given slot doesn't have + // a valid fd associated. It's OK to continue to the next slot. + continue + } + if mi.err != nil { + return false + } + + mi.err = unmarshalBytes(keyOut, nextBytes) + return mi.err == nil + } + + mi.err = errIterationAborted + return false +} + +// Err returns any encountered error. +// +// The method must be called after Next returns nil. +func (mi *MapIterator) Err() error { + return mi.err +} + +// IsNotExist returns true if the error indicates that a +// key doesn't exist. +func IsNotExist(err error) bool { + return errors.Cause(err) == unix.ENOENT +} + +// IsIterationAborted returns true if the iteration was aborted. +// +// This occurs when keys are deleted from a hash map during iteration. 
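+//
+// A typical loop that tolerates the abort; key and value types must
+// match the map being iterated:
+//
+//	var (
+//		key   uint32
+//		value uint64
+//	)
+//	iter := m.Iterate()
+//	for iter.Next(&key, &value) {
+//		// use key and value
+//	}
+//	if err := iter.Err(); err != nil && !IsIterationAborted(err) {
+//		return err
+//	}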
+func IsIterationAborted(err error) bool { + return errors.Cause(err) == errIterationAborted +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/marshalers.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/marshalers.go new file mode 100644 index 0000000000..44ba273305 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/marshalers.go @@ -0,0 +1,192 @@ +package ebpf + +import ( + "bytes" + "encoding" + "encoding/binary" + "reflect" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal" + + "github.com/pkg/errors" +) + +func marshalPtr(data interface{}, length int) (syscallPtr, error) { + if ptr, ok := data.(unsafe.Pointer); ok { + return newPtr(ptr), nil + } + + buf, err := marshalBytes(data, length) + if err != nil { + return syscallPtr{}, err + } + + return newPtr(unsafe.Pointer(&buf[0])), nil +} + +func marshalBytes(data interface{}, length int) (buf []byte, err error) { + switch value := data.(type) { + case encoding.BinaryMarshaler: + buf, err = value.MarshalBinary() + case string: + buf = []byte(value) + case []byte: + buf = value + case unsafe.Pointer: + err = errors.New("can't marshal from unsafe.Pointer") + default: + var wr bytes.Buffer + err = binary.Write(&wr, internal.NativeEndian, value) + err = errors.Wrapf(err, "encoding %T", value) + buf = wr.Bytes() + } + if err != nil { + return nil, err + } + + if len(buf) != length { + return nil, errors.Errorf("%T doesn't marshal to %d bytes", data, length) + } + return buf, nil +} + +func makeBuffer(dst interface{}, length int) (syscallPtr, []byte) { + if ptr, ok := dst.(unsafe.Pointer); ok { + return newPtr(ptr), nil + } + + buf := make([]byte, length) + return newPtr(unsafe.Pointer(&buf[0])), buf +} + +func unmarshalBytes(data interface{}, buf []byte) error { + switch value := data.(type) { + case unsafe.Pointer: + sh := &reflect.SliceHeader{ + Data: uintptr(value), + Len: len(buf), + Cap: len(buf), + } + + dst := *(*[]byte)(unsafe.Pointer(sh)) + copy(dst, buf) + runtime.KeepAlive(value) + return nil + case encoding.BinaryUnmarshaler: + return value.UnmarshalBinary(buf) + case *string: + *value = string(buf) + return nil + case *[]byte: + *value = buf + return nil + case string: + return errors.New("require pointer to string") + case []byte: + return errors.New("require pointer to []byte") + default: + rd := bytes.NewReader(buf) + err := binary.Read(rd, internal.NativeEndian, value) + return errors.Wrapf(err, "decoding %T", value) + } +} + +// marshalPerCPUValue encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. +// +// slice must have a type like []elementType. 
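+//
+// From the public API this is what a per-CPU update looks like; missing
+// trailing elements are zeroed:
+//
+//	values := []uint64{1, 2} // CPUs 2..n-1 are set to 0
+//	err := m.Put(uint32(0), values)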
+func marshalPerCPUValue(slice interface{}, elemLength int) (syscallPtr, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return syscallPtr{}, errors.New("per-CPU value requires slice") + } + + possibleCPUs, err := internal.PossibleCPUs() + if err != nil { + return syscallPtr{}, err + } + + sliceValue := reflect.ValueOf(slice) + sliceLen := sliceValue.Len() + if sliceLen > possibleCPUs { + return syscallPtr{}, errors.Errorf("per-CPU value exceeds number of CPUs") + } + + alignedElemLength := align(elemLength, 8) + buf := make([]byte, alignedElemLength*possibleCPUs) + + for i := 0; i < sliceLen; i++ { + elem := sliceValue.Index(i).Interface() + elemBytes, err := marshalBytes(elem, elemLength) + if err != nil { + return syscallPtr{}, err + } + + offset := i * alignedElemLength + copy(buf[offset:offset+elemLength], elemBytes) + } + + return newPtr(unsafe.Pointer(&buf[0])), nil +} + +// unmarshalPerCPUValue decodes a buffer into a slice containing one value per +// possible CPU. +// +// valueOut must have a type like *[]elementType +func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error { + slicePtrType := reflect.TypeOf(slicePtr) + if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { + return errors.Errorf("per-cpu value requires pointer to slice") + } + + possibleCPUs, err := internal.PossibleCPUs() + if err != nil { + return err + } + + sliceType := slicePtrType.Elem() + slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + if sliceElemIsPointer { + sliceElemType = sliceElemType.Elem() + } + + step := len(buf) / possibleCPUs + if step < elemLength { + return errors.Errorf("per-cpu element length is larger than available data") + } + for i := 0; i < possibleCPUs; i++ { + var elem interface{} + if sliceElemIsPointer { + newElem := reflect.New(sliceElemType) + slice.Index(i).Set(newElem) + elem = newElem.Interface() + } else { + elem = slice.Index(i).Addr().Interface() + } + + // Make a copy, since unmarshal can hold on to itemBytes + elemBytes := make([]byte, elemLength) + copy(elemBytes, buf[:elemLength]) + + err := unmarshalBytes(elem, elemBytes) + if err != nil { + return errors.Wrapf(err, "cpu %d", i) + } + + buf = buf[step:] + } + + reflect.ValueOf(slicePtr).Elem().Set(slice) + return nil +} + +func align(n, alignment int) int { + return (int(n) + alignment - 1) / alignment * alignment +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/prog.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/prog.go new file mode 100644 index 0000000000..03b24fb950 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/prog.go @@ -0,0 +1,523 @@ +package ebpf + +import ( + "bytes" + "fmt" + "math" + "path/filepath" + "strings" + "time" + "unsafe" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" + + "github.com/pkg/errors" +) + +var ( + errNotSupported = errors.New("ebpf: not supported by kernel") +) + +const ( + // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN. + // This is currently the maximum of spare space allocated for SKB + // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN. + outputPad = 256 + 2 +) + +// DefaultVerifierLogSize is the default number of bytes allocated for the +// verifier log. 
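+//
+// To capture verifier output, request logging when loading (a sketch):
+//
+//	prog, err := NewProgramWithOptions(spec, ProgramOptions{LogLevel: 1})
+//	if err == nil {
+//		fmt.Println(prog.VerifierLog)
+//	}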
+const DefaultVerifierLogSize = 64 * 1024 + +// ProgramOptions control loading a program into the kernel. +type ProgramOptions struct { + // Controls the detail emitted by the kernel verifier. Set to non-zero + // to enable logging. + LogLevel uint32 + // Controls the output buffer size for the verifier. Defaults to + // DefaultVerifierLogSize. + LogSize int +} + +// ProgramSpec defines a Program +type ProgramSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + Type ProgramType + AttachType AttachType + Instructions asm.Instructions + License string + KernelVersion uint32 +} + +// Copy returns a copy of the spec. +func (ps *ProgramSpec) Copy() *ProgramSpec { + if ps == nil { + return nil + } + + cpy := *ps + cpy.Instructions = make(asm.Instructions, len(ps.Instructions)) + copy(cpy.Instructions, ps.Instructions) + return &cpy +} + +// Program represents BPF program loaded into the kernel. +// +// It is not safe to close a Program which is used by other goroutines. +type Program struct { + // Contains the output of the kernel verifier if enabled, + // otherwise it is empty. + VerifierLog string + + fd *bpfFD + name string + abi ProgramABI +} + +// NewProgram creates a new Program. +// +// Loading a program for the first time will perform +// feature detection by loading small, temporary programs. +func NewProgram(spec *ProgramSpec) (*Program, error) { + return NewProgramWithOptions(spec, ProgramOptions{}) +} + +// NewProgramWithOptions creates a new Program. +// +// Loading a program for the first time will perform +// feature detection by loading small, temporary programs. +func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + attr, err := convertProgramSpec(spec, haveObjName.Result()) + if err != nil { + return nil, err + } + + logSize := DefaultVerifierLogSize + if opts.LogSize > 0 { + logSize = opts.LogSize + } + + var logBuf []byte + if opts.LogLevel > 0 { + logBuf = make([]byte, logSize) + attr.logLevel = opts.LogLevel + attr.logSize = uint32(len(logBuf)) + attr.logBuf = newPtr(unsafe.Pointer(&logBuf[0])) + } + + fd, err := bpfProgLoad(attr) + if err == nil { + prog := newProgram(fd, spec.Name, &ProgramABI{spec.Type}) + prog.VerifierLog = convertCString(logBuf) + return prog, nil + } + + truncated := errors.Cause(err) == unix.ENOSPC + if opts.LogLevel == 0 { + // Re-run with the verifier enabled to get better error messages. + logBuf = make([]byte, logSize) + attr.logLevel = 1 + attr.logSize = uint32(len(logBuf)) + attr.logBuf = newPtr(unsafe.Pointer(&logBuf[0])) + + _, nerr := bpfProgLoad(attr) + truncated = errors.Cause(nerr) == unix.ENOSPC + } + + logs := convertCString(logBuf) + if truncated { + logs += "\n(truncated...)" + } + + return nil, &loadError{err, logs} +} + +// NewProgramFromFD creates a program from a raw fd. +// +// You should not use fd after calling this function. 
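+//
+// A sketch of adopting a descriptor inherited from another process;
+// the fd value 3 is illustrative:
+//
+//	prog, err := NewProgramFromFD(3)
+//	if err != nil {
+//		return err
+//	}
+//	defer prog.Close()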
+func NewProgramFromFD(fd int) (*Program, error) { + if fd < 0 { + return nil, errors.New("invalid fd") + } + bpfFd := newBPFFD(uint32(fd)) + + info, err := bpfGetProgInfoByFD(bpfFd) + if err != nil { + bpfFd.forget() + return nil, err + } + + var name string + if bpfName := convertCString(info.name[:]); bpfName != "" { + name = bpfName + } else { + name = convertCString(info.tag[:]) + } + + return newProgram(bpfFd, name, newProgramABIFromInfo(info)), nil +} + +func newProgram(fd *bpfFD, name string, abi *ProgramABI) *Program { + return &Program{ + name: name, + fd: fd, + abi: *abi, + } +} + +func convertProgramSpec(spec *ProgramSpec, includeName bool) (*bpfProgLoadAttr, error) { + if len(spec.Instructions) == 0 { + return nil, errors.New("Instructions cannot be empty") + } + + if len(spec.License) == 0 { + return nil, errors.New("License cannot be empty") + } + + buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize)) + err := spec.Instructions.Marshal(buf, internal.NativeEndian) + if err != nil { + return nil, err + } + + bytecode := buf.Bytes() + insCount := uint32(len(bytecode) / asm.InstructionSize) + lic := []byte(spec.License) + attr := &bpfProgLoadAttr{ + progType: spec.Type, + expectedAttachType: spec.AttachType, + insCount: insCount, + instructions: newPtr(unsafe.Pointer(&bytecode[0])), + license: newPtr(unsafe.Pointer(&lic[0])), + } + + name, err := newBPFObjName(spec.Name) + if err != nil { + return nil, err + } + + if includeName { + attr.progName = name + } + + return attr, nil +} + +func (p *Program) String() string { + if p.name != "" { + return fmt.Sprintf("%s(%s)#%s", p.abi.Type, p.name, p.fd) + } + return fmt.Sprintf("%s#%s", p.abi.Type, p.fd) +} + +// ABI gets the ABI of the Program +func (p *Program) ABI() ProgramABI { + return p.abi +} + +// FD gets the file descriptor of the Program. +// +// It is invalid to call this function after Close has been called. +func (p *Program) FD() int { + fd, err := p.fd.value() + if err != nil { + // Best effort: -1 is the number most likely to be an + // invalid file descriptor. + return -1 + } + + return int(fd) +} + +// Clone creates a duplicate of the Program. +// +// Closing the duplicate does not affect the original, and vice versa. +// +// Cloning a nil Program returns nil. +func (p *Program) Clone() (*Program, error) { + if p == nil { + return nil, nil + } + + dup, err := p.fd.dup() + if err != nil { + return nil, errors.Wrap(err, "can't clone program") + } + + return newProgram(dup, p.name, &p.abi), nil +} + +// Pin persists the Program past the lifetime of the process that created it +// +// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional +func (p *Program) Pin(fileName string) error { + return errors.Wrap(bpfPinObject(fileName, p.fd), "can't pin program") +} + +// Close unloads the program from the kernel. +func (p *Program) Close() error { + if p == nil { + return nil + } + + return p.fd.close() +} + +// Test runs the Program in the kernel with the given input and returns the +// value returned by the eBPF program. outLen may be zero. +// +// Note: the kernel expects at least 14 bytes input for an ethernet header for +// XDP and SKB programs. +// +// This function requires at least Linux 4.12. 
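+//
+// A sketch using the smallest input the kernel accepts, a zeroed
+// 14 byte ethernet header:
+//
+//	ret, out, err := prog.Test(make([]byte, 14))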
+func (p *Program) Test(in []byte) (uint32, []byte, error) { + ret, out, _, err := p.testRun(in, 1) + return ret, out, err +} + +// Benchmark runs the Program with the given input for a number of times +// and returns the time taken per iteration. +// +// The returned value is the return value of the last execution of +// the program. +// +// This function requires at least Linux 4.12. +func (p *Program) Benchmark(in []byte, repeat int) (uint32, time.Duration, error) { + ret, _, total, err := p.testRun(in, repeat) + return ret, total, err +} + +var noProgTestRun = featureTest{ + Fn: func() bool { + prog, err := NewProgram(&ProgramSpec{ + Type: SocketFilter, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + // This may be because we lack sufficient permissions, etc. + return false + } + defer prog.Close() + + fd, err := prog.fd.value() + if err != nil { + return false + } + + // Programs require at least 14 bytes input + in := make([]byte, 14) + attr := bpfProgTestRunAttr{ + fd: fd, + dataSizeIn: uint32(len(in)), + dataIn: newPtr(unsafe.Pointer(&in[0])), + } + + _, err = bpfCall(_ProgTestRun, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + return errors.Cause(err) == unix.EINVAL + }, +} + +func (p *Program) testRun(in []byte, repeat int) (uint32, []byte, time.Duration, error) { + if uint(repeat) > math.MaxUint32 { + return 0, nil, 0, fmt.Errorf("repeat is too high") + } + + if len(in) == 0 { + return 0, nil, 0, fmt.Errorf("missing input") + } + + if uint(len(in)) > math.MaxUint32 { + return 0, nil, 0, fmt.Errorf("input is too long") + } + + if noProgTestRun.Result() { + return 0, nil, 0, errNotSupported + } + + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + out := make([]byte, len(in)+outputPad) + + fd, err := p.fd.value() + if err != nil { + return 0, nil, 0, err + } + + attr := bpfProgTestRunAttr{ + fd: fd, + dataSizeIn: uint32(len(in)), + dataSizeOut: uint32(len(out)), + dataIn: newPtr(unsafe.Pointer(&in[0])), + dataOut: newPtr(unsafe.Pointer(&out[0])), + repeat: uint32(repeat), + } + + _, err = bpfCall(_ProgTestRun, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + if err != nil { + return 0, nil, 0, errors.Wrap(err, "can't run test") + } + + if int(attr.dataSizeOut) > cap(out) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. + panic("kernel wrote past end of output buffer") + } + out = out[:int(attr.dataSizeOut)] + + total := time.Duration(attr.duration) * time.Nanosecond + return attr.retval, out, total, nil +} + +func unmarshalProgram(buf []byte) (*Program, error) { + if len(buf) != 4 { + return nil, errors.New("program id requires 4 byte value") + } + + // Looking up an entry in a nested map or prog array returns an id, + // not an fd. + id := internal.NativeEndian.Uint32(buf) + fd, err := bpfGetProgramFDByID(id) + if err != nil { + return nil, err + } + + abi, err := newProgramABIFromFd(fd) + if err != nil { + _ = fd.close() + return nil, err + } + + return newProgram(fd, "", abi), nil +} + +// MarshalBinary implements BinaryMarshaler. 
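// Editor's note (illustrative, not part of the vendored patch): this
// marshaling is what lets a Program be stored as a value in a nested map or
// prog array; the 4-byte fd is handed to the kernel on update, and a later
// lookup yields a program id that unmarshalProgram above resolves back to
// an fd:
//
//	buf, err := prog.MarshalBinary() // 4 bytes, native endian
//	if err != nil {
//		return err
//	}
//	_ = buf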
+func (p *Program) MarshalBinary() ([]byte, error) { + value, err := p.fd.value() + if err != nil { + return nil, err + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, value) + return buf, nil +} + +// Attach a Program to a container object fd +func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error { + if fd < 0 { + return errors.New("invalid fd") + } + + pfd, err := p.fd.value() + if err != nil { + return err + } + + attr := bpfProgAlterAttr{ + targetFd: uint32(fd), + attachBpfFd: pfd, + attachType: uint32(typ), + attachFlags: uint32(flags), + } + + return bpfProgAlter(_ProgAttach, &attr) +} + +// Detach a Program from a container object fd +func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error { + if fd < 0 { + return errors.New("invalid fd") + } + + pfd, err := p.fd.value() + if err != nil { + return err + } + + attr := bpfProgAlterAttr{ + targetFd: uint32(fd), + attachBpfFd: pfd, + attachType: uint32(typ), + attachFlags: uint32(flags), + } + + return bpfProgAlter(_ProgDetach, &attr) +} + +// LoadPinnedProgram loads a Program from a BPF file. +// +// Requires at least Linux 4.13, use LoadPinnedProgramExplicit on +// earlier versions. +func LoadPinnedProgram(fileName string) (*Program, error) { + fd, err := bpfGetObject(fileName) + if err != nil { + return nil, err + } + + abi, err := newProgramABIFromFd(fd) + if err != nil { + _ = fd.close() + return nil, err + } + + return newProgram(fd, filepath.Base(fileName), abi), nil +} + +// LoadPinnedProgramExplicit loads a program with explicit parameters. +func LoadPinnedProgramExplicit(fileName string, abi *ProgramABI) (*Program, error) { + fd, err := bpfGetObject(fileName) + if err != nil { + return nil, err + } + + return newProgram(fd, filepath.Base(fileName), abi), nil +} + +// SanitizeName replaces all invalid characters in name. +// +// Use this to automatically generate valid names for maps and +// programs at run time. +// +// Passing a negative value for replacement will delete characters +// instead of replacing them. +func SanitizeName(name string, replacement rune) string { + return strings.Map(func(char rune) rune { + if invalidBPFObjNameChar(char) { + return replacement + } + return char + }, name) +} + +type loadError struct { + cause error + verifierLog string +} + +func (le *loadError) Error() string { + if le.verifierLog == "" { + return fmt.Sprintf("failed to load program: %s", le.cause) + } + return fmt.Sprintf("failed to load program: %s: %s", le.cause, le.verifierLog) +} + +func (le *loadError) Cause() error { + return le.cause +} + +// IsNotSupported returns true if an error occurred because +// the kernel does not have support for a specific feature. +func IsNotSupported(err error) bool { + return errors.Cause(err) == errNotSupported +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_32_be.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_32_be.go new file mode 100644 index 0000000000..775774436a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_32_be.go @@ -0,0 +1,14 @@ +// +build armbe mips mips64p32 + +package ebpf + +import ( + "unsafe" +) + +// ptr wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. 
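// Editor's note (illustrative, not part of the vendored patch): on these
// big-endian 32-bit targets the 32 padding bits precede the pointer so that
// the pointer occupies the low-order half of the 64-bit field the syscall
// expects; the little-endian variant below orders the fields the other way
// around for the same reason.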
+type syscallPtr struct { + pad uint32 + ptr unsafe.Pointer +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_32_le.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_32_le.go new file mode 100644 index 0000000000..14b805e92c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_32_le.go @@ -0,0 +1,14 @@ +// +build 386 amd64p32 arm mipsle mips64p32le + +package ebpf + +import ( + "unsafe" +) + +// ptr wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type syscallPtr struct { + ptr unsafe.Pointer + pad uint32 +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_64.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_64.go new file mode 100644 index 0000000000..c897d7273b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/ptr_64.go @@ -0,0 +1,14 @@ +// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le +// +build !armbe,!mips,!mips64p32 + +package ebpf + +import ( + "unsafe" +) + +// ptr wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type syscallPtr struct { + ptr unsafe.Pointer +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/readme.md b/cluster-autoscaler/vendor/github.com/cilium/ebpf/readme.md new file mode 100644 index 0000000000..26ab2b9045 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/readme.md @@ -0,0 +1,20 @@ +eBPF +------- +[![](https://godoc.org/github.com/cilium/ebpf?status.svg)](https://godoc.org/github.com/cilium/ebpf) + +eBPF is a pure Go library that provides utilities for loading, compiling, and debugging eBPF programs. It has minimal external dependencies and is intended to be used in long running processes. + +[ebpf/asm](https://godoc.org/github.com/cilium/ebpf/asm) contains a basic assembler. + +The library is maintained by [Cloudflare](https://www.cloudflare.com) and [Cilium](https://www.cilium.io). Feel free to [join](https://cilium.herokuapp.com/) the [libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack. + +## Current status + +The package is production ready, but **the API is explicitly unstable +right now**. Expect to update your code if you want to follow along. + +## Useful resources + +* [Cilium eBPF documentation](https://cilium.readthedocs.io/en/latest/bpf/#bpf-guide) (recommended) +* [Linux documentation on BPF](http://elixir.free-electrons.com/linux/latest/source/Documentation/networking/filter.txt) +* [eBPF features by Linux version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md) diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/run-tests.sh b/cluster-autoscaler/vendor/github.com/cilium/ebpf/run-tests.sh new file mode 100644 index 0000000000..f92d489932 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/run-tests.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# Test the current package under a different kernel. +# Requires virtme and qemu to be installed. + +set -eu +set -o pipefail + +if [[ "${1:-}" = "--in-vm" ]]; then + shift + + readonly home="$(mktemp --directory)" + + mount -t bpf bpf /sys/fs/bpf + export CGO_ENABLED=0 + export HOME="$home" + + echo Running tests... + /usr/local/bin/go test -mod=vendor -coverprofile="$1/coverage.txt" -covermode=atomic -v ./... + touch "$1/success" + exit 0 +fi + +# Force Go modules, so that vendoring and building are easier. +export GO111MODULE=on + +# Pull all dependencies, so that we can run tests without the +# vm having network access. 
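# Editor's note (illustrative, not part of the vendored patch): a typical
# invocation runs the suite against a pre-built CI kernel image, e.g.
#
#   ./run-tests.sh 4.19
#
# where "4.19" is a hypothetical version for which newtools/ci-kernels
# publishes a linux-4.19.bz image, as fetched further down in this script.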
+go mod vendor + +# Use sudo if /dev/kvm isn't accessible by the current user. +sudo="" +if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then + sudo="sudo" +fi +readonly sudo + +readonly kernel_version="${1:-}" +if [[ -z "${kernel_version}" ]]; then + echo "Expecting kernel version as first argument" + exit 1 +fi + +readonly kernel="linux-${kernel_version}.bz" +readonly output="$(mktemp -d)" +readonly tmp_dir="$(mktemp -d)" + +test -e "${tmp_dir}/${kernel}" || { + echo Fetching ${kernel} + curl --fail -L "https://github.com/newtools/ci-kernels/blob/master/${kernel}?raw=true" -o "${tmp_dir}/${kernel}" +} + +echo Testing on ${kernel_version} +$sudo virtme-run --kimg "${tmp_dir}/${kernel}" --memory 256M --pwd --rwdir=/run/output="${output}" --script-sh "$(realpath "$0") --in-vm /run/output" + +if [[ ! -e "${output}/success" ]]; then + echo "Test failed on ${kernel_version}" + exit 1 +else + echo "Test successful on ${kernel_version}" + if [[ -v CODECOV_TOKEN ]]; then + curl --fail -s https://codecov.io/bash > "${tmp_dir}/codecov.sh" + chmod +x "${tmp_dir}/codecov.sh" + "${tmp_dir}/codecov.sh" -f "${output}/coverage.txt" + fi +fi + +$sudo rm -r "${output}" diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/syscalls.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/syscalls.go new file mode 100644 index 0000000000..68abd3bc67 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/syscalls.go @@ -0,0 +1,420 @@ +package ebpf + +import ( + "bytes" + "path/filepath" + "runtime" + "strconv" + "strings" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" + + "github.com/pkg/errors" +) + +var errClosedFd = errors.New("use of closed file descriptor") + +type bpfFD struct { + raw int64 +} + +func newBPFFD(value uint32) *bpfFD { + fd := &bpfFD{int64(value)} + runtime.SetFinalizer(fd, (*bpfFD).close) + return fd +} + +func (fd *bpfFD) String() string { + return strconv.FormatInt(fd.raw, 10) +} + +func (fd *bpfFD) value() (uint32, error) { + if fd.raw < 0 { + return 0, errClosedFd + } + + return uint32(fd.raw), nil +} + +func (fd *bpfFD) close() error { + if fd.raw < 0 { + return nil + } + + value := int(fd.raw) + fd.raw = -1 + + fd.forget() + return unix.Close(value) +} + +func (fd *bpfFD) forget() { + runtime.SetFinalizer(fd, nil) +} + +func (fd *bpfFD) dup() (*bpfFD, error) { + if fd.raw < 0 { + return nil, errClosedFd + } + + dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return nil, errors.Wrap(err, "can't dup fd") + } + + return newBPFFD(uint32(dup)), nil +} + +// bpfObjName is a null-terminated string made up of +// 'A-Za-z0-9_' characters. +type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte + +// newBPFObjName truncates the result if it is too long. 
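// Editor's sketch (illustrative, not part of the vendored patch):
//
//	name, err := newBPFObjName("my_prog") // ok: alphanumeric and '_' only
//	_, err = newBPFObjName("my-prog")     // error: '-' is rejected
//	name, _ = newBPFObjName(strings.Repeat("a", 64)) // silently truncated to BPF_OBJ_NAME_LEN-1 bytes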
+func newBPFObjName(name string) (bpfObjName, error) { + idx := strings.IndexFunc(name, invalidBPFObjNameChar) + if idx != -1 { + return bpfObjName{}, errors.Errorf("invalid character '%c' in name '%s'", name[idx], name) + } + + var result bpfObjName + copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) + return result, nil +} + +func invalidBPFObjNameChar(char rune) bool { + switch { + case char >= 'A' && char <= 'Z': + fallthrough + case char >= 'a' && char <= 'z': + fallthrough + case char >= '0' && char <= '9': + fallthrough + case char == '_': + return false + default: + return true + } +} + +type bpfMapCreateAttr struct { + mapType MapType + keySize uint32 + valueSize uint32 + maxEntries uint32 + flags uint32 + innerMapFd uint32 // since 4.12 56f668dfe00d + numaNode uint32 // since 4.14 96eabe7a40aa + mapName bpfObjName // since 4.15 ad5b177bd73f +} + +type bpfMapOpAttr struct { + mapFd uint32 + padding uint32 + key syscallPtr + value syscallPtr + flags uint64 +} + +type bpfMapInfo struct { + mapType uint32 + id uint32 + keySize uint32 + valueSize uint32 + maxEntries uint32 + flags uint32 + mapName bpfObjName // since 4.15 ad5b177bd73f +} + +type bpfPinObjAttr struct { + fileName syscallPtr + fd uint32 + padding uint32 +} + +type bpfProgLoadAttr struct { + progType ProgramType + insCount uint32 + instructions syscallPtr + license syscallPtr + logLevel uint32 + logSize uint32 + logBuf syscallPtr + kernelVersion uint32 // since 4.1 2541517c32be + progFlags uint32 // since 4.11 e07b98d9bffe + progName bpfObjName // since 4.15 067cae47771c + progIfIndex uint32 // since 4.15 1f6f4cb7ba21 + expectedAttachType AttachType // since 4.17 5e43f899b03a +} + +type bpfProgInfo struct { + progType uint32 + id uint32 + tag [unix.BPF_TAG_SIZE]byte + jitedLen uint32 + xlatedLen uint32 + jited syscallPtr + xlated syscallPtr + loadTime uint64 // since 4.15 cb4d2b3f03d8 + createdByUID uint32 + nrMapIDs uint32 + mapIds syscallPtr + name bpfObjName +} + +type bpfProgTestRunAttr struct { + fd uint32 + retval uint32 + dataSizeIn uint32 + dataSizeOut uint32 + dataIn syscallPtr + dataOut syscallPtr + repeat uint32 + duration uint32 +} + +type bpfProgAlterAttr struct { + targetFd uint32 + attachBpfFd uint32 + attachType uint32 + attachFlags uint32 +} + +type bpfObjGetInfoByFDAttr struct { + fd uint32 + infoLen uint32 + info syscallPtr // May be either bpfMapInfo or bpfProgInfo +} + +type bpfGetFDByIDAttr struct { + id uint32 + next uint32 +} + +func newPtr(ptr unsafe.Pointer) syscallPtr { + return syscallPtr{ptr: ptr} +} + +func bpfProgLoad(attr *bpfProgLoadAttr) (*bpfFD, error) { + for { + fd, err := bpfCall(_ProgLoad, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. 
+ if err == unix.EAGAIN { + continue + } + + if err != nil { + return nil, err + } + + return newBPFFD(uint32(fd)), nil + } +} + +func bpfProgAlter(cmd int, attr *bpfProgAlterAttr) error { + _, err := bpfCall(cmd, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +func bpfMapCreate(attr *bpfMapCreateAttr) (*bpfFD, error) { + fd, err := bpfCall(_MapCreate, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + + return newBPFFD(uint32(fd)), nil +} + +func bpfMapLookupElem(m *bpfFD, key, valueOut syscallPtr) error { + fd, err := m.value() + if err != nil { + return err + } + + attr := bpfMapOpAttr{ + mapFd: fd, + key: key, + value: valueOut, + } + _, err = bpfCall(_MapLookupElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + return err +} + +func bpfMapUpdateElem(m *bpfFD, key, valueOut syscallPtr, flags uint64) error { + fd, err := m.value() + if err != nil { + return err + } + + attr := bpfMapOpAttr{ + mapFd: fd, + key: key, + value: valueOut, + flags: flags, + } + _, err = bpfCall(_MapUpdateElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + return err +} + +func bpfMapDeleteElem(m *bpfFD, key syscallPtr) error { + fd, err := m.value() + if err != nil { + return err + } + + attr := bpfMapOpAttr{ + mapFd: fd, + key: key, + } + _, err = bpfCall(_MapDeleteElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + return err +} + +func bpfMapGetNextKey(m *bpfFD, key, nextKeyOut syscallPtr) error { + fd, err := m.value() + if err != nil { + return err + } + + attr := bpfMapOpAttr{ + mapFd: fd, + key: key, + value: nextKeyOut, + } + _, err = bpfCall(_MapGetNextKey, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + return err +} + +const bpfFSType = 0xcafe4a11 + +func bpfPinObject(fileName string, fd *bpfFD) error { + dirName := filepath.Dir(fileName) + var statfs unix.Statfs_t + if err := unix.Statfs(dirName, &statfs); err != nil { + return err + } + if uint64(statfs.Type) != bpfFSType { + return errors.Errorf("%s is not on a bpf filesystem", fileName) + } + + value, err := fd.value() + if err != nil { + return err + } + + _, err = bpfCall(_ObjPin, unsafe.Pointer(&bpfPinObjAttr{ + fileName: newPtr(unsafe.Pointer(&[]byte(fileName)[0])), + fd: value, + }), 16) + return errors.Wrapf(err, "pin object %s", fileName) +} + +func bpfGetObject(fileName string) (*bpfFD, error) { + ptr, err := bpfCall(_ObjGet, unsafe.Pointer(&bpfPinObjAttr{ + fileName: newPtr(unsafe.Pointer(&[]byte(fileName)[0])), + }), 16) + if err != nil { + return nil, errors.Wrapf(err, "get object %s", fileName) + } + return newBPFFD(uint32(ptr)), nil +} + +func bpfGetObjectInfoByFD(fd *bpfFD, info unsafe.Pointer, size uintptr) error { + value, err := fd.value() + if err != nil { + return err + } + + // available from 4.13 + attr := bpfObjGetInfoByFDAttr{ + fd: value, + infoLen: uint32(size), + info: newPtr(info), + } + _, err = bpfCall(_ObjGetInfoByFD, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + return errors.Wrapf(err, "fd %d", value) +} + +func bpfGetProgInfoByFD(fd *bpfFD) (*bpfProgInfo, error) { + var info bpfProgInfo + err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) + return &info, errors.Wrap(err, "can't get program info") +} + +func bpfGetMapInfoByFD(fd *bpfFD) (*bpfMapInfo, error) { + var info bpfMapInfo + err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) + return &info, errors.Wrap(err, "can't get map info:") +} + +var haveObjName = featureTest{ + Fn: func() bool { + name, err := newBPFObjName("feature_test") + if err != nil { + // This 
really is a fatal error, but it should be caught + // by the unit tests not working. + return false + } + + attr := bpfMapCreateAttr{ + mapType: Array, + keySize: 4, + valueSize: 4, + maxEntries: 1, + mapName: name, + } + + fd, err := bpfMapCreate(&attr) + if err != nil { + return false + } + + _ = fd.close() + return true + }, +} + +func bpfGetMapFDByID(id uint32) (*bpfFD, error) { + // available from 4.13 + attr := bpfGetFDByIDAttr{ + id: id, + } + ptr, err := bpfCall(_MapGetFDByID, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + if err != nil { + return nil, errors.Wrapf(err, "can't get fd for map id %d", id) + } + return newBPFFD(uint32(ptr)), nil +} + +func bpfGetProgramFDByID(id uint32) (*bpfFD, error) { + // available from 4.13 + attr := bpfGetFDByIDAttr{ + id: id, + } + ptr, err := bpfCall(_ProgGetFDByID, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + if err != nil { + return nil, errors.Wrapf(err, "can't get fd for program id %d", id) + } + return newBPFFD(uint32(ptr)), nil +} + +func bpfCall(cmd int, attr unsafe.Pointer, size uintptr) (uintptr, error) { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + var err error + if errNo != 0 { + err = errNo + } + + return r1, err +} + +func convertCString(in []byte) string { + inLen := bytes.IndexByte(in, 0) + if inLen == -1 { + return "" + } + return string(in[:inLen]) +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/types.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/types.go new file mode 100644 index 0000000000..0daf9a7152 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/types.go @@ -0,0 +1,189 @@ +package ebpf + +//go:generate stringer -output types_string.go -type=MapType,ProgramType + +// MapType indicates the type map structure +// that will be initialized in the kernel. +type MapType uint32 + +// All the various map types that can be created +const ( + UnspecifiedMap MapType = iota + // Hash is a hash map + Hash + // Array is an array map + Array + // ProgramArray - A program array map is a special kind of array map whose map + // values contain only file descriptors referring to other eBPF + // programs. Thus, both the key_size and value_size must be + // exactly four bytes. This map is used in conjunction with the + // TailCall helper. + ProgramArray + // PerfEventArray - A perf event array is used in conjunction with PerfEventRead + // and PerfEventOutput calls, to read the raw bpf_perf_data from the registers. + PerfEventArray + // PerCPUHash - This data structure is useful for people who have high performance + // network needs and can reconcile adds at the end of some cycle, so that + // hashes can be lock free without the use of XAdd, which can be costly. + PerCPUHash + // PerCPUArray - This data structure is useful for people who have high performance + // network needs and can reconcile adds at the end of some cycle, so that + // hashes can be lock free without the use of XAdd, which can be costly. + // Each CPU gets a copy of this hash, the contents of all of which can be reconciled + // later. 
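	// Editor's note (illustrative, not part of the vendored patch): for the
	// per-CPU map types each possible CPU holds its own copy of a value, so
	// a lookup through this package is expected to yield one element per
	// CPU rather than a single value; hasPerCPUValue below reflects this.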
+	PerCPUArray
+	// StackTrace - This holds whole user and kernel stack traces, it can be retrieved with
+	// GetStackID
+	StackTrace
+	// CGroupArray - This is a very niche structure used to help SKBInCGroup determine
+	// if an skb is from a socket belonging to a specific cgroup
+	CGroupArray
+	// LRUHash - This allows you to create a small hash structure that will purge the
+	// least recently used items rather than throw an error when you run out of memory
+	LRUHash
+	// LRUCPUHash - This is NOT like PerCPUHash; this structure is shared among the CPUs.
+	// It has more to do with including the CPU id in the LRU calculation, so that if a
+	// particular CPU is using a value over and over again it will be kept, but if
+	// a value is retrieved a lot yet sparsely across CPUs it is not as important, basically
+	// giving weight to CPU locality over overall usage.
+	LRUCPUHash
+	// LPMTrie - This is an implementation of the Longest-Prefix-Match Trie structure. It is useful
+	// for storing things like IP addresses, which can be bit masked, allowing keys of differing
+	// values to refer to the same reference based on their masks. See Wikipedia for more details.
+	LPMTrie
+	// ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps
+	// itself.
+	ArrayOfMaps
+	// HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps
+	// itself.
+	HashOfMaps
+)
+
+// hasPerCPUValue returns true if the Map stores a value per CPU.
+func (mt MapType) hasPerCPUValue() bool {
+	if mt == PerCPUHash || mt == PerCPUArray {
+		return true
+	}
+	return false
+}
+
+const (
+	_MapCreate = iota
+	_MapLookupElem
+	_MapUpdateElem
+	_MapDeleteElem
+	_MapGetNextKey
+	_ProgLoad
+	_ObjPin
+	_ObjGet
+	_ProgAttach
+	_ProgDetach
+	_ProgTestRun
+	_ProgGetNextID
+	_MapGetNextID
+	_ProgGetFDByID
+	_MapGetFDByID
+	_ObjGetInfoByFD
+)
+
+const (
+	_Any = iota
+	_NoExist
+	_Exist
+)
+
+// ProgramType of the eBPF program
+type ProgramType uint32
+
+// eBPF program types
+const (
+	// Unrecognized program type
+	UnspecifiedProgram ProgramType = iota
+	// SocketFilter socket or seccomp filter
+	SocketFilter
+	// Kprobe program
+	Kprobe
+	// SchedCLS traffic control shaper
+	SchedCLS
+	// SchedACT routing control shaper
+	SchedACT
+	// TracePoint program
+	TracePoint
+	// XDP program
+	XDP
+	// PerfEvent program
+	PerfEvent
+	// CGroupSKB program
+	CGroupSKB
+	// CGroupSock program
+	CGroupSock
+	// LWTIn program
+	LWTIn
+	// LWTOut program
+	LWTOut
+	// LWTXmit program
+	LWTXmit
+	// SockOps program
+	SockOps
+	// SkSKB program
+	SkSKB
+	// CGroupDevice program
+	CGroupDevice
+	// SkMsg program
+	SkMsg
+	// RawTracepoint program
+	RawTracepoint
+	// CGroupSockAddr program
+	CGroupSockAddr
+	// LWTSeg6Local program
+	LWTSeg6Local
+	// LircMode2 program
+	LircMode2
+	// SkReuseport program
+	SkReuseport
+	// FlowDissector program
+	FlowDissector
+	// CGroupSysctl program
+	CGroupSysctl
+	// RawTracepointWritable program
+	RawTracepointWritable
+	// CGroupSockopt program
+	CGroupSockopt
+)
+
+// AttachType of the eBPF program, needed to differentiate allowed context accesses in
+// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required.
+// Will cause invalid argument (EINVAL) at program load time if set incorrectly.
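// Editor's sketch (illustrative, not part of the vendored patch): a program
// meant for the IPv4 connect cgroup hook would pair the program type with
// the matching attach type at load time:
//
//	spec := &ebpf.ProgramSpec{
//		Type:       ebpf.CGroupSockAddr,
//		AttachType: ebpf.AttachCGroupInet4Connect,
//		// Instructions, License, ...
//	}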
+type AttachType uint32 + +// AttachNone is an alias for AttachCGroupInetIngress for readability reasons +const AttachNone AttachType = 0 + +const ( + AttachCGroupInetIngress AttachType = iota + AttachCGroupInetEgress + AttachCGroupInetSockCreate + AttachCGroupSockOps + AttachSkSKBStreamParser + AttachSkSKBStreamVerdict + AttachCGroupDevice + AttachSkMsgVerdict + AttachCGroupInet4Bind + AttachCGroupInet6Bind + AttachCGroupInet4Connect + AttachCGroupInet6Connect + AttachCGroupInet4PostBind + AttachCGroupInet6PostBind + AttachCGroupUDP4Sendmsg + AttachCGroupUDP6Sendmsg + AttachLircMode2 + AttachFlowDissector + AttachCGroupSysctl + AttachCGroupUDP4Recvmsg + AttachCGroupUDP6Recvmsg + AttachCGroupGetsockopt + AttachCGroupSetsockopt +) + +// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command +type AttachFlags uint32 diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/types_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/types_string.go new file mode 100644 index 0000000000..4813437ec2 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/types_string.go @@ -0,0 +1,78 @@ +// Code generated by "stringer -output types_string.go -type=MapType,ProgramType"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UnspecifiedMap-0] + _ = x[Hash-1] + _ = x[Array-2] + _ = x[ProgramArray-3] + _ = x[PerfEventArray-4] + _ = x[PerCPUHash-5] + _ = x[PerCPUArray-6] + _ = x[StackTrace-7] + _ = x[CGroupArray-8] + _ = x[LRUHash-9] + _ = x[LRUCPUHash-10] + _ = x[LPMTrie-11] + _ = x[ArrayOfMaps-12] + _ = x[HashOfMaps-13] +} + +const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMaps" + +var _MapType_index = [...]uint8{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136} + +func (i MapType) String() string { + if i >= MapType(len(_MapType_index)-1) { + return "MapType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MapType_name[_MapType_index[i]:_MapType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+	var x [1]struct{}
+	_ = x[UnspecifiedProgram-0]
+	_ = x[SocketFilter-1]
+	_ = x[Kprobe-2]
+	_ = x[SchedCLS-3]
+	_ = x[SchedACT-4]
+	_ = x[TracePoint-5]
+	_ = x[XDP-6]
+	_ = x[PerfEvent-7]
+	_ = x[CGroupSKB-8]
+	_ = x[CGroupSock-9]
+	_ = x[LWTIn-10]
+	_ = x[LWTOut-11]
+	_ = x[LWTXmit-12]
+	_ = x[SockOps-13]
+	_ = x[SkSKB-14]
+	_ = x[CGroupDevice-15]
+	_ = x[SkMsg-16]
+	_ = x[RawTracepoint-17]
+	_ = x[CGroupSockAddr-18]
+	_ = x[LWTSeg6Local-19]
+	_ = x[LircMode2-20]
+	_ = x[SkReuseport-21]
+	_ = x[FlowDissector-22]
+	_ = x[CGroupSysctl-23]
+	_ = x[RawTracepointWritable-24]
+	_ = x[CGroupSockopt-25]
+}
+
+const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockopt"
+
+var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258}
+
+func (i ProgramType) String() string {
+	if i >= ProgramType(len(_ProgramType_index)-1) {
+		return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]]
+}
diff --git a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/constants.go b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/constants.go
index b6b7f2bb5e..efac4e367c 100644
--- a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/constants.go
+++ b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/constants.go
@@ -144,6 +144,17 @@ const (
 	// a statically assigned hash table by their source IP
 	// addresses.
 	SourceHashing = "sh"
+
+	// WeightedRoundRobin assigns jobs to real servers proportionally
+	// to the real servers' weight. Servers with higher weights
+	// receive new jobs first and get more jobs than servers
+	// with lower weights.
+	// Servers with equal weights get an equal distribution of new jobs.
+	WeightedRoundRobin = "wrr"
+
+	// WeightedLeastConnection assigns more jobs to servers
+	// with fewer jobs, relative to the real servers' weight.
+	WeightedLeastConnection = "wlc"
 )
 
 const (
diff --git a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go
index 083909ae05..7673659aa8 100644
--- a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go
+++ b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go
@@ -315,6 +315,7 @@ func assembleStats(msg []byte) (SvcStats, error) {
 
 func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {
 	var s Service
+	var addressBytes []byte
 
 	for _, attr := range attrs {
 
@@ -327,11 +328,7 @@ func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {
 		case ipvsSvcAttrProtocol:
 			s.Protocol = native.Uint16(attr.Value)
 		case ipvsSvcAttrAddress:
-			ip, err := parseIP(attr.Value, s.AddressFamily)
-			if err != nil {
-				return nil, err
-			}
-			s.Address = ip
+			addressBytes = attr.Value
 		case ipvsSvcAttrPort:
 			s.Port = binary.BigEndian.Uint16(attr.Value)
 		case ipvsSvcAttrFWMark:
@@ -353,6 +350,16 @@ func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {
 		}
 	}
 
+
+	// parse Address only after AddressFamily has been parsed, in case parseIP fails
+	if addressBytes != nil {
+		ip, err := parseIP(addressBytes, s.AddressFamily)
+		if err != nil {
+			return nil, err
+		}
+		s.Address = ip
+	}
+
 	return &s, nil
 }
 
@@ -416,18 +423,18 @@ func (i *Handle) doCmdWithoutAttr(cmd uint8) ([][]byte, error) {
 
 func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) {
 	var d Destination
+	var addressBytes []byte
 
 	for _, attr := range attrs {
 
 		attrType := int(attr.Attr.Type)
 
 		switch attrType {
+
+		case ipvsDestAttrAddressFamily:
+			d.AddressFamily = native.Uint16(attr.Value)
 		case ipvsDestAttrAddress:
-			ip, err := parseIP(attr.Value, syscall.AF_INET)
-			if err != nil {
-				return nil, err
-			}
-			d.Address = ip
+			addressBytes = attr.Value
 		case ipvsDestAttrPort:
 			d.Port = binary.BigEndian.Uint16(attr.Value)
 		case ipvsDestAttrForwardingMethod:
@@ -438,8 +445,6 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error)
 			d.UpperThreshold = native.Uint32(attr.Value)
 		case ipvsDestAttrLowerThreshold:
 			d.LowerThreshold = native.Uint32(attr.Value)
-		case ipvsDestAttrAddressFamily:
-			d.AddressFamily = native.Uint16(attr.Value)
 		case ipvsDestAttrActiveConnections:
 			d.ActiveConnections = int(native.Uint16(attr.Value))
 		case ipvsDestAttrInactiveConnections:
@@ -452,6 +457,16 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error)
 			d.Stats = DstStats(stats)
 		}
 	}
+
+	// parse Address only after AddressFamily has been parsed, in case parseIP fails
+	if addressBytes != nil {
+		ip, err := parseIP(addressBytes, d.AddressFamily)
+		if err != nil {
+			return nil, err
+		}
+		d.Address = ip
+	}
+
 	return &d, nil
 }
diff --git a/cluster-autoscaler/vendor/github.com/google/gofuzz/README.md b/cluster-autoscaler/vendor/github.com/google/gofuzz/README.md
index 64869af347..386c2a457a 100644
--- a/cluster-autoscaler/vendor/github.com/google/gofuzz/README.md
+++ b/cluster-autoscaler/vendor/github.com/google/gofuzz/README.md
@@ -3,7 +3,7 @@ gofuzz
 
 gofuzz is a library for populating go objects with random values.
-[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
+[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz)
 [![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
 
 This is useful for testing:
diff --git a/cluster-autoscaler/vendor/github.com/google/gofuzz/fuzz.go b/cluster-autoscaler/vendor/github.com/google/gofuzz/fuzz.go
index 1dfa80a6fc..da0a5f9380 100644
--- a/cluster-autoscaler/vendor/github.com/google/gofuzz/fuzz.go
+++ b/cluster-autoscaler/vendor/github.com/google/gofuzz/fuzz.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"math/rand"
 	"reflect"
+	"regexp"
 	"time"
 )
 
@@ -28,13 +29,14 @@ type fuzzFuncMap map[reflect.Type]reflect.Value
 
 // Fuzzer knows how to fill any object with random fields.
 type Fuzzer struct {
-	fuzzFuncs        fuzzFuncMap
-	defaultFuzzFuncs fuzzFuncMap
-	r                *rand.Rand
-	nilChance        float64
-	minElements      int
-	maxElements      int
-	maxDepth         int
+	fuzzFuncs         fuzzFuncMap
+	defaultFuzzFuncs  fuzzFuncMap
+	r                 *rand.Rand
+	nilChance         float64
+	minElements       int
+	maxElements       int
+	maxDepth          int
+	skipFieldPatterns []*regexp.Regexp
 }
 
 // New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
@@ -150,6 +152,13 @@ func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
 	return f
 }
 
+// Skip fields which match the supplied pattern. Call this multiple times if needed.
+// This is useful to skip XXX_ fields generated by protobuf.
+func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer {
+	f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
+	return f
+}
+
 // Fuzz recursively fills all of obj's fields with something random. First
 // this tries to find a custom fuzz function (see Funcs). If there is no
 // custom function this tests whether the object implements fuzz.Interface and,
@@ -274,7 +283,17 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
 		v.Set(reflect.Zero(v.Type()))
 	case reflect.Struct:
 		for i := 0; i < v.NumField(); i++ {
-			fc.doFuzz(v.Field(i), 0)
+			skipField := false
+			fieldName := v.Type().Field(i).Name
+			for _, pattern := range fc.fuzzer.skipFieldPatterns {
+				if pattern.MatchString(fieldName) {
+					skipField = true
+					break
+				}
+			}
+			if !skipField {
+				fc.doFuzz(v.Field(i), 0)
+			}
 		}
 	case reflect.Chan:
 		fallthrough
diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
index 25ff515893..c0a965923f 100644
--- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
+++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
@@ -37,8 +37,18 @@ type Manager interface {
 	// restore the object later.
 	GetPaths() map[string]string
 
+	// GetUnifiedPath returns the unified path when running in unified mode.
+	// The value corresponds to all values of the GetPaths() map.
+	//
+	// GetUnifiedPath returns an error when running in hybrid mode as well as
+	// in legacy mode.
+	GetUnifiedPath() (string, error)
+
 	// Sets the cgroup as configured.
 	Set(container *configs.Config) error
+
+	// Gets the cgroup as configured.
+	GetCgroups() (*configs.Cgroup, error)
 }
 
 type NotFoundError struct {
diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go
new file mode 100644
index 0000000000..847ce8ef1a
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go
@@ -0,0 +1,180 @@
+// Package devicefilter contains an eBPF device filter program.
+//
+// The implementation is based on https://github.com/containers/crun/blob/0.10.2/src/libcrun/ebpf.c
+//
+// Although ebpf.c is originally licensed under LGPL-3.0-or-later, the author (Giuseppe Scrivano)
+// agreed to relicense the file in Apache License 2.0: https://github.com/opencontainers/runc/issues/2144#issuecomment-543116397
+package devicefilter
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/cilium/ebpf/asm"
+	"github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// The license string format is the same as the kernel MODULE_LICENSE macro.
+	license = "Apache"
+)
+
+// DeviceFilter returns an eBPF device filter program and its license string.
+func DeviceFilter(devices []*configs.Device) (asm.Instructions, string, error) {
+	p := &program{}
+	p.init()
+	for i := len(devices) - 1; i >= 0; i-- {
+		if err := p.appendDevice(devices[i]); err != nil {
+			return nil, "", err
+		}
+	}
+	insts, err := p.finalize()
+	return insts, license, err
+}
+
+type program struct {
+	insts       asm.Instructions
+	hasWildCard bool
+	blockID     int
+}
+
+func (p *program) init() {
+	// struct bpf_cgroup_dev_ctx: https://elixir.bootlin.com/linux/v5.3.6/source/include/uapi/linux/bpf.h#L3423
+	/*
+		u32 access_type
+		u32 major
+		u32 minor
+	*/
+	// R2 <- type (lower 16 bit of u32 access_type at R1[0])
+	p.insts = append(p.insts,
+		asm.LoadMem(asm.R2, asm.R1, 0, asm.Half))
+
+	// R3 <- access (upper 16 bit of u32 access_type at R1[0])
+	p.insts = append(p.insts,
+		asm.LoadMem(asm.R3, asm.R1, 0, asm.Word),
+		// RSh: bitwise shift right
+		asm.RSh.Imm32(asm.R3, 16))
+
+	// R4 <- major (u32 major at R1[4])
+	p.insts = append(p.insts,
+		asm.LoadMem(asm.R4, asm.R1, 4, asm.Word))
+
+	// R5 <- minor (u32 minor at R1[8])
+	p.insts = append(p.insts,
+		asm.LoadMem(asm.R5, asm.R1, 8, asm.Word))
+}
+
+// appendDevice needs to be called from the last element of OCI linux.resources.devices to the head element.
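// Editor's sketch (illustrative, not part of the vendored patch): DeviceFilter
// above encodes this ordering by iterating the device list in reverse, so that
// earlier OCI entries take precedence; a hypothetical caller would then load
// and attach the result via the sibling ebpf package:
//
//	insts, license, err := devicefilter.DeviceFilter(devices) // devices: []*configs.Device
//	if err != nil {
//		return err
//	}
//	closer, err := ebpf.LoadAttachCgroupDeviceFilter(insts, license, dirFD)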
+func (p *program) appendDevice(dev *configs.Device) error {
+	if p.blockID < 0 {
+		return errors.New("the program is finalized")
+	}
+	if p.hasWildCard {
+		// All entries after a wildcard entry are ignored
+		return nil
+	}
+
+	bpfType := int32(-1)
+	hasType := true
+	switch dev.Type {
+	case 'c':
+		bpfType = int32(unix.BPF_DEVCG_DEV_CHAR)
+	case 'b':
+		bpfType = int32(unix.BPF_DEVCG_DEV_BLOCK)
+	case 'a':
+		hasType = false
+	default:
+		// if not specified in OCI json, typ is set to DeviceTypeAll
+		return errors.Errorf("invalid DeviceType %q", string(dev.Type))
+	}
+	if dev.Major > math.MaxUint32 {
+		return errors.Errorf("invalid major %d", dev.Major)
+	}
+	if dev.Minor > math.MaxUint32 {
+		return errors.Errorf("invalid minor %d", dev.Minor)
+	}
+	hasMajor := dev.Major >= 0 // if not specified in OCI json, major is set to -1
+	hasMinor := dev.Minor >= 0
+	bpfAccess := int32(0)
+	for _, r := range dev.Permissions {
+		switch r {
+		case 'r':
+			bpfAccess |= unix.BPF_DEVCG_ACC_READ
+		case 'w':
+			bpfAccess |= unix.BPF_DEVCG_ACC_WRITE
+		case 'm':
+			bpfAccess |= unix.BPF_DEVCG_ACC_MKNOD
+		default:
+			return errors.Errorf("unknown device access %v", r)
+		}
+	}
+	// If the access is rwm, skip the check.
+	hasAccess := bpfAccess != (unix.BPF_DEVCG_ACC_READ | unix.BPF_DEVCG_ACC_WRITE | unix.BPF_DEVCG_ACC_MKNOD)
+
+	blockSym := fmt.Sprintf("block-%d", p.blockID)
+	nextBlockSym := fmt.Sprintf("block-%d", p.blockID+1)
+	prevBlockLastIdx := len(p.insts) - 1
+	if hasType {
+		p.insts = append(p.insts,
+			// if (R2 != bpfType) goto next
+			asm.JNE.Imm(asm.R2, bpfType, nextBlockSym),
+		)
+	}
+	if hasAccess {
+		p.insts = append(p.insts,
+			// if (R3 & bpfAccess == 0 /* use R1 as a temp var */) goto next
+			asm.Mov.Reg32(asm.R1, asm.R3),
+			asm.And.Imm32(asm.R1, bpfAccess),
+			asm.JEq.Imm(asm.R1, 0, nextBlockSym),
+		)
+	}
+	if hasMajor {
+		p.insts = append(p.insts,
+			// if (R4 != major) goto next
+			asm.JNE.Imm(asm.R4, int32(dev.Major), nextBlockSym),
+		)
+	}
+	if hasMinor {
+		p.insts = append(p.insts,
+			// if (R5 != minor) goto next
+			asm.JNE.Imm(asm.R5, int32(dev.Minor), nextBlockSym),
+		)
+	}
+	if !hasType && !hasAccess && !hasMajor && !hasMinor {
+		p.hasWildCard = true
+	}
+	p.insts = append(p.insts, acceptBlock(dev.Allow)...)
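	// Editor's note (illustrative, not part of the vendored patch): the jumps
	// emitted above reference blockSym/nextBlockSym by name; naming the first
	// instruction of this block below is what lets the assembler resolve the
	// previous block's "goto next" jumps to this offset.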
+ // set blockSym to the first instruction we added in this iteration + p.insts[prevBlockLastIdx+1] = p.insts[prevBlockLastIdx+1].Sym(blockSym) + p.blockID++ + return nil +} + +func (p *program) finalize() (asm.Instructions, error) { + if p.hasWildCard { + // acceptBlock with asm.Return() is already inserted + return p.insts, nil + } + blockSym := fmt.Sprintf("block-%d", p.blockID) + p.insts = append(p.insts, + // R0 <- 0 + asm.Mov.Imm32(asm.R0, 0).Sym(blockSym), + asm.Return(), + ) + p.blockID = -1 + return p.insts, nil +} + +func acceptBlock(accept bool) asm.Instructions { + v := int32(0) + if accept { + v = 1 + } + return []asm.Instruction{ + // R0 <- v + asm.Mov.Imm32(asm.R0, v), + asm.Return(), + } +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf.go new file mode 100644 index 0000000000..4795e0aa3f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf.go @@ -0,0 +1,45 @@ +package ebpf + +import ( + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// LoadAttachCgroupDeviceFilter installs eBPF device filter program to /sys/fs/cgroup/ directory. +// +// Requires the system to be running in cgroup2 unified-mode with kernel >= 4.15 . +// +// https://github.com/torvalds/linux/commit/ebc614f687369f9df99828572b1d85a7c2de3d92 +func LoadAttachCgroupDeviceFilter(insts asm.Instructions, license string, dirFD int) (func() error, error) { + nilCloser := func() error { + return nil + } + // Increase `ulimit -l` limit to avoid BPF_PROG_LOAD error (#2167). + // This limit is not inherited into the container. 
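	// Editor's note (illustrative, not part of the vendored patch): on kernels
	// that charge BPF objects against RLIMIT_MEMLOCK, loading can otherwise
	// fail; the error from Setrlimit below is deliberately ignored, since the
	// subsequent program load will surface any real failure.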
+ memlockLimit := &unix.Rlimit{ + Cur: unix.RLIM_INFINITY, + Max: unix.RLIM_INFINITY, + } + _ = unix.Setrlimit(unix.RLIMIT_MEMLOCK, memlockLimit) + spec := &ebpf.ProgramSpec{ + Type: ebpf.CGroupDevice, + Instructions: insts, + License: license, + } + prog, err := ebpf.NewProgram(spec) + if err != nil { + return nilCloser, err + } + if err := prog.Attach(dirFD, ebpf.AttachCGroupDevice, unix.BPF_F_ALLOW_MULTI); err != nil { + return nilCloser, errors.Wrap(err, "failed to call BPF_PROG_ATTACH (BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI)") + } + closer := func() error { + if err := prog.Detach(dirFD, ebpf.AttachCGroupDevice, unix.BPF_F_ALLOW_MULTI); err != nil { + return errors.Wrap(err, "failed to call BPF_PROG_DETACH (BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI)") + } + return nil + } + return closer, nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go index 512fd70010..ec148b4890 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go @@ -5,7 +5,6 @@ package fs import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -33,14 +32,6 @@ var ( &FreezerGroup{}, &NameGroup{GroupName: "name=systemd", Join: true}, } - subsystemsUnified = subsystemSet{ - &CpusetGroupV2{}, - &FreezerGroupV2{}, - &CpuGroupV2{}, - &MemoryGroupV2{}, - &IOGroupV2{}, - &PidsGroupV2{}, - } HugePageSizes, _ = cgroups.GetHugePageSize() ) @@ -138,9 +129,6 @@ func isIgnorableError(rootless bool, err error) bool { } func (m *Manager) getSubsystems() subsystemSet { - if cgroups.IsCgroup2UnifiedMode() { - return subsystemsUnified - } return subsystemsLegacy } @@ -224,6 +212,10 @@ func (m *Manager) GetPaths() map[string]string { return paths } +func (m *Manager) GetUnifiedPath() (string, error) { + return "", errors.New("unified path is only supported when running in unified mode") +} + func (m *Manager) GetStats() (*cgroups.Stats, error) { m.mu.Lock() defer m.mu.Unlock() @@ -377,23 +369,6 @@ func (raw *cgroupData) join(subsystem string) (string, error) { return path, nil } -func writeFile(dir, file, data string) error { - // Normally dir should not be empty, one case is that cgroup subsystem - // is not mounted, we will get empty dir, and we want it fail here. 
- if dir == "" { - return fmt.Errorf("no such directory for %s", file) - } - if err := ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700); err != nil { - return fmt.Errorf("failed to write %v to %v: %v", data, file, err) - } - return nil -} - -func readFile(dir, file string) (string, error) { - data, err := ioutil.ReadFile(filepath.Join(dir, file)) - return string(data), err -} - func removePath(p string, err error) error { if err != nil { return err @@ -430,3 +405,7 @@ func CheckCpushares(path string, c uint64) error { return nil } + +func (m *Manager) GetCgroups() (*configs.Cgroup, error) { + return m.Cgroups, nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go index a142cb991d..52c118d68c 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" ) @@ -31,41 +32,41 @@ func (s *BlkioGroup) Apply(d *cgroupData) error { func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error { if cgroup.Resources.BlkioWeight != 0 { - if err := writeFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil { + if err := fscommon.WriteFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil { return err } } if cgroup.Resources.BlkioLeafWeight != 0 { - if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil { + if err := fscommon.WriteFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil { return err } } for _, wd := range cgroup.Resources.BlkioWeightDevice { - if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil { + if err := fscommon.WriteFile(path, "blkio.weight_device", wd.WeightString()); err != nil { return err } - if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil { + if err := fscommon.WriteFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil { return err } } for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice { - if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil { + if err := fscommon.WriteFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil { return err } } for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice { - if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil { + if err := fscommon.WriteFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil { return err } } for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice { - if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil { + if err := fscommon.WriteFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil { return err } } for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice { - if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil { + if err := fscommon.WriteFile(path, "blkio.throttle.write_iops_device", td.String()); err 
!= nil { return err } } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go index e240a8313a..4db7b647cf 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go @@ -9,6 +9,7 @@ import ( "strconv" "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" ) @@ -51,12 +52,12 @@ func (s *CpuGroup) ApplyDir(path string, cgroup *configs.Cgroup, pid int) error func (s *CpuGroup) SetRtSched(path string, cgroup *configs.Cgroup) error { if cgroup.Resources.CpuRtPeriod != 0 { - if err := writeFile(path, "cpu.rt_period_us", strconv.FormatUint(cgroup.Resources.CpuRtPeriod, 10)); err != nil { + if err := fscommon.WriteFile(path, "cpu.rt_period_us", strconv.FormatUint(cgroup.Resources.CpuRtPeriod, 10)); err != nil { return err } } if cgroup.Resources.CpuRtRuntime != 0 { - if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil { + if err := fscommon.WriteFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil { return err } } @@ -65,17 +66,17 @@ func (s *CpuGroup) SetRtSched(path string, cgroup *configs.Cgroup) error { func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error { if cgroup.Resources.CpuShares != 0 { - if err := writeFile(path, "cpu.shares", strconv.FormatUint(cgroup.Resources.CpuShares, 10)); err != nil { + if err := fscommon.WriteFile(path, "cpu.shares", strconv.FormatUint(cgroup.Resources.CpuShares, 10)); err != nil { return err } } if cgroup.Resources.CpuPeriod != 0 { - if err := writeFile(path, "cpu.cfs_period_us", strconv.FormatUint(cgroup.Resources.CpuPeriod, 10)); err != nil { + if err := fscommon.WriteFile(path, "cpu.cfs_period_us", strconv.FormatUint(cgroup.Resources.CpuPeriod, 10)); err != nil { return err } } if cgroup.Resources.CpuQuota != 0 { - if err := writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil { + if err := fscommon.WriteFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil { return err } } @@ -98,7 +99,7 @@ func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error { sc := bufio.NewScanner(f) for sc.Scan() { - t, v, err := getCgroupParamKeyValue(sc.Text()) + t, v, err := fscommon.GetCgroupParamKeyValue(sc.Text()) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_v2.go deleted file mode 100644 index 245071ae50..0000000000 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_v2.go +++ /dev/null @@ -1,92 +0,0 @@ -// +build linux - -package fs - -import ( - "bufio" - "os" - "path/filepath" - "strconv" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type CpuGroupV2 struct { -} - -func (s *CpuGroupV2) Name() string { - return "cpu" -} - -func (s *CpuGroupV2) Apply(d *cgroupData) error { - // We always want to join the cpu group, to allow fair cpu scheduling - // on a container basis - path, err := d.path("cpu") 
- if err != nil && !cgroups.IsNotFound(err) { - return err - } - return s.ApplyDir(path, d.config, d.pid) -} - -func (s *CpuGroupV2) ApplyDir(path string, cgroup *configs.Cgroup, pid int) error { - // This might happen if we have no cpu cgroup mounted. - // Just do nothing and don't fail. - if path == "" { - return nil - } - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - return cgroups.WriteCgroupProc(path, pid) -} - -func (s *CpuGroupV2) Set(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.CpuWeight != 0 { - if err := writeFile(path, "cpu.weight", strconv.FormatUint(cgroup.Resources.CpuWeight, 10)); err != nil { - return err - } - } - - if cgroup.Resources.CpuMax != "" { - if err := writeFile(path, "cpu.max", cgroup.Resources.CpuMax); err != nil { - return err - } - } - - return nil -} - -func (s *CpuGroupV2) Remove(d *cgroupData) error { - return removePath(d.path("cpu")) -} - -func (s *CpuGroupV2) GetStats(path string, stats *cgroups.Stats) error { - f, err := os.Open(filepath.Join(path, "cpu.stat")) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer f.Close() - - sc := bufio.NewScanner(f) - for sc.Scan() { - t, v, err := getCgroupParamKeyValue(sc.Text()) - if err != nil { - return err - } - switch t { - case "usage_usec": - stats.CpuStats.CpuUsage.TotalUsage = v * 1000 - - case "user_usec": - stats.CpuStats.CpuUsage.UsageInUsermode = v * 1000 - - case "system_usec": - stats.CpuStats.CpuUsage.UsageInKernelmode = v * 1000 - } - } - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go index 53afbaddf1..95dc9a1ecc 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/system" ) @@ -51,7 +52,7 @@ func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error { return err } - totalUsage, err := getCgroupParamUint(path, "cpuacct.usage") + totalUsage, err := fscommon.GetCgroupParamUint(path, "cpuacct.usage") if err != nil { return err } @@ -85,8 +86,8 @@ func getCpuUsageBreakdown(path string) (uint64, uint64, error) { return 0, 0, err } fields := strings.Fields(string(data)) - if len(fields) != 4 { - return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat)) + if len(fields) < 4 { + return 0, 0, fmt.Errorf("failure - %s is expected to have at least 4 fields", filepath.Join(path, cgroupCpuacctStat)) } if fields[0] != userField { return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField) diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go index 5a1d152ea1..bfc900e3f6 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go @@ -10,6 +10,7 @@ import ( "path/filepath" 
"github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" ) @@ -31,12 +32,12 @@ func (s *CpusetGroup) Apply(d *cgroupData) error { func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error { if cgroup.Resources.CpusetCpus != "" { - if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil { + if err := fscommon.WriteFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil { return err } } if cgroup.Resources.CpusetMems != "" { - if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil { + if err := fscommon.WriteFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil { return err } } @@ -135,12 +136,12 @@ func (s *CpusetGroup) copyIfNeeded(current, parent string) error { } if s.isEmpty(currentCpus) { - if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + if err := fscommon.WriteFile(current, "cpuset.cpus", string(parentCpus)); err != nil { return err } } if s.isEmpty(currentMems) { - if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { + if err := fscommon.WriteFile(current, "cpuset.mems", string(parentMems)); err != nil { return err } } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_v2.go deleted file mode 100644 index fa88cfb7d6..0000000000 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_v2.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build linux - -package fs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" -) - -type CpusetGroupV2 struct { -} - -func (s *CpusetGroupV2) Name() string { - return "cpuset" -} - -func (s *CpusetGroupV2) Apply(d *cgroupData) error { - dir, err := d.path("cpuset") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return s.ApplyDir(dir, d.config, d.pid) -} - -func (s *CpusetGroupV2) Set(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.CpusetCpus != "" { - if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil { - return err - } - } - if cgroup.Resources.CpusetMems != "" { - if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil { - return err - } - } - return nil -} - -func (s *CpusetGroupV2) Remove(d *cgroupData) error { - return removePath(d.path("cpuset")) -} - -func (s *CpusetGroupV2) GetStats(path string, stats *cgroups.Stats) error { - return nil -} - -func (s *CpusetGroupV2) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error { - // This might happen if we have no cpuset cgroup mounted. - // Just do nothing and don't fail. - if dir == "" { - return nil - } - mountInfo, err := ioutil.ReadFile("/proc/self/mountinfo") - if err != nil { - return err - } - root := filepath.Dir(cgroups.GetClosestMountpointAncestor(dir, string(mountInfo))) - // 'ensureParent' start with parent because we don't want to - // explicitly inherit from parent, it could conflict with - // 'cpuset.cpu_exclusive'. 
- if err := s.ensureParent(filepath.Dir(dir), root); err != nil { - return err - } - if err := os.MkdirAll(dir, 0755); err != nil { - return err - } - // We didn't inherit cpuset configs from parent, but we have - // to ensure cpuset configs are set before moving task into the - // cgroup. - // The logic is, if user specified cpuset configs, use these - // specified configs, otherwise, inherit from parent. This makes - // cpuset configs work correctly with 'cpuset.cpu_exclusive', and - // keep backward compatibility. - if err := s.ensureCpusAndMems(dir, cgroup); err != nil { - return err - } - - // because we are not using d.join we need to place the pid into the procs file - // unlike the other subsystems - return cgroups.WriteCgroupProc(dir, pid) -} - -func (s *CpusetGroupV2) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { - if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus.effective")); err != nil { - return - } - if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems.effective")); err != nil { - return - } - return cpus, mems, nil -} - -// ensureParent makes sure that the parent directory of current is created -// and populated with the proper cpus and mems files copied from -// it's parent. -func (s *CpusetGroupV2) ensureParent(current, root string) error { - parent := filepath.Dir(current) - if libcontainerUtils.CleanPath(parent) == root { - return nil - } - // Avoid infinite recursion. - if parent == current { - return fmt.Errorf("cpuset: cgroup parent path outside cgroup root") - } - if err := s.ensureParent(parent, root); err != nil { - return err - } - if err := os.MkdirAll(current, 0755); err != nil { - return err - } - return s.copyIfNeeded(current, parent) -} - -// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent -// directory to the current directory if the file's contents are 0 -func (s *CpusetGroupV2) copyIfNeeded(current, parent string) error { - var ( - err error - currentCpus, currentMems []byte - parentCpus, parentMems []byte - ) - - if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { - return err - } - if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { - return err - } - - if s.isEmpty(currentCpus) { - if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { - return err - } - } - if s.isEmpty(currentMems) { - if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { - return err - } - } - return nil -} - -func (s *CpusetGroupV2) isEmpty(b []byte) bool { - return len(bytes.Trim(b, "\n")) == 0 -} - -func (s *CpusetGroupV2) ensureCpusAndMems(path string, cgroup *configs.Cgroup) error { - if err := s.Set(path, cgroup); err != nil { - return err - } - return s.copyIfNeeded(path, filepath.Dir(path)) -} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go index 0ac5b4ed70..036c8db773 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go @@ -4,6 +4,7 @@ package fs import ( "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/system" ) @@ -37,7 +38,7 @@ func 
(s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error { if dev.Allow { file = "devices.allow" } - if err := writeFile(path, file, dev.CgroupString()); err != nil { + if err := fscommon.WriteFile(path, file, dev.CgroupString()); err != nil { return err } } @@ -45,25 +46,25 @@ func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error { } if cgroup.Resources.AllowAllDevices != nil { if *cgroup.Resources.AllowAllDevices == false { - if err := writeFile(path, "devices.deny", "a"); err != nil { + if err := fscommon.WriteFile(path, "devices.deny", "a"); err != nil { return err } for _, dev := range cgroup.Resources.AllowedDevices { - if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil { + if err := fscommon.WriteFile(path, "devices.allow", dev.CgroupString()); err != nil { return err } } return nil } - if err := writeFile(path, "devices.allow", "a"); err != nil { + if err := fscommon.WriteFile(path, "devices.allow", "a"); err != nil { return err } } for _, dev := range cgroup.Resources.DeniedDevices { - if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil { + if err := fscommon.WriteFile(path, "devices.deny", dev.CgroupString()); err != nil { return err } } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go index 4b19f8a970..9dc81bdb84 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go @@ -8,6 +8,7 @@ import ( "time" "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" ) @@ -34,11 +35,11 @@ func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error { // state, let's write again this state, hoping it's going to be properly // set this time. Otherwise, this loop could run infinitely, waiting for // a state change that would never happen. 
- if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil { + if err := fscommon.WriteFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil { return err } - state, err := readFile(path, "freezer.state") + state, err := fscommon.ReadFile(path, "freezer.state") if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_v2.go deleted file mode 100644 index 186de9ab41..0000000000 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_v2.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build linux - -package fs - -import ( - "fmt" - "strings" - "time" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type FreezerGroupV2 struct { -} - -func (s *FreezerGroupV2) Name() string { - return "freezer" -} - -func (s *FreezerGroupV2) Apply(d *cgroupData) error { - _, err := d.join("freezer") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *FreezerGroupV2) Set(path string, cgroup *configs.Cgroup) error { - var desiredState string - filename := "cgroup.freeze" - if cgroup.Resources.Freezer == configs.Frozen { - desiredState = "1" - } else { - desiredState = "0" - } - - switch cgroup.Resources.Freezer { - case configs.Frozen, configs.Thawed: - for { - // In case this loop does not exit because it doesn't get the expected - // state, let's write again this state, hoping it's going to be properly - // set this time. Otherwise, this loop could run infinitely, waiting for - // a state change that would never happen. 
- if err := writeFile(path, filename, desiredState); err != nil { - return err - } - - state, err := readFile(path, filename) - if err != nil { - return err - } - if strings.TrimSpace(state) == desiredState { - break - } - - time.Sleep(1 * time.Millisecond) - } - case configs.Undefined: - return nil - default: - return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer)) - } - - return nil -} - -func (s *FreezerGroupV2) Remove(d *cgroupData) error { - return removePath(d.path("freezer")) -} - -func (s *FreezerGroupV2) GetStats(path string, stats *cgroups.Stats) error { - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go index 2f9727719d..68719c2e6d 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" ) @@ -28,7 +29,7 @@ func (s *HugetlbGroup) Apply(d *cgroupData) error { func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error { for _, hugetlb := range cgroup.Resources.HugetlbLimit { - if err := writeFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.FormatUint(hugetlb.Limit, 10)); err != nil { + if err := fscommon.WriteFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.FormatUint(hugetlb.Limit, 10)); err != nil { return err } } @@ -44,21 +45,21 @@ func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error { hugetlbStats := cgroups.HugetlbStats{} for _, pageSize := range HugePageSizes { usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".") - value, err := getCgroupParamUint(path, usage) + value, err := fscommon.GetCgroupParamUint(path, usage) if err != nil { return fmt.Errorf("failed to parse %s - %v", usage, err) } hugetlbStats.Usage = value maxUsage := strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, ".") - value, err = getCgroupParamUint(path, maxUsage) + value, err = fscommon.GetCgroupParamUint(path, maxUsage) if err != nil { return fmt.Errorf("failed to parse %s - %v", maxUsage, err) } hugetlbStats.MaxUsage = value failcnt := strings.Join([]string{"hugetlb", pageSize, "failcnt"}, ".") - value, err = getCgroupParamUint(path, failcnt) + value, err = fscommon.GetCgroupParamUint(path, failcnt) if err != nil { return fmt.Errorf("failed to parse %s - %v", failcnt, err) } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/io_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/io_v2.go deleted file mode 100644 index 84b1829451..0000000000 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/io_v2.go +++ /dev/null @@ -1,191 +0,0 @@ -// +build linux - -package fs - -import ( - "bufio" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type IOGroupV2 struct { -} - -func (s *IOGroupV2) Name() string { - return "blkio" -} - -func (s *IOGroupV2) Apply(d *cgroupData) error { - 
_, err := d.join("blkio") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *IOGroupV2) Set(path string, cgroup *configs.Cgroup) error { - cgroupsv2 := cgroups.IsCgroup2UnifiedMode() - - if cgroup.Resources.BlkioWeight != 0 { - filename := "blkio.weight" - if cgroupsv2 { - filename = "io.bfq.weight" - } - if err := writeFile(path, filename, strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil { - return err - } - } - - if cgroup.Resources.BlkioLeafWeight != 0 { - if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil { - return err - } - } - for _, wd := range cgroup.Resources.BlkioWeightDevice { - if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil { - return err - } - if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil { - return err - } - } - for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice { - if cgroupsv2 { - if err := writeFile(path, "io.max", td.StringName("rbps")); err != nil { - return err - } - } else { - if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil { - return err - } - } - } - for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice { - if cgroupsv2 { - if err := writeFile(path, "io.max", td.StringName("wbps")); err != nil { - return err - } - } else { - if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil { - return err - } - } - } - for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice { - if cgroupsv2 { - if err := writeFile(path, "io.max", td.StringName("riops")); err != nil { - return err - } - } else { - if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil { - return err - } - } - } - for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice { - if cgroupsv2 { - if err := writeFile(path, "io.max", td.StringName("wiops")); err != nil { - return err - } - } else { - if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil { - return err - } - } - } - - return nil -} - -func (s *IOGroupV2) Remove(d *cgroupData) error { - return removePath(d.path("blkio")) -} - -func readCgroup2MapFile(path string, name string) (map[string][]string, error) { - ret := map[string][]string{} - p := filepath.Join("/sys/fs/cgroup", path, name) - f, err := os.Open(p) - if err != nil { - if os.IsNotExist(err) { - return ret, nil - } - return nil, err - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(line) - if len(parts) < 2 { - continue - } - ret[parts[0]] = parts[1:] - } - if err := scanner.Err(); err != nil { - return nil, err - } - return ret, nil -} - -func (s *IOGroupV2) getCgroupV2Stats(path string, stats *cgroups.Stats) error { - // more details on the io.stat file format: https://www.kernel.org/doc/Documentation/cgroup-v2.txt - var ioServiceBytesRecursive []cgroups.BlkioStatEntry - values, err := readCgroup2MapFile(path, "io.stat") - if err != nil { - return err - } - for k, v := range values { - d := strings.Split(k, ":") - if len(d) != 2 { - continue - } - minor, err := strconv.ParseUint(d[0], 10, 0) - if err != nil { - return err - } - major, err := strconv.ParseUint(d[1], 10, 0) - if err != nil { - return err - } - - for _, item := range v { - d := strings.Split(item, "=") - if len(d) != 2 { - continue - } - op := d[0] - - // Accommodate the 
cgroup v1 naming - switch op { - case "rbytes": - op = "read" - case "wbytes": - op = "write" - } - - value, err := strconv.ParseUint(d[1], 10, 0) - if err != nil { - return err - } - - entry := cgroups.BlkioStatEntry{ - Op: op, - Major: major, - Minor: minor, - Value: value, - } - ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) - } - } - stats.BlkioStats = cgroups.BlkioStats{IoServiceBytesRecursive: ioServiceBytesRecursive} - return nil -} - -func (s *IOGroupV2) GetStats(path string, stats *cgroups.Stats) error { - return s.getCgroupV2Stats(path, stats) -} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go index d5310d569f..f81ed050b2 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" ) @@ -84,28 +85,28 @@ func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error { // for memory and swap memory, so it won't fail because the new // value and the old value don't fit kernel's validation. if cgroup.Resources.MemorySwap == -1 || memoryUsage.Limit < uint64(cgroup.Resources.MemorySwap) { - if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { + if err := fscommon.WriteFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { return err } - if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { + if err := fscommon.WriteFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { return err } } else { - if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { + if err := fscommon.WriteFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { return err } - if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { + if err := fscommon.WriteFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { return err } } } else { if cgroup.Resources.Memory != 0 { - if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { + if err := fscommon.WriteFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { return err } } if cgroup.Resources.MemorySwap != 0 { - if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { + if err := fscommon.WriteFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { return err } } @@ -126,25 +127,25 @@ func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error { } if cgroup.Resources.MemoryReservation != 0 { - if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil { + if err := fscommon.WriteFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil { return err } } if 
cgroup.Resources.KernelMemoryTCP != 0 { - if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemoryTCP, 10)); err != nil { + if err := fscommon.WriteFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemoryTCP, 10)); err != nil { return err } } if cgroup.Resources.OomKillDisable { - if err := writeFile(path, "memory.oom_control", "1"); err != nil { + if err := fscommon.WriteFile(path, "memory.oom_control", "1"); err != nil { return err } } if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 { return nil } else if *cgroup.Resources.MemorySwappiness <= 100 { - if err := writeFile(path, "memory.swappiness", strconv.FormatUint(*cgroup.Resources.MemorySwappiness, 10)); err != nil { + if err := fscommon.WriteFile(path, "memory.swappiness", strconv.FormatUint(*cgroup.Resources.MemorySwappiness, 10)); err != nil { return err } } else { @@ -171,7 +172,7 @@ func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { sc := bufio.NewScanner(statsFile) for sc.Scan() { - t, v, err := getCgroupParamKeyValue(sc.Text()) + t, v, err := fscommon.GetCgroupParamKeyValue(sc.Text()) if err != nil { return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err) } @@ -201,7 +202,7 @@ func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { stats.MemoryStats.KernelTCPUsage = kernelTCPUsage useHierarchy := strings.Join([]string{"memory", "use_hierarchy"}, ".") - value, err := getCgroupParamUint(path, useHierarchy) + value, err := fscommon.GetCgroupParamUint(path, useHierarchy) if err != nil { return err } @@ -233,7 +234,7 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) { failcnt := strings.Join([]string{moduleName, "failcnt"}, ".") limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".") - value, err := getCgroupParamUint(path, usage) + value, err := fscommon.GetCgroupParamUint(path, usage) if err != nil { if moduleName != "memory" && os.IsNotExist(err) { return cgroups.MemoryData{}, nil @@ -241,7 +242,7 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) { return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err) } memoryData.Usage = value - value, err = getCgroupParamUint(path, maxUsage) + value, err = fscommon.GetCgroupParamUint(path, maxUsage) if err != nil { if moduleName != "memory" && os.IsNotExist(err) { return cgroups.MemoryData{}, nil @@ -249,7 +250,7 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) { return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err) } memoryData.MaxUsage = value - value, err = getCgroupParamUint(path, failcnt) + value, err = fscommon.GetCgroupParamUint(path, failcnt) if err != nil { if moduleName != "memory" && os.IsNotExist(err) { return cgroups.MemoryData{}, nil @@ -257,7 +258,7 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) { return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err) } memoryData.Failcnt = value - value, err = getCgroupParamUint(path, limit) + value, err = fscommon.GetCgroupParamUint(path, limit) if err != nil { if moduleName != "memory" && os.IsNotExist(err) { return cgroups.MemoryData{}, nil diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_v2.go deleted file mode 100644 index 
2ad997bcc2..0000000000 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_v2.go +++ /dev/null @@ -1,164 +0,0 @@ -// +build linux - -package fs - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type MemoryGroupV2 struct { -} - -func (s *MemoryGroupV2) Name() string { - return "memory" -} - -func (s *MemoryGroupV2) Apply(d *cgroupData) (err error) { - path, err := d.path("memory") - if err != nil && !cgroups.IsNotFound(err) { - return err - } else if path == "" { - return nil - } - if memoryAssigned(d.config) { - if _, err := os.Stat(path); os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - // Only enable kernel memory accouting when this cgroup - // is created by libcontainer, otherwise we might get - // error when people use `cgroupsPath` to join an existed - // cgroup whose kernel memory is not initialized. - if err := EnableKernelMemoryAccounting(path); err != nil { - return err - } - } - } - defer func() { - if err != nil { - os.RemoveAll(path) - } - }() - - // We need to join memory cgroup after set memory limits, because - // kmem.limit_in_bytes can only be set when the cgroup is empty. - _, err = d.join("memory") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func setMemoryAndSwapCgroups(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.MemorySwap != 0 { - if err := writeFile(path, "memory.swap.max", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { - return err - } - } - if cgroup.Resources.Memory != 0 { - if err := writeFile(path, "memory.max", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { - return err - } - } - return nil -} - -func (s *MemoryGroupV2) Set(path string, cgroup *configs.Cgroup) error { - - if err := setMemoryAndSwapCgroups(path, cgroup); err != nil { - return err - } - - if cgroup.Resources.KernelMemory != 0 { - if err := setKernelMemory(path, cgroup.Resources.KernelMemory); err != nil { - return err - } - } - - if cgroup.Resources.MemoryReservation != 0 { - if err := writeFile(path, "memory.high", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil { - return err - } - } - - return nil -} - -func (s *MemoryGroupV2) Remove(d *cgroupData) error { - return removePath(d.path("memory")) -} - -func (s *MemoryGroupV2) GetStats(path string, stats *cgroups.Stats) error { - // Set stats from memory.stat. 
- statsFile, err := os.Open(filepath.Join(path, "memory.stat")) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer statsFile.Close() - - sc := bufio.NewScanner(statsFile) - for sc.Scan() { - t, v, err := getCgroupParamKeyValue(sc.Text()) - if err != nil { - return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err) - } - stats.MemoryStats.Stats[t] = v - } - stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"] - - memoryUsage, err := getMemoryDataV2(path, "") - if err != nil { - return err - } - stats.MemoryStats.Usage = memoryUsage - swapUsage, err := getMemoryDataV2(path, "swap") - if err != nil { - return err - } - stats.MemoryStats.SwapUsage = swapUsage - - stats.MemoryStats.UseHierarchy = true - return nil -} - -func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { - memoryData := cgroups.MemoryData{} - - moduleName := "memory" - if name != "" { - moduleName = strings.Join([]string{"memory", name}, ".") - } - usage := strings.Join([]string{moduleName, "current"}, ".") - limit := strings.Join([]string{moduleName, "max"}, ".") - - value, err := getCgroupParamUint(path, usage) - if err != nil { - if moduleName != "memory" && os.IsNotExist(err) { - return cgroups.MemoryData{}, nil - } - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err) - } - memoryData.Usage = value - - value, err = getCgroupParamUint(path, limit) - if err != nil { - if moduleName != "memory" && os.IsNotExist(err) { - return cgroups.MemoryData{}, nil - } - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err) - } - memoryData.Limit = value - - return memoryData, nil -} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go index 8e74b645ea..0212015255 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go @@ -6,6 +6,7 @@ import ( "strconv" "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" ) @@ -26,7 +27,7 @@ func (s *NetClsGroup) Apply(d *cgroupData) error { func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error { if cgroup.Resources.NetClsClassid != 0 { - if err := writeFile(path, "net_cls.classid", strconv.FormatUint(uint64(cgroup.Resources.NetClsClassid), 10)); err != nil { + if err := fscommon.WriteFile(path, "net_cls.classid", strconv.FormatUint(uint64(cgroup.Resources.NetClsClassid), 10)); err != nil { return err } } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go index d0ab2af894..2bdeedf80a 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go @@ -4,6 +4,7 @@ package fs import ( "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" ) @@ -24,7 +25,7 @@ func (s *NetPrioGroup) Apply(d *cgroupData) error { func (s *NetPrioGroup) Set(path string, cgroup 
*configs.Cgroup) error { for _, prioMap := range cgroup.Resources.NetPrioIfpriomap { - if err := writeFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil { + if err := fscommon.WriteFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil { return err } } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go index f1e3720551..7bf6801358 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go @@ -8,6 +8,7 @@ import ( "strconv" "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" ) @@ -35,7 +36,7 @@ func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error { limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10) } - if err := writeFile(path, "pids.max", limit); err != nil { + if err := fscommon.WriteFile(path, "pids.max", limit); err != nil { return err } } @@ -48,12 +49,12 @@ func (s *PidsGroup) Remove(d *cgroupData) error { } func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error { - current, err := getCgroupParamUint(path, "pids.current") + current, err := fscommon.GetCgroupParamUint(path, "pids.current") if err != nil { return fmt.Errorf("failed to parse pids.current - %s", err) } - maxString, err := getCgroupParamString(path, "pids.max") + maxString, err := fscommon.GetCgroupParamString(path, "pids.max") if err != nil { return fmt.Errorf("failed to parse pids.max - %s", err) } @@ -61,7 +62,7 @@ func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error { // Default if pids.max == "max" is 0 -- which represents "no limit". var max uint64 if maxString != "max" { - max, err = parseUint(maxString, 10, 64) + max, err = fscommon.ParseUint(maxString, 10, 64) if err != nil { return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max")) } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_v2.go deleted file mode 100644 index 3413a2a0d1..0000000000 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_v2.go +++ /dev/null @@ -1,107 +0,0 @@ -// +build linux - -package fs - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "golang.org/x/sys/unix" -) - -type PidsGroupV2 struct { -} - -func (s *PidsGroupV2) Name() string { - return "pids" -} - -func (s *PidsGroupV2) Apply(d *cgroupData) error { - _, err := d.join("pids") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *PidsGroupV2) Set(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.PidsLimit != 0 { - // "max" is the fallback value. 
- limit := "max" - - if cgroup.Resources.PidsLimit > 0 { - limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10) - } - - if err := writeFile(path, "pids.max", limit); err != nil { - return err - } - } - - return nil -} - -func (s *PidsGroupV2) Remove(d *cgroupData) error { - return removePath(d.path("pids")) -} - -func isNOTSUP(err error) bool { - switch err := err.(type) { - case *os.PathError: - return err.Err == unix.ENOTSUP - default: - return false - } -} - -func (s *PidsGroupV2) GetStats(path string, stats *cgroups.Stats) error { - current, err := getCgroupParamUint(path, "pids.current") - if os.IsNotExist(err) { - // if the controller is not enabled, let's read the list - // PIDs (or threads if cgroup.threads is enabled) - contents, err := ioutil.ReadFile(filepath.Join(path, "cgroup.procs")) - if err != nil && isNOTSUP(err) { - contents, err = ioutil.ReadFile(filepath.Join(path, "cgroup.threads")) - } - if err != nil { - return err - } - pids := make(map[string]string) - for _, i := range strings.Split(string(contents), "\n") { - if i != "" { - pids[i] = i - } - } - stats.PidsStats.Current = uint64(len(pids)) - stats.PidsStats.Limit = 0 - return nil - - } - if err != nil { - return fmt.Errorf("failed to parse pids.current - %s", err) - } - - maxString, err := getCgroupParamString(path, "pids.max") - if err != nil { - return fmt.Errorf("failed to parse pids.max - %s", err) - } - - // Default if pids.max == "max" is 0 -- which represents "no limit". - var max uint64 - if maxString != "max" { - max, err = parseUint(maxString, 10, 64) - if err != nil { - return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max")) - } - } - - stats.PidsStats.Current = current - stats.PidsStats.Limit = max - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go new file mode 100644 index 0000000000..f0f5df09a8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go @@ -0,0 +1,56 @@ +// +build linux + +package fs2 + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/runc/libcontainer/configs" +) + +func setCpu(dirPath string, cgroup *configs.Cgroup) error { + if cgroup.Resources.CpuWeight != 0 { + if err := fscommon.WriteFile(dirPath, "cpu.weight", strconv.FormatUint(cgroup.Resources.CpuWeight, 10)); err != nil { + return err + } + } + + if cgroup.Resources.CpuMax != "" { + if err := fscommon.WriteFile(dirPath, "cpu.max", cgroup.Resources.CpuMax); err != nil { + return err + } + } + + return nil +} +func statCpu(dirPath string, stats *cgroups.Stats) error { + f, err := os.Open(filepath.Join(dirPath, "cpu.stat")) + if err != nil { + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + t, v, err := fscommon.GetCgroupParamKeyValue(sc.Text()) + if err != nil { + return err + } + switch t { + case "usage_usec": + stats.CpuStats.CpuUsage.TotalUsage = v * 1000 + + case "user_usec": + stats.CpuStats.CpuUsage.UsageInUsermode = v * 1000 + + case "system_usec": + stats.CpuStats.CpuUsage.UsageInKernelmode = v * 1000 + } + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go 
b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go new file mode 100644 index 0000000000..6492ac93c5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go @@ -0,0 +1,22 @@ +// +build linux + +package fs2 + +import ( + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/runc/libcontainer/configs" +) + +func setCpuset(dirPath string, cgroup *configs.Cgroup) error { + if cgroup.Resources.CpusetCpus != "" { + if err := fscommon.WriteFile(dirPath, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil { + return err + } + } + if cgroup.Resources.CpusetMems != "" { + if err := fscommon.WriteFile(dirPath, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil { + return err + } + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go new file mode 100644 index 0000000000..e84b33f2c7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go @@ -0,0 +1,99 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs2 + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" + "github.com/pkg/errors" +) + +const UnifiedMountpoint = "/sys/fs/cgroup" + +func defaultDirPath(c *configs.Cgroup) (string, error) { + if (c.Name != "" || c.Parent != "") && c.Path != "" { + return "", errors.Errorf("cgroup: either Path or Name and Parent should be used, got %+v", c) + } + if len(c.Paths) != 0 { + // never set by specconv + return "", errors.Errorf("cgroup: Paths is unsupported, use Path, got %+v", c) + } + + // XXX: Do not remove this code. Path safety is important! 
-- cyphar + cgPath := libcontainerUtils.CleanPath(c.Path) + cgParent := libcontainerUtils.CleanPath(c.Parent) + cgName := libcontainerUtils.CleanPath(c.Name) + + ownCgroup, err := parseCgroupFile("/proc/self/cgroup") + if err != nil { + return "", err + } + return _defaultDirPath(UnifiedMountpoint, cgPath, cgParent, cgName, ownCgroup) +} + +func _defaultDirPath(root, cgPath, cgParent, cgName, ownCgroup string) (string, error) { + if (cgName != "" || cgParent != "") && cgPath != "" { + return "", errors.New("cgroup: either Path or Name and Parent should be used") + } + innerPath := cgPath + if innerPath == "" { + innerPath = filepath.Join(cgParent, cgName) + } + if filepath.IsAbs(innerPath) { + return filepath.Join(root, innerPath), nil + } + return filepath.Join(root, ownCgroup, innerPath), nil +} + +// parseCgroupFile parses /proc/PID/cgroup file and return string +func parseCgroupFile(path string) (string, error) { + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + return parseCgroupFromReader(f) +} + +func parseCgroupFromReader(r io.Reader) (string, error) { + var ( + s = bufio.NewScanner(r) + ) + for s.Scan() { + if err := s.Err(); err != nil { + return "", err + } + var ( + text = s.Text() + parts = strings.SplitN(text, ":", 3) + ) + if len(parts) < 3 { + return "", errors.Errorf("invalid cgroup entry: %q", text) + } + // text is like "0::/user.slice/user-1001.slice/session-1.scope" + if parts[0] == "0" && parts[1] == "" { + return parts[2], nil + } + } + return "", errors.New("cgroup path not found") +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go new file mode 100644 index 0000000000..e0fd685402 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go @@ -0,0 +1,73 @@ +// +build linux + +package fs2 + +import ( + "github.com/opencontainers/runc/libcontainer/cgroups/ebpf" + "github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func isRWM(cgroupPermissions string) bool { + r := false + w := false + m := false + for _, rn := range cgroupPermissions { + switch rn { + case 'r': + r = true + case 'w': + w = true + case 'm': + m = true + } + } + return r && w && m +} + +// the logic is from crun +// https://github.com/containers/crun/blob/0.10.2/src/libcrun/cgroup.c#L1644-L1652 +func canSkipEBPFError(cgroup *configs.Cgroup) bool { + for _, dev := range cgroup.Resources.Devices { + if dev.Allow || !isRWM(dev.Permissions) { + return false + } + } + return true +} + +func setDevices(dirPath string, cgroup *configs.Cgroup) error { + devices := cgroup.Devices + if allowAllDevices := cgroup.Resources.AllowAllDevices; allowAllDevices != nil { + // never set by OCI specconv, but *allowAllDevices=false is still used by the integration test + if *allowAllDevices == true { + return errors.New("libcontainer AllowAllDevices is not supported, use Devices") + } + for _, ad := range cgroup.Resources.AllowedDevices { + d := *ad + d.Allow = true + devices = append(devices, &d) + } + } + if len(cgroup.Resources.DeniedDevices) != 0 { + // never set by OCI specconv + return errors.New("libcontainer DeniedDevices is not supported, use Devices") + } + insts, license, err := devicefilter.DeviceFilter(devices) + if err != nil { + return err + 
} + dirFD, err := unix.Open(dirPath, unix.O_DIRECTORY|unix.O_RDONLY, 0600) + if err != nil { + return errors.Errorf("cannot get dir FD for %s", dirPath) + } + defer unix.Close(dirFD) + if _, err := ebpf.LoadAttachCgroupDeviceFilter(insts, license, dirFD); err != nil { + if !canSkipEBPFError(cgroup) { + return err + } + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go new file mode 100644 index 0000000000..130c63f3e2 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go @@ -0,0 +1,53 @@ +// +build linux + +package fs2 + +import ( + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/pkg/errors" +) + +func setFreezer(dirPath string, state configs.FreezerState) error { + var desired int + switch state { + case configs.Undefined: + return nil + case configs.Frozen: + desired = 1 + case configs.Thawed: + desired = 0 + default: + return errors.Errorf("unknown freezer state %+v", state) + } + supportedErr := supportsFreezer(dirPath) + if supportedErr != nil && desired != 0 { + // can ignore error if desired == 1 + return errors.Wrap(supportedErr, "freezer not supported") + } + return freezeWithInt(dirPath, desired) +} + +func supportsFreezer(dirPath string) error { + _, err := fscommon.ReadFile(dirPath, "cgroup.freeze") + return err +} + +// freeze writes desired int to "cgroup.freeze". +func freezeWithInt(dirPath string, desired int) error { + desiredS := strconv.Itoa(desired) + if err := fscommon.WriteFile(dirPath, "cgroup.freeze", desiredS); err != nil { + return err + } + got, err := fscommon.ReadFile(dirPath, "cgroup.freeze") + if err != nil { + return err + } + if gotS := strings.TrimSpace(string(got)); gotS != desiredS { + return errors.Errorf("expected \"cgroup.freeze\" in %q to be %q, got %q", dirPath, desiredS, gotS) + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go new file mode 100644 index 0000000000..4bb7091a18 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go @@ -0,0 +1,214 @@ +// +build linux + +package fs2 + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/pkg/errors" +) + +// NewManager creates a manager for cgroup v2 unified hierarchy. +// dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope". +// If dirPath is empty, it is automatically set using config. 
+func NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) { + if config == nil { + config = &configs.Cgroup{} + } + if dirPath != "" { + if filepath.Clean(dirPath) != dirPath || !filepath.IsAbs(dirPath) { + return nil, errors.Errorf("invalid dir path %q", dirPath) + } + } else { + var err error + dirPath, err = defaultDirPath(config) + if err != nil { + return nil, err + } + } + controllers, err := detectControllers(dirPath) + if err != nil && !rootless { + return nil, err + } + + m := &manager{ + config: config, + dirPath: dirPath, + controllers: controllers, + rootless: rootless, + } + return m, nil +} + +func detectControllers(dirPath string) (map[string]struct{}, error) { + if err := os.MkdirAll(dirPath, 0755); err != nil { + return nil, err + } + controllersPath, err := securejoin.SecureJoin(dirPath, "cgroup.controllers") + if err != nil { + return nil, err + } + controllersData, err := ioutil.ReadFile(controllersPath) + if err != nil { + return nil, err + } + controllersFields := strings.Fields(string(controllersData)) + controllers := make(map[string]struct{}, len(controllersFields)) + for _, c := range controllersFields { + controllers[c] = struct{}{} + } + return controllers, nil +} + +type manager struct { + config *configs.Cgroup + // dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope" + dirPath string + // controllers is content of "cgroup.controllers" file. + // excludes pseudo-controllers ("devices" and "freezer"). + controllers map[string]struct{} + rootless bool +} + +func (m *manager) Apply(pid int) error { + if err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil && !m.rootless { + return err + } + return nil +} + +func (m *manager) GetPids() ([]int, error) { + return cgroups.GetPids(m.dirPath) +} + +func (m *manager) GetAllPids() ([]int, error) { + return cgroups.GetAllPids(m.dirPath) +} + +func (m *manager) GetStats() (*cgroups.Stats, error) { + var ( + st cgroups.Stats + errs []error + ) + // pids (since kernel 4.5) + if _, ok := m.controllers["pids"]; ok { + if err := statPids(m.dirPath, &st); err != nil { + errs = append(errs, err) + } + } else { + if err := statPidsWithoutController(m.dirPath, &st); err != nil { + errs = append(errs, err) + } + } + // memory (since kernel 4.5) + if _, ok := m.controllers["memory"]; ok { + if err := statMemory(m.dirPath, &st); err != nil { + errs = append(errs, err) + } + } + // io (since kernel 4.5) + if _, ok := m.controllers["io"]; ok { + if err := statIo(m.dirPath, &st); err != nil { + errs = append(errs, err) + } + } + // cpu (since kernel 4.15) + if _, ok := m.controllers["cpu"]; ok { + if err := statCpu(m.dirPath, &st); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 && !m.rootless { + return &st, errors.Errorf("error while statting cgroup v2: %+v", errs) + } + return &st, nil +} + +func (m *manager) Freeze(state configs.FreezerState) error { + if err := setFreezer(m.dirPath, state); err != nil { + return err + } + m.config.Resources.Freezer = state + return nil +} + +func (m *manager) Destroy() error { + return os.RemoveAll(m.dirPath) +} + +// GetPaths is for compatibility purposes and should be removed in the future +func (m *manager) GetPaths() map[string]string { + paths := map[string]string{ + // pseudo-controller for compatibility + "devices": m.dirPath, + "freezer": m.dirPath, + } + for c := range m.controllers { + paths[c] = m.dirPath + } + return paths +} + +func (m *manager) GetUnifiedPath() (string, error) { + return m.dirPath, nil +}
+ +func (m *manager) Set(container *configs.Config) error { + if container == nil || container.Cgroups == nil { + return nil + } + var errs []error + // pids (since kernel 4.5) + if _, ok := m.controllers["pids"]; ok { + if err := setPids(m.dirPath, container.Cgroups); err != nil { + errs = append(errs, err) + } + } + // memory (since kernel 4.5) + if _, ok := m.controllers["memory"]; ok { + if err := setMemory(m.dirPath, container.Cgroups); err != nil { + errs = append(errs, err) + } + } + // io (since kernel 4.5) + if _, ok := m.controllers["io"]; ok { + if err := setIo(m.dirPath, container.Cgroups); err != nil { + errs = append(errs, err) + } + } + // cpu (since kernel 4.15) + if _, ok := m.controllers["cpu"]; ok { + if err := setCpu(m.dirPath, container.Cgroups); err != nil { + errs = append(errs, err) + } + } + // devices (since kernel 4.15, pseudo-controller) + if err := setDevices(m.dirPath, container.Cgroups); err != nil { + errs = append(errs, err) + } + // cpuset (since kernel 5.0) + if _, ok := m.controllers["cpuset"]; ok { + if err := setCpuset(m.dirPath, container.Cgroups); err != nil { + errs = append(errs, err) + } + } + // freezer (since kernel 5.2, pseudo-controller) + if err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil { + errs = append(errs, err) + } + if len(errs) > 0 && !m.rootless { + return errors.Errorf("error while setting cgroup v2: %+v", errs) + } + m.config = container.Cgroups + return nil +} + +func (m *manager) GetCgroups() (*configs.Cgroup, error) { + return m.config, nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go new file mode 100644 index 0000000000..9a07308802 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go @@ -0,0 +1,124 @@ +// +build linux + +package fs2 + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/runc/libcontainer/configs" +) + +func setIo(dirPath string, cgroup *configs.Cgroup) error { + if cgroup.Resources.BlkioWeight != 0 { + filename := "io.bfq.weight" + if err := fscommon.WriteFile(dirPath, filename, strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil { + return err + } + } + + for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice { + if err := fscommon.WriteFile(dirPath, "io.max", td.StringName("rbps")); err != nil { + return err + } + } + for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice { + if err := fscommon.WriteFile(dirPath, "io.max", td.StringName("wbps")); err != nil { + return err + } + } + for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice { + if err := fscommon.WriteFile(dirPath, "io.max", td.StringName("riops")); err != nil { + return err + } + } + for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice { + if err := fscommon.WriteFile(dirPath, "io.max", td.StringName("wiops")); err != nil { + return err + } + } + + return nil +} + +func readCgroup2MapFile(dirPath string, name string) (map[string][]string, error) { + ret := map[string][]string{} + p := filepath.Join(dirPath, name) + f, err := os.Open(p) + if err != nil { + return nil, err + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := 
strings.Fields(line) + if len(parts) < 2 { + continue + } + ret[parts[0]] = parts[1:] + } + if err := scanner.Err(); err != nil { + return nil, err + } + return ret, nil +} + +func statIo(dirPath string, stats *cgroups.Stats) error { + // more details on the io.stat file format: https://www.kernel.org/doc/Documentation/cgroup-v2.txt + var ioServiceBytesRecursive []cgroups.BlkioStatEntry + values, err := readCgroup2MapFile(dirPath, "io.stat") + if err != nil { + return err + } + for k, v := range values { + d := strings.Split(k, ":") + if len(d) != 2 { + continue + } + minor, err := strconv.ParseUint(d[0], 10, 0) + if err != nil { + return err + } + major, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + for _, item := range v { + d := strings.Split(item, "=") + if len(d) != 2 { + continue + } + op := d[0] + + // Accommodate the cgroup v1 naming + switch op { + case "rbytes": + op = "read" + case "wbytes": + op = "write" + } + + value, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + entry := cgroups.BlkioStatEntry{ + Op: op, + Major: major, + Minor: minor, + Value: value, + } + ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) + } + } + stats.BlkioStats = cgroups.BlkioStats{IoServiceBytesRecursive: ioServiceBytesRecursive} + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go new file mode 100644 index 0000000000..23eccbec40 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go @@ -0,0 +1,103 @@ +// +build linux + +package fs2 + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/pkg/errors" +) + +func setMemory(dirPath string, cgroup *configs.Cgroup) error { + if cgroup.Resources.MemorySwap != 0 { + if err := fscommon.WriteFile(dirPath, "memory.swap.max", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { + return err + } + } + if cgroup.Resources.Memory != 0 { + if err := fscommon.WriteFile(dirPath, "memory.max", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { + return err + } + } + + // cgroup.Resources.KernelMemory is ignored + + if cgroup.Resources.MemoryReservation != 0 { + if err := fscommon.WriteFile(dirPath, "memory.low", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil { + return err + } + } + + return nil +} + +func statMemory(dirPath string, stats *cgroups.Stats) error { + // Set stats from memory.stat. 
+ statsFile, err := os.Open(filepath.Join(dirPath, "memory.stat")) + if err != nil { + return err + } + defer statsFile.Close() + + sc := bufio.NewScanner(statsFile) + for sc.Scan() { + t, v, err := fscommon.GetCgroupParamKeyValue(sc.Text()) + if err != nil { + return errors.Wrapf(err, "failed to parse memory.stat (%q)", sc.Text()) + } + stats.MemoryStats.Stats[t] = v + } + stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"] + + memoryUsage, err := getMemoryDataV2(dirPath, "") + if err != nil { + return err + } + stats.MemoryStats.Usage = memoryUsage + swapUsage, err := getMemoryDataV2(dirPath, "swap") + if err != nil { + return err + } + stats.MemoryStats.SwapUsage = swapUsage + + stats.MemoryStats.UseHierarchy = true + return nil +} + +func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { + memoryData := cgroups.MemoryData{} + + moduleName := "memory" + if name != "" { + moduleName = strings.Join([]string{"memory", name}, ".") + } + usage := strings.Join([]string{moduleName, "current"}, ".") + limit := strings.Join([]string{moduleName, "max"}, ".") + + value, err := fscommon.GetCgroupParamUint(path, usage) + if err != nil { + if moduleName != "memory" && os.IsNotExist(err) { + return cgroups.MemoryData{}, nil + } + return cgroups.MemoryData{}, errors.Wrapf(err, "failed to parse %s", usage) + } + memoryData.Usage = value + + value, err = fscommon.GetCgroupParamUint(path, limit) + if err != nil { + if moduleName != "memory" && os.IsNotExist(err) { + return cgroups.MemoryData{}, nil + } + return cgroups.MemoryData{}, errors.Wrapf(err, "failed to parse %s", limit) + } + memoryData.Limit = value + + return memoryData, nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go new file mode 100644 index 0000000000..db2d7ac90d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go @@ -0,0 +1,90 @@ +// +build linux + +package fs2 + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func setPids(dirPath string, cgroup *configs.Cgroup) error { + if cgroup.Resources.PidsLimit != 0 { + // "max" is the fallback value. 
+ limit := "max" + + if cgroup.Resources.PidsLimit > 0 { + limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10) + } + + if err := fscommon.WriteFile(dirPath, "pids.max", limit); err != nil { + return err + } + } + + return nil +} + +func isNOTSUP(err error) bool { + switch err := err.(type) { + case *os.PathError: + return err.Err == unix.ENOTSUP + default: + return false + } +} + +func statPidsWithoutController(dirPath string, stats *cgroups.Stats) error { + // if the controller is not enabled, let's read PIDS from cgroups.procs + // (or threads if cgroup.threads is enabled) + contents, err := ioutil.ReadFile(filepath.Join(dirPath, "cgroup.procs")) + if err != nil && isNOTSUP(err) { + contents, err = ioutil.ReadFile(filepath.Join(dirPath, "cgroup.threads")) + } + if err != nil { + return err + } + pids := make(map[string]string) + for _, i := range strings.Split(string(contents), "\n") { + if i != "" { + pids[i] = i + } + } + stats.PidsStats.Current = uint64(len(pids)) + stats.PidsStats.Limit = 0 + return nil +} + +func statPids(dirPath string, stats *cgroups.Stats) error { + current, err := fscommon.GetCgroupParamUint(dirPath, "pids.current") + if err != nil { + return errors.Wrap(err, "failed to parse pids.current") + } + + maxString, err := fscommon.GetCgroupParamString(dirPath, "pids.max") + if err != nil { + return errors.Wrap(err, "failed to parse pids.max") + } + + // Default if pids.max == "max" is 0 -- which represents "no limit". + var max uint64 + if maxString != "max" { + max, err = fscommon.ParseUint(maxString, 10, 64) + if err != nil { + return errors.Wrapf(err, "failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", + maxString, filepath.Join(dirPath, "pids.max")) + } + } + + stats.PidsStats.Current = current + stats.PidsStats.Limit = max + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go new file mode 100644 index 0000000000..dd92e8c8c6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go @@ -0,0 +1,36 @@ +// +build linux + +package fscommon + +import ( + "io/ioutil" + + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/pkg/errors" +) + +func WriteFile(dir, file, data string) error { + if dir == "" { + return errors.Errorf("no directory specified for %s", file) + } + path, err := securejoin.SecureJoin(dir, file) + if err != nil { + return err + } + if err := ioutil.WriteFile(path, []byte(data), 0700); err != nil { + return errors.Wrapf(err, "failed to write %q to %q", data, path) + } + return nil +} + +func ReadFile(dir, file string) (string, error) { + if dir == "" { + return "", errors.Errorf("no directory specified for %s", file) + } + path, err := securejoin.SecureJoin(dir, file) + if err != nil { + return "", err + } + data, err := ioutil.ReadFile(path) + return string(data), err +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go similarity index 83% rename from cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go rename to cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go index 30922777f5..46c3c7799e 100644 --- 
a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go @@ -1,6 +1,6 @@ // +build linux -package fs +package fscommon import ( "errors" @@ -18,7 +18,7 @@ var ( // Saturates negative values at zero and returns a uint64. // Due to kernel bugs, some of the memory cgroup stats can be negative. -func parseUint(s string, base, bitSize int) (uint64, error) { +func ParseUint(s string, base, bitSize int) (uint64, error) { value, err := strconv.ParseUint(s, base, bitSize) if err != nil { intValue, intErr := strconv.ParseInt(s, base, bitSize) @@ -38,11 +38,11 @@ func parseUint(s string, base, bitSize int) (uint64, error) { // Parses a cgroup param and returns as name, value // i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234 -func getCgroupParamKeyValue(t string) (string, uint64, error) { +func GetCgroupParamKeyValue(t string) (string, uint64, error) { parts := strings.Fields(t) switch len(parts) { case 2: - value, err := parseUint(parts[1], 10, 64) + value, err := ParseUint(parts[1], 10, 64) if err != nil { return "", 0, fmt.Errorf("unable to convert param value (%q) to uint64: %v", parts[1], err) } @@ -54,7 +54,7 @@ func getCgroupParamKeyValue(t string) (string, uint64, error) { } // Gets a single uint64 value from the specified cgroup file. -func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { +func GetCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { fileName := filepath.Join(cgroupPath, cgroupFile) contents, err := ioutil.ReadFile(fileName) if err != nil { @@ -65,7 +65,7 @@ func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { return math.MaxUint64, nil } - res, err := parseUint(trimmed, 10, 64) + res, err := ParseUint(trimmed, 10, 64) if err != nil { return res, fmt.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), fileName) } @@ -73,7 +73,7 @@ func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { } // Gets a string value from the specified cgroup file -func getCgroupParamString(cgroupPath, cgroupFile string) (string, error) { +func GetCgroupParamString(cgroupPath, cgroupFile string) (string, error) { contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) if err != nil { return "", err diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go index c171365be6..ef0db5aeb0 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -1,4 +1,4 @@ -// +build !linux static_build +// +build !linux package systemd @@ -42,6 +42,10 @@ func (m *Manager) GetPaths() map[string]string { return nil } +func (m *Manager) GetUnifiedPath() (string, error) { + return "", fmt.Errorf("Systemd not supported") +} + func (m *Manager) GetStats() (*cgroups.Stats, error) { return nil, fmt.Errorf("Systemd not supported") } @@ -57,3 +61,7 @@ func (m *Manager) Freeze(state configs.FreezerState) error { func Freeze(c *configs.Cgroup, state configs.FreezerState) error { return fmt.Errorf("Systemd not supported") } + +func (m *Manager) GetCgroups() (*configs.Cgroup, error) { + return nil, fmt.Errorf("Systemd not supported") +} 
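For context, the fscommon helpers exported above (ParseUint, GetCgroupParamKeyValue, GetCgroupParamUint) all follow one pattern that the new fs2 controllers build on: read a cgroup file, trim it, and parse either a bare integer (with "max" meaning unlimited) or "key value" lines as in memory.stat. Below is a minimal standalone sketch of that pattern using only the standard library; the function names, file names, and the /sys/fs/cgroup path are illustrative assumptions, not code taken from the patch.

package main

import (
	"fmt"
	"io/ioutil"
	"math"
	"path/filepath"
	"strconv"
	"strings"
)

// parseKeyValue mirrors fscommon.GetCgroupParamKeyValue:
// "cache 4096" -> ("cache", 4096).
func parseKeyValue(line string) (string, uint64, error) {
	parts := strings.Fields(line)
	if len(parts) != 2 {
		return "", 0, fmt.Errorf("line %q is not in key-value format", line)
	}
	v, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return "", 0, fmt.Errorf("unable to convert %q to uint64: %v", parts[1], err)
	}
	return parts[0], v, nil
}

// readUintFile mirrors fscommon.GetCgroupParamUint, treating the literal
// "max" as MaxUint64 (i.e. no limit).
func readUintFile(dir, file string) (uint64, error) {
	contents, err := ioutil.ReadFile(filepath.Join(dir, file))
	if err != nil {
		return 0, err
	}
	trimmed := strings.TrimSpace(string(contents))
	if trimmed == "max" {
		return math.MaxUint64, nil
	}
	return strconv.ParseUint(trimmed, 10, 64)
}

func main() {
	if key, val, err := parseKeyValue("cache 4096"); err == nil {
		fmt.Printf("%s = %d\n", key, val) // cache = 4096
	}
	// Assumes a cgroup v2 unified hierarchy mounted at /sys/fs/cgroup.
	if cur, err := readUintFile("/sys/fs/cgroup", "pids.current"); err == nil {
		fmt.Println("pids.current:", cur)
	}
}

The real helpers do slightly more: ParseUint saturates negative values at zero to work around kernel bugs in some memory cgroup stats. This sketch omits that detail.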
diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go index f274a49a3c..c4b19b3e6d 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go @@ -1,4 +1,4 @@ -// +build linux,!static_build +// +build linux package systemd @@ -297,6 +297,10 @@ func (m *LegacyManager) GetPaths() map[string]string { return paths } +func (m *LegacyManager) GetUnifiedPath() (string, error) { + return "", errors.New("unified path is only supported when running in unified mode") +} + func join(c *configs.Cgroup, subsystem string, pid int) (string, error) { path, err := getSubsystemPath(c, subsystem) if err != nil { @@ -524,3 +528,7 @@ func isUnitExists(err error) bool { } return false } + +func (m *LegacyManager) GetCgroups() (*configs.Cgroup, error) { + return m.Cgroups, nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unified_hierarchy.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unified_hierarchy.go index d394e3a5a8..6605099190 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unified_hierarchy.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unified_hierarchy.go @@ -1,4 +1,4 @@ -// +build linux,!static_build +// +build linux package systemd @@ -14,8 +14,9 @@ import ( systemdDbus "github.com/coreos/go-systemd/dbus" "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" + "github.com/opencontainers/runc/libcontainer/cgroups/fs2" "github.com/opencontainers/runc/libcontainer/configs" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -25,15 +26,6 @@ type UnifiedManager struct { Paths map[string]string } -var unifiedSubsystems = subsystemSet{ - &fs.CpusetGroupV2{}, - &fs.FreezerGroupV2{}, - &fs.CpuGroupV2{}, - &fs.MemoryGroupV2{}, - &fs.IOGroupV2{}, - &fs.PidsGroupV2{}, -} - func (m *UnifiedManager) Apply(pid int) error { var ( c = m.Cgroups @@ -159,19 +151,19 @@ func (m *UnifiedManager) Apply(pid int) error { return err } - paths := make(map[string]string) - for _, s := range unifiedSubsystems { - subsystemPath, err := getSubsystemPath(m.Cgroups, s.Name()) - if err != nil { - // Don't fail if a cgroup hierarchy was not found, just skip this subsystem - if cgroups.IsNotFound(err) { - continue - } - return err - } - paths[s.Name()] = subsystemPath + path, err := getSubsystemPath(m.Cgroups, "") + if err != nil { + return err + } + m.Paths = map[string]string{ + "pids": path, + "memory": path, + "io": path, + "cpu": path, + "devices": path, + "cpuset": path, + "freezer": path, } - m.Paths = paths return nil } @@ -195,7 +187,24 @@ func (m *UnifiedManager) GetPaths() map[string]string { m.mu.Unlock() return paths } - +func (m *UnifiedManager) GetUnifiedPath() (string, error) { + unifiedPath := "" + m.mu.Lock() + defer m.mu.Unlock() + for k, v := range m.Paths { + if unifiedPath == "" { + unifiedPath = v + } else if v != unifiedPath { + return unifiedPath, + errors.Errorf("expected %q path to be unified path %q, got %q", k, unifiedPath, v) + } + } + if unifiedPath == "" { + // FIXME: unified path could be detected even when no 
controller is available + return unifiedPath, errors.New("cannot detect unified path") + } + return unifiedPath, nil +} func createCgroupsv2Path(path string) (Err error) { content, err := ioutil.ReadFile("/sys/fs/cgroup/cgroup.controllers") if err != nil { @@ -250,27 +259,24 @@ func joinCgroupsV2(c *configs.Cgroup, pid int) error { return createCgroupsv2Path(path) } +func (m *UnifiedManager) fsManager() (cgroups.Manager, error) { + path, err := m.GetUnifiedPath() + if err != nil { + return nil, err + } + return fs2.NewManager(m.Cgroups, path, false) +} + func (m *UnifiedManager) Freeze(state configs.FreezerState) error { - path, err := getSubsystemPath(m.Cgroups, "freezer") + fsMgr, err := m.fsManager() if err != nil { return err } - prevState := m.Cgroups.Resources.Freezer - m.Cgroups.Resources.Freezer = state - freezer, err := unifiedSubsystems.Get("freezer") - if err != nil { - return err - } - err = freezer.Set(path, m.Cgroups) - if err != nil { - m.Cgroups.Resources.Freezer = prevState - return err - } - return nil + return fsMgr.Freeze(state) } func (m *UnifiedManager) GetPids() ([]int, error) { - path, err := getSubsystemPath(m.Cgroups, "devices") + path, err := m.GetUnifiedPath() if err != nil { return nil, err } @@ -278,7 +284,7 @@ func (m *UnifiedManager) GetPids() ([]int, error) { } func (m *UnifiedManager) GetAllPids() ([]int, error) { - path, err := getSubsystemPath(m.Cgroups, "devices") + path, err := m.GetUnifiedPath() if err != nil { return nil, err } @@ -286,44 +292,21 @@ func (m *UnifiedManager) GetAllPids() ([]int, error) { } func (m *UnifiedManager) GetStats() (*cgroups.Stats, error) { - m.mu.Lock() - defer m.mu.Unlock() - stats := cgroups.NewStats() - for name, path := range m.Paths { - sys, err := unifiedSubsystems.Get(name) - if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) { - continue - } - if err := sys.GetStats(path, stats); err != nil { - return nil, err - } + fsMgr, err := m.fsManager() + if err != nil { + return nil, err } - - return stats, nil + return fsMgr.GetStats() } func (m *UnifiedManager) Set(container *configs.Config) error { - // If Paths are set, then we are just joining cgroups paths - // and there is no need to set any values. - if m.Cgroups.Paths != nil { - return nil + fsMgr, err := m.fsManager() + if err != nil { + return err } - for _, sys := range unifiedSubsystems { - // Get the subsystem path, but don't error out for not found cgroups. 
- path, err := getSubsystemPath(container.Cgroups, sys.Name()) - if err != nil && !cgroups.IsNotFound(err) { - return err - } - - if err := sys.Set(path, container.Cgroups); err != nil { - return err - } - } - - if m.Paths["cpu"] != "" { - if err := fs.CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil { - return err - } - } - return nil + return fsMgr.Set(container) +} + +func (m *UnifiedManager) GetCgroups() (*configs.Cgroup, error) { + return m.Cgroups, nil } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index 60790f83b4..dbcc58f5b3 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -20,8 +20,9 @@ import ( ) const ( - CgroupNamePrefix = "name=" - CgroupProcesses = "cgroup.procs" + CgroupNamePrefix = "name=" + CgroupProcesses = "cgroup.procs" + unifiedMountpoint = "/sys/fs/cgroup" ) var ( @@ -40,7 +41,7 @@ var HugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"} func IsCgroup2UnifiedMode() bool { isUnifiedOnce.Do(func() { var st syscall.Statfs_t - if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { + if err := syscall.Statfs(unifiedMountpoint, &st); err != nil { panic("cannot statfs cgroup root") } isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC @@ -50,6 +51,9 @@ func IsCgroup2UnifiedMode() bool { // https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) { + if IsCgroup2UnifiedMode() { + return unifiedMountpoint, nil + } mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem) return mnt, err } @@ -235,8 +239,8 @@ func GetCgroupMounts(all bool) ([]Mount, error) { return nil, err } m := Mount{ - Mountpoint: "/sys/fs/cgroup", - Root: "/sys/fs/cgroup", + Mountpoint: unifiedMountpoint, + Root: unifiedMountpoint, Subsystems: availableControllers, } return []Mount{m}, nil @@ -262,6 +266,21 @@ func GetCgroupMounts(all bool) ([]Mount, error) { // GetAllSubsystems returns all the cgroup subsystems supported by the kernel func GetAllSubsystems() ([]string, error) { + // /proc/cgroups is meaningless for v2 + // https://github.com/torvalds/linux/blob/v5.3/Documentation/admin-guide/cgroup-v2.rst#deprecated-v1-core-features + if IsCgroup2UnifiedMode() { + // "pseudo" controllers do not appear in /sys/fs/cgroup/cgroup.controllers. + // - devices: implemented in kernel 4.15 + // - freezer: implemented in kernel 5.2 + // We assume these are always available, as it is hard to detect availability. + pseudo := []string{"devices", "freezer"} + data, err := ioutil.ReadFile("/sys/fs/cgroup/cgroup.controllers") + if err != nil { + return nil, err + } + subsystems := append(pseudo, strings.Fields(string(data))...) 
+ return subsystems, nil + } f, err := os.Open("/proc/cgroups") if err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go index 6ff4d96a5f..fe70c937aa 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go @@ -265,22 +265,24 @@ func (c *linuxContainer) Exec() error { func (c *linuxContainer) exec() error { path := filepath.Join(c.root, execFifoFilename) + pid := c.initProcess.pid() + blockingFifoOpenCh := awaitFifoOpen(path) + for { + select { + case result := <-blockingFifoOpenCh: + return handleFifoResult(result) - fifoOpen := make(chan struct{}) - select { - case <-awaitProcessExit(c.initProcess.pid(), fifoOpen): - return errors.New("container process is already dead") - case result := <-awaitFifoOpen(path): - close(fifoOpen) - if result.err != nil { - return result.err + case <-time.After(time.Millisecond * 100): + stat, err := system.Stat(pid) + if err != nil || stat.State == system.Zombie { + // could be because process started, ran, and completed between our 100ms timeout and our system.Stat() check. + // see if the fifo exists and has data (with a non-blocking open, which will succeed if the writing process is complete). + if err := handleFifoResult(fifoOpen(path, false)); err != nil { + return errors.New("container process is already dead") + } + return nil + } } - f := result.file - defer f.Close() - if err := readFromExecFifo(f); err != nil { - return err - } - return os.Remove(path) } } @@ -295,38 +297,39 @@ func readFromExecFifo(execFifo io.Reader) error { return nil } -func awaitProcessExit(pid int, exit <-chan struct{}) <-chan struct{} { - isDead := make(chan struct{}) - go func() { - for { - select { - case <-exit: - return - case <-time.After(time.Millisecond * 100): - stat, err := system.Stat(pid) - if err != nil || stat.State == system.Zombie { - close(isDead) - return - } - } - } - }() - return isDead -} - func awaitFifoOpen(path string) <-chan openResult { fifoOpened := make(chan openResult) go func() { - f, err := os.OpenFile(path, os.O_RDONLY, 0) - if err != nil { - fifoOpened <- openResult{err: newSystemErrorWithCause(err, "open exec fifo for reading")} - return - } - fifoOpened <- openResult{file: f} + result := fifoOpen(path, true) + fifoOpened <- result }() return fifoOpened } +func fifoOpen(path string, block bool) openResult { + flags := os.O_RDONLY + if !block { + flags |= syscall.O_NONBLOCK + } + f, err := os.OpenFile(path, flags, 0) + if err != nil { + return openResult{err: newSystemErrorWithCause(err, "open exec fifo for reading")} + } + return openResult{file: f} +} + +func handleFifoResult(result openResult) error { + if result.err != nil { + return result.err + } + f := result.file + defer f.Close() + if err := readFromExecFifo(f); err != nil { + return err + } + return os.Remove(f.Name()) +} + type openResult struct { file *os.File err error @@ -940,7 +943,7 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { // Since a container can be C/R'ed multiple times, // the checkpoint directory may already exist. 
- if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) { + if err := os.Mkdir(criuOpts.ImagesDirectory, 0700); err != nil && !os.IsExist(err) { return err } @@ -948,7 +951,7 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") } - if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) { + if err := os.Mkdir(criuOpts.WorkDirectory, 0700); err != nil && !os.IsExist(err) { return err } @@ -1111,7 +1114,7 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { return err } - err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0655) + err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0600) if err != nil { return err } @@ -1246,7 +1249,7 @@ func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { } // Since a container can be C/R'ed multiple times, // the work directory may already exist. - if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) { + if err := os.Mkdir(criuOpts.WorkDirectory, 0700); err != nil && !os.IsExist(err) { return err } workDir, err := os.Open(criuOpts.WorkDirectory) @@ -1824,7 +1827,7 @@ func (c *linuxContainer) isPaused() (bool, error) { data, err := ioutil.ReadFile(filepath.Join(fcg, filename)) if err != nil { // If freezer cgroup is not mounted, the container would just be not paused. - if os.IsNotExist(err) { + if os.IsNotExist(err) || err == syscall.ENODEV { return false, nil } return false, newSystemErrorWithCause(err, "checking if container is paused") diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go index 78a8c0a813..437633c6e4 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go @@ -14,12 +14,14 @@ import ( "github.com/cyphar/filepath-securejoin" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" + "github.com/opencontainers/runc/libcontainer/cgroups/fs2" "github.com/opencontainers/runc/libcontainer/cgroups/systemd" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/configs/validate" "github.com/opencontainers/runc/libcontainer/intelrdt" "github.com/opencontainers/runc/libcontainer/mount" "github.com/opencontainers/runc/libcontainer/utils" + "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -59,10 +61,37 @@ func SystemdCgroups(l *LinuxFactory) error { return nil } +func getUnifiedPath(paths map[string]string) string { + unifiedPath := "" + for k, v := range paths { + if unifiedPath == "" { + unifiedPath = v + } else if v != unifiedPath { + panic(errors.Errorf("expected %q path to be unified path %q, got %q", k, unifiedPath, v)) + } + } + // can be empty + return unifiedPath +} + +func cgroupfs2(l *LinuxFactory, rootless bool) error { + l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { + m, err := fs2.NewManager(config, getUnifiedPath(paths), rootless) + if err != nil { + panic(err) + } + return m + } + return nil +} + // Cgroupfs is an options func to configure a LinuxFactory to return containers // that use the native cgroups filesystem implementation to create and 
manage // cgroups. func Cgroupfs(l *LinuxFactory) error { + if cgroups.IsCgroup2UnifiedMode() { + return cgroupfs2(l, false) + } l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { return &fs.Manager{ Cgroups: config, @@ -79,6 +108,9 @@ func Cgroupfs(l *LinuxFactory) error { // during rootless container (including euid=0 in userns) setup (while still allowing cgroup usage if // they've been set up properly). func RootlessCgroupfs(l *LinuxFactory) error { + if cgroups.IsCgroup2UnifiedMode() { + return cgroupfs2(l, true) + } l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { return &fs.Manager{ Cgroups: config, diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go index cd7ff67a70..c1b1560020 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go @@ -127,6 +127,12 @@ func finalizeNamespace(config *initConfig) error { return errors.Wrap(err, "close exec fds") } + if config.Cwd != "" { + if err := unix.Chdir(config.Cwd); err != nil { + return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err) + } + } + capabilities := &configs.Capabilities{} if config.Capabilities != nil { capabilities = config.Capabilities @@ -154,11 +160,6 @@ func finalizeNamespace(config *initConfig) error { if err := w.ApplyCaps(); err != nil { return errors.Wrap(err, "apply caps") } - if config.Cwd != "" { - if err := unix.Chdir(config.Cwd); err != nil { - return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err) - } - } return nil } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go index 569c53f6e8..938d8ce064 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/types" "github.com/vishvananda/netlink" ) @@ -37,8 +38,8 @@ func getStrategy(tpe string) (networkStrategy, error) { } // Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. -func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) { - out := &NetworkInterface{Name: interfaceName} +func getNetworkInterfaceStats(interfaceName string) (*types.NetworkInterface, error) { + out := &types.NetworkInterface{Name: interfaceName} // This can happen if the network runtime information is missing - possible if the // container was created by an old version of libcontainer. 
if interfaceName == "" { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go index 10888b499b..106c4c2b98 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go @@ -279,8 +279,14 @@ func mountCgroupV2(m *configs.Mount, rootfs, mountLabel string, enableCgroupns b if err := os.MkdirAll(cgroupPath, 0755); err != nil { return err } - - return unix.Mount(m.Source, cgroupPath, "cgroup2", uintptr(m.Flags), m.Data) + if err := unix.Mount(m.Source, cgroupPath, "cgroup2", uintptr(m.Flags), m.Data); err != nil { + // when we are in UserNS but CgroupNS is not unshared, we cannot mount cgroup2 (#2158) + if err == unix.EPERM || err == unix.EBUSY { + return unix.Mount("/sys/fs/cgroup", cgroupPath, "", uintptr(m.Flags)|unix.MS_BIND, "") + } + return err + } + return nil } func mountToRootfs(m *configs.Mount, rootfs, mountLabel string, enableCgroupns bool) error { @@ -293,6 +299,18 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string, enableCgroupns b switch m.Device { case "proc", "sysfs": + // If the destination already exists and is not a directory, we bail + // out This is to avoid mounting through a symlink or similar -- which + // has been a "fun" attack scenario in the past. + // TODO: This won't be necessary once we switch to libpathrs and we can + // stop all of these symlink-exchange attacks. + if fi, err := os.Lstat(dest); err != nil { + if !os.IsNotExist(err) { + return err + } + } else if fi.Mode()&os.ModeDir == 0 { + return fmt.Errorf("filesystem %q must be mounted on ordinary directory", m.Device) + } if err := os.MkdirAll(dest, 0755); err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/stats.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/stats.go deleted file mode 100644 index 303e4b94c3..0000000000 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/stats.go +++ /dev/null @@ -1,15 +0,0 @@ -package libcontainer - -type NetworkInterface struct { - // Name is the name of the network interface. 
- Name string - - RxBytes uint64 - RxPackets uint64 - RxErrors uint64 - RxDropped uint64 - TxBytes uint64 - TxPackets uint64 - TxErrors uint64 - TxDropped uint64 -} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go index 29fd641e9d..fff9dd37af 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go @@ -1,10 +1,13 @@ package libcontainer -import "github.com/opencontainers/runc/libcontainer/cgroups" -import "github.com/opencontainers/runc/libcontainer/intelrdt" +import ( + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/intelrdt" + "github.com/opencontainers/runc/types" +) type Stats struct { - Interfaces []*NetworkInterface + Interfaces []*types.NetworkInterface CgroupStats *cgroups.Stats IntelRdtStats *intelrdt.Stats } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/types/events.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/types/events.go new file mode 100644 index 0000000000..c6f0e97a3b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/types/events.go @@ -0,0 +1,130 @@ +package types + +// Event struct for encoding the event data to json. +type Event struct { + Type string `json:"type"` + ID string `json:"id"` + Data interface{} `json:"data,omitempty"` +} + +// stats is the runc specific stats structure for stability when encoding and decoding stats. +type Stats struct { + CPU Cpu `json:"cpu"` + Memory Memory `json:"memory"` + Pids Pids `json:"pids"` + Blkio Blkio `json:"blkio"` + Hugetlb map[string]Hugetlb `json:"hugetlb"` + IntelRdt IntelRdt `json:"intel_rdt"` + NetworkInterfaces []*NetworkInterface `json:"network_interfaces"` +} + +type Hugetlb struct { + Usage uint64 `json:"usage,omitempty"` + Max uint64 `json:"max,omitempty"` + Failcnt uint64 `json:"failcnt"` +} + +type BlkioEntry struct { + Major uint64 `json:"major,omitempty"` + Minor uint64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value uint64 `json:"value,omitempty"` +} + +type Blkio struct { + IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"` + IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"` + IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"` + IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"` + IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"` + IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"` + IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"` + SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"` +} + +type Pids struct { + Current uint64 `json:"current,omitempty"` + Limit uint64 `json:"limit,omitempty"` +} + +type Throttling struct { + Periods uint64 `json:"periods,omitempty"` + ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"` + ThrottledTime uint64 `json:"throttledTime,omitempty"` +} + +type CpuUsage struct { + // Units: nanoseconds. 
+ Total uint64 `json:"total,omitempty"` + Percpu []uint64 `json:"percpu,omitempty"` + Kernel uint64 `json:"kernel"` + User uint64 `json:"user"` +} + +type Cpu struct { + Usage CpuUsage `json:"usage,omitempty"` + Throttling Throttling `json:"throttling,omitempty"` +} + +type MemoryEntry struct { + Limit uint64 `json:"limit"` + Usage uint64 `json:"usage,omitempty"` + Max uint64 `json:"max,omitempty"` + Failcnt uint64 `json:"failcnt"` +} + +type Memory struct { + Cache uint64 `json:"cache,omitempty"` + Usage MemoryEntry `json:"usage,omitempty"` + Swap MemoryEntry `json:"swap,omitempty"` + Kernel MemoryEntry `json:"kernel,omitempty"` + KernelTCP MemoryEntry `json:"kernelTCP,omitempty"` + Raw map[string]uint64 `json:"raw,omitempty"` +} + +type L3CacheInfo struct { + CbmMask string `json:"cbm_mask,omitempty"` + MinCbmBits uint64 `json:"min_cbm_bits,omitempty"` + NumClosids uint64 `json:"num_closids,omitempty"` +} + +type MemBwInfo struct { + BandwidthGran uint64 `json:"bandwidth_gran,omitempty"` + DelayLinear uint64 `json:"delay_linear,omitempty"` + MinBandwidth uint64 `json:"min_bandwidth,omitempty"` + NumClosids uint64 `json:"num_closids,omitempty"` +} + +type IntelRdt struct { + // The read-only L3 cache information + L3CacheInfo *L3CacheInfo `json:"l3_cache_info,omitempty"` + + // The read-only L3 cache schema in root + L3CacheSchemaRoot string `json:"l3_cache_schema_root,omitempty"` + + // The L3 cache schema in 'container_id' group + L3CacheSchema string `json:"l3_cache_schema,omitempty"` + + // The read-only memory bandwidth information + MemBwInfo *MemBwInfo `json:"mem_bw_info,omitempty"` + + // The read-only memory bandwidth schema in root + MemBwSchemaRoot string `json:"mem_bw_schema_root,omitempty"` + + // The memory bandwidth schema in 'container_id' group + MemBwSchema string `json:"mem_bw_schema,omitempty"` +} + +type NetworkInterface struct { + // Name is the name of the network interface. + Name string + + RxBytes uint64 + RxPackets uint64 + RxErrors uint64 + RxDropped uint64 + TxBytes uint64 + TxPackets uint64 + TxErrors uint64 + TxDropped uint64 +} diff --git a/cluster-autoscaler/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/cluster-autoscaler/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 9805432c2a..2f4930d9dd 100644 --- a/cluster-autoscaler/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/cluster-autoscaler/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,11 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: metrics.proto -package io_prometheus_client // import "github.com/prometheus/client_model/go" +package io_prometheus_client -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +19,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type MetricType int32 @@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{ 3: "UNTYPED", 4: "HISTOGRAM", } + var MetricType_value = map[string]int32{ "COUNTER": 0, "GAUGE": 1, @@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType { *p = x return p } + func (x MetricType) String() string { return proto.EnumName(MetricType_name, int32(x)) } + func (x *MetricType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") if err != nil { @@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { *x = MetricType(value) return nil } + func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} + return fileDescriptor_6039342a2ba47b72, []int{0} } type LabelPair struct { @@ -75,16 +82,17 @@ func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} + return fileDescriptor_6039342a2ba47b72, []int{0} } + func (m *LabelPair) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LabelPair.Unmarshal(m, b) } func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) } -func (dst *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(dst, src) +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) } func (m *LabelPair) XXX_Size() int { return xxx_messageInfo_LabelPair.Size(m) @@ -120,16 +128,17 @@ func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} + return fileDescriptor_6039342a2ba47b72, []int{1} } + func (m *Gauge) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Gauge.Unmarshal(m, b) } func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) } -func (dst *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(dst, src) +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) } func (m *Gauge) XXX_Size() int { return xxx_messageInfo_Gauge.Size(m) @@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} + return fileDescriptor_6039342a2ba47b72, []int{2} } + func (m *Counter) XXX_Unmarshal(b []byte) error { return 
xxx_messageInfo_Counter.Unmarshal(m, b) } func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Counter.Marshal(b, m, deterministic) } -func (dst *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(dst, src) +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) } func (m *Counter) XXX_Size() int { return xxx_messageInfo_Counter.Size(m) @@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 { return 0 } +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + type Quantile struct { Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` @@ -197,16 +215,17 @@ func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} + return fileDescriptor_6039342a2ba47b72, []int{3} } + func (m *Quantile) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Quantile.Unmarshal(m, b) } func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) } -func (dst *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(dst, src) +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) } func (m *Quantile) XXX_Size() int { return xxx_messageInfo_Quantile.Size(m) @@ -244,16 +263,17 @@ func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} + return fileDescriptor_6039342a2ba47b72, []int{4} } + func (m *Summary) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Summary.Unmarshal(m, b) } func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Summary.Marshal(b, m, deterministic) } -func (dst *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(dst, src) +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) } func (m *Summary) XXX_Size() int { return xxx_messageInfo_Summary.Size(m) @@ -296,16 +316,17 @@ func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} + return fileDescriptor_6039342a2ba47b72, []int{5} } + func (m *Untyped) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Untyped.Unmarshal(m, b) } func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) } -func (dst *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(dst, src) +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) } func (m *Untyped) XXX_Size() int { return xxx_messageInfo_Untyped.Size(m) @@ -336,16 +357,17 @@ func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return 
fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} + return fileDescriptor_6039342a2ba47b72, []int{6} } + func (m *Histogram) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Histogram.Unmarshal(m, b) } func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) } -func (dst *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(dst, src) +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) } func (m *Histogram) XXX_Size() int { return xxx_messageInfo_Histogram.Size(m) @@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket { } type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} + return fileDescriptor_6039342a2ba47b72, []int{7} } + func (m *Bucket) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Bucket.Unmarshal(m, b) } func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) } -func (dst *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(dst, src) +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) } func (m *Bucket) XXX_Size() int { return xxx_messageInfo_Bucket.Size(m) @@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 { return 0 } +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{8} +} + +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Exemplar.Unmarshal(m, b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return 
xxx_messageInfo_Exemplar.Size(m) +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + type Metric struct { Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` @@ -440,16 +526,17 @@ func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} + return fileDescriptor_6039342a2ba47b72, []int{9} } + func (m *Metric) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metric.Unmarshal(m, b) } func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metric.Marshal(b, m, deterministic) } -func (dst *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(dst, src) +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) } func (m *Metric) XXX_Size() int { return xxx_messageInfo_Metric.Size(m) @@ -523,16 +610,17 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} + return fileDescriptor_6039342a2ba47b72, []int{10} } + func (m *MetricFamily) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetricFamily.Unmarshal(m, b) } func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) } -func (dst *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(dst, src) +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) } func (m *MetricFamily) XXX_Size() int { return xxx_messageInfo_MetricFamily.Size(m) @@ -572,6 +660,7 @@ func (m *MetricFamily) GetMetric() []*Metric { } func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") @@ -580,50 +669,55 @@ func init() { proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) } -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } -var 
fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ - // 591 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, - 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, - 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, - 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, - 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, - 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, - 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, - 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, - 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, - 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, - 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, - 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, - 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, - 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, - 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, - 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, - 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, - 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, - 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, - 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, - 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, - 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, - 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, - 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, - 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, - 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, - 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, - 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, - 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, - 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, - 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, - 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, - 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, - 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, - 0xee, 0xe9, 0x6e, 0xc4, 
0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, - 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +var fileDescriptor_6039342a2ba47b72 = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, + 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, + 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, + 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, + 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, + 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, + 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, + 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, + 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, + 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, + 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, + 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, + 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, + 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, + 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, + 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, + 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, + 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, + 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, + 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, + 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, + 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, + 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, + 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, + 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, + 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, + 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, + 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, + 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, + 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, + 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, + 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, + 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 
0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, + 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, + 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, + 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, + 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, + 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, + 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, + 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, } diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go index c48b03691e..668b720380 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go @@ -40,7 +40,7 @@ const ( // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TokenReview attempts to authenticate a token to a known user. diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types.go index 0b6cba822a..0083fb0e3d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TokenReview attempts to authenticate a token to a known user. diff --git a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/types.go index 86b05c54e3..be8913eb4f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SubjectAccessReview checks whether or not a user or group can perform an action. @@ -43,7 +43,7 @@ type SubjectAccessReview struct { // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a @@ -63,7 +63,7 @@ type SelfSubjectAccessReview struct { } // +genclient -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. @@ -189,7 +189,7 @@ type SubjectAccessReviewStatus struct { // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. 
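The +genclient:onlyVerbs=create annotations above (replacing noVerbs) make the generated clients expose a Create method for the review types, which are answered in a single round trip rather than persisted. A sketch of how such a create-only client is typically called follows; this is illustrative usage rather than code from the patch, and the Create signature shown (context plus CreateOptions) is the form used by later client-go releases — releases contemporary with this vendor snapshot take only the object.

package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func reviewToken(cfg *rest.Config, token string) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	tr := &authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{Token: token},
	}
	// TokenReview is create-only: the API server authenticates the token
	// and reports the outcome in Status; nothing is stored.
	res, err := cs.AuthenticationV1().TokenReviews().Create(
		context.TODO(), tr, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	fmt.Println("authenticated:", res.Status.Authenticated)
	return nil
}

The server fills in Status on the returned object, which is why create is the only verb these types need.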
diff --git a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/types.go index 618ff8c0f1..cf117d268c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SubjectAccessReview checks whether or not a user or group can perform an action. @@ -43,7 +43,7 @@ type SubjectAccessReview struct { // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a @@ -63,7 +63,7 @@ type SelfSubjectAccessReview struct { } // +genclient -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. @@ -189,7 +189,7 @@ type SubjectAccessReviewStatus struct { // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go index 33f274429a..6655fcefbe 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go @@ -4061,6 +4061,7 @@ type ServiceList struct { } // +genclient +// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ServiceAccount binds together: diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto index 65143d29fc..b8054528f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -84,7 +84,7 @@ message FlowSchemaList { optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. - // +listType=set + // +listType=atomic repeated FlowSchema items = 2; } @@ -110,7 +110,7 @@ message FlowSchemaSpec { // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if // at least one member of rules matches the request. // if it is an empty slice, there will be no requests matching the FlowSchema. - // +listType=set + // +listType=atomic // +optional repeated PolicyRulesWithSubjects rules = 4; } @@ -210,20 +210,20 @@ message PolicyRulesWithSubjects { // subjects is the list of normal user, serviceaccount, or group that this rule cares about. // There must be at least one member in this slice. // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. - // +listType=set + // +listType=atomic // Required. 
repeated Subject subjects = 1; // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the // target resource. // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. - // +listType=set + // +listType=atomic // +optional repeated ResourcePolicyRule resourceRules = 2; // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb // and the target non-resource URL. - // +listType=set + // +listType=atomic // +optional repeated NonResourcePolicyRule nonResourceRules = 3; } @@ -275,7 +275,7 @@ message PriorityLevelConfigurationList { optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. - // +listType=set + // +listType=atomic repeated PriorityLevelConfiguration items = 2; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index b53abd0e91..16bcf819ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -84,7 +84,7 @@ type FlowSchemaList struct { metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // `items` is a list of FlowSchemas. - // +listType=set + // +listType=atomic Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -107,7 +107,7 @@ type FlowSchemaSpec struct { // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if // at least one member of rules matches the request. // if it is an empty slice, there will be no requests matching the FlowSchema. - // +listType=set + // +listType=atomic // +optional Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` } @@ -152,18 +152,18 @@ type PolicyRulesWithSubjects struct { // subjects is the list of normal user, serviceaccount, or group that this rule cares about. // There must be at least one member in this slice. // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. - // +listType=set + // +listType=atomic // Required. Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the // target resource. // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. - // +listType=set + // +listType=atomic // +optional ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb // and the target non-resource URL. - // +listType=set + // +listType=atomic // +optional NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` } @@ -350,7 +350,7 @@ type PriorityLevelConfigurationList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // `items` is a list of request-priorities. 
- // +listType=set + // +listType=atomic Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 7f63175d3e..8ffcb9f09a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/math.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -37,12 +37,8 @@ var ( big1024 = big.NewInt(1024) // Commonly needed inf.Dec values-- treat as read only! - decZero = inf.NewDec(0, 0) - decOne = inf.NewDec(1, 0) - decMinusOne = inf.NewDec(-1, 0) - decThousand = inf.NewDec(1000, 0) - dec1024 = inf.NewDec(1024, 0) - decMinus1024 = inf.NewDec(-1024, 0) + decZero = inf.NewDec(0, 0) + decOne = inf.NewDec(1, 0) // Largest (in magnitude) number allowed. maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64 diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 516d041daf..d95e03aa92 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -634,6 +634,11 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } +// ToUnstructured implements the value.UnstructuredConverter interface. +func (q Quantity) ToUnstructured() interface{} { + return q.String() +} + // UnmarshalJSON implements the json.Unmarshaller interface. // TODO: Remove support for leading/trailing whitespace func (q *Quantity) UnmarshalJSON(value []byte) error { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index babe8a8b53..a22b07878f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -49,6 +49,11 @@ func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(d.Duration.String()) } +// ToUnstructured implements the value.UnstructuredConverter interface. +func (d Duration) ToUnstructured() interface{} { + return d.Duration.String() +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index fe510ed9e6..4a1d89cfce 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -153,6 +153,16 @@ func (t Time) MarshalJSON() ([]byte, error) { return buf, nil } +// ToUnstructured implements the value.UnstructuredConverter interface. +func (t Time) ToUnstructured() interface{} { + if t.IsZero() { + return nil + } + buf := make([]byte, 0, len(time.RFC3339)) + buf = t.UTC().AppendFormat(buf, time.RFC3339) + return string(buf) +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. 
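The ToUnstructured methods just added let Quantity, Duration, and Time convert directly to their JSON-scalar forms; the converter.go change below wires this through the value.UnstructuredConverter interface from sigs.k8s.io/structured-merge-diff, avoiding a marshal/unmarshal round trip. A minimal sketch of the same pattern for a hypothetical scalar-like type (not part of the patch):

// IPAddr is an illustrative wrapper type that serializes as a string.
type IPAddr struct {
	ip string
}

// ToUnstructured implements the value.UnstructuredConverter interface:
// return the plain scalar the type would marshal to, or nil for the
// zero value (mirroring how Time handles IsZero above).
func (a IPAddr) ToUnstructured() interface{} {
	if a.ip == "" {
		return nil
	}
	return a.ip
}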
// diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD index f63f13cce7..7fe64d2a2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD @@ -72,6 +72,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/sigs.k8s.io/structured-merge-diff/v3/value:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 8896ba79ea..918d0831d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -17,7 +17,6 @@ limitations under the License. package runtime import ( - "bytes" encodingjson "encoding/json" "fmt" "math" @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/structured-merge-diff/v3/value" "k8s.io/klog" ) @@ -68,8 +68,6 @@ func newFieldsCache() *fieldsCache { } var ( - marshalerType = reflect.TypeOf(new(encodingjson.Marshaler)).Elem() - unmarshalerType = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem() mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) stringType = reflect.TypeOf(string("")) fieldCache = newFieldsCache() @@ -205,13 +203,9 @@ func fromUnstructured(sv, dv reflect.Value) error { } // Check if the object has a custom JSON marshaller/unmarshaller. - if reflect.PtrTo(dt).Implements(unmarshalerType) { - data, err := json.Marshal(sv.Interface()) - if err != nil { - return fmt.Errorf("error encoding %s to json: %v", st.String(), err) - } - unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler) - return unmarshaler.UnmarshalJSON(data) + entry := value.TypeReflectEntryOf(dv.Type()) + if entry.CanConvertFromUnstructured() { + return entry.FromUnstructured(sv, dv) } switch dt.Kind() { @@ -481,94 +475,19 @@ func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { return json.Unmarshal(data, u) } -var ( - nullBytes = []byte("null") - trueBytes = []byte("true") - falseBytes = []byte("false") -) - -func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) { - // Check value receivers if v is not a pointer and pointer receivers if v is a pointer - if v.Type().Implements(marshalerType) { - return v.Interface().(encodingjson.Marshaler), true - } - // Check pointer receivers if v is not a pointer - if v.Kind() != reflect.Ptr && v.CanAddr() { - v = v.Addr() - if v.Type().Implements(marshalerType) { - return v.Interface().(encodingjson.Marshaler), true - } - } - return nil, false -} - func toUnstructured(sv, dv reflect.Value) error { - // Check if the object has a custom JSON marshaller/unmarshaller. - if marshaler, ok := getMarshaler(sv); ok { - if sv.Kind() == reflect.Ptr && sv.IsNil() { - // We're done - we don't need to store anything. - return nil - } - - data, err := marshaler.MarshalJSON() + // Check if the object has a custom string converter. 
+ entry := value.TypeReflectEntryOf(sv.Type()) + if entry.CanConvertToUnstructured() { + v, err := entry.ToUnstructured(sv) if err != nil { return err } - switch { - case len(data) == 0: - return fmt.Errorf("error decoding from json: empty value") - - case bytes.Equal(data, nullBytes): - // We're done - we don't need to store anything. - - case bytes.Equal(data, trueBytes): - dv.Set(reflect.ValueOf(true)) - - case bytes.Equal(data, falseBytes): - dv.Set(reflect.ValueOf(false)) - - case data[0] == '"': - var result string - err := json.Unmarshal(data, &result) - if err != nil { - return fmt.Errorf("error decoding string from json: %v", err) - } - dv.Set(reflect.ValueOf(result)) - - case data[0] == '{': - result := make(map[string]interface{}) - err := json.Unmarshal(data, &result) - if err != nil { - return fmt.Errorf("error decoding object from json: %v", err) - } - dv.Set(reflect.ValueOf(result)) - - case data[0] == '[': - result := make([]interface{}, 0) - err := json.Unmarshal(data, &result) - if err != nil { - return fmt.Errorf("error decoding array from json: %v", err) - } - dv.Set(reflect.ValueOf(result)) - - default: - var ( - resultInt int64 - resultFloat float64 - err error - ) - if err = json.Unmarshal(data, &resultInt); err == nil { - dv.Set(reflect.ValueOf(resultInt)) - } else if err = json.Unmarshal(data, &resultFloat); err == nil { - dv.Set(reflect.ValueOf(resultFloat)) - } else { - return fmt.Errorf("error decoding number from json: %v", err) - } + if v != nil { + dv.Set(reflect.ValueOf(v)) } - return nil } - st := sv.Type() switch st.Kind() { case reflect.String: diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD index 805e9b1791..e9554e336c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD @@ -21,6 +21,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD index 7f24e92431..de7957e347 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD @@ -10,7 +10,10 @@ go_test( name = "go_default_test", srcs = ["wait_test.go"], embed = [":go_default_library"], - deps = ["//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + ], ) go_library( @@ -21,7 +24,10 @@ go_library( ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait", importpath = "k8s.io/apimachinery/pkg/util/wait", - deps = ["//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + ], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 76c8c3d23c..4cb0c122c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -19,10 +19,12 @@ package wait import ( "context" "errors" + "math" "math/rand" "sync" "time" + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/runtime" ) @@ -128,9 +130,15 @@ func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), pe // Close stopCh to stop. f may not be invoked if stop channel is already // closed. Pass NeverStop to if you don't want it stop. func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - var t *time.Timer - var sawTimeout bool + BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) +} +// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { + var t clock.Timer for { select { case <-stopCh: @@ -138,13 +146,8 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b default: } - jitteredPeriod := period - if jitterFactor > 0.0 { - jitteredPeriod = Jitter(period, jitterFactor) - } - if !sliding { - t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout) + t = backoff.Backoff() } func() { @@ -153,7 +156,7 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b }() if sliding { - t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout) + t = backoff.Backoff() } // NOTE: b/c there is no priority selection in golang @@ -164,8 +167,7 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b select { case <-stopCh: return - case <-t.C: - sawTimeout = true + case <-t.C(): } } } @@ -283,6 +285,92 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance return ctx, cancel } +// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides +// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff() +// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained. +// The BackoffManager is supposed to be called in a single-threaded environment. +type BackoffManager interface { + Backoff() clock.Timer +} + +type exponentialBackoffManagerImpl struct { + backoff *Backoff + backoffTimer clock.Timer + lastBackoffStart time.Time + initialBackoff time.Duration + backoffResetDuration time.Duration + clock clock.Clock +} + +// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and +// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. +// This backoff manager is used to reduce load during upstream unhealthiness. 
+func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { + return &exponentialBackoffManagerImpl{ + backoff: &Backoff{ + Duration: initBackoff, + Factor: backoffFactor, + Jitter: jitter, + + // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not + // what we ideally need here, we set it to max int and assume we will never use up the steps + Steps: math.MaxInt32, + Cap: maxBackoff, + }, + backoffTimer: c.NewTimer(0), + initialBackoff: initBackoff, + lastBackoffStart: c.Now(), + backoffResetDuration: resetDuration, + clock: c, + } +} + +func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { + if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { + b.backoff.Steps = math.MaxInt32 + b.backoff.Duration = b.initialBackoff + } + b.lastBackoffStart = b.clock.Now() + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff. +func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { + b.backoffTimer.Reset(b.getNextBackoff()) + return b.backoffTimer +} + +type jitteredBackoffManagerImpl struct { + clock clock.Clock + duration time.Duration + jitter float64 + backoffTimer clock.Timer +} + +// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter +// is negative, backoff will not be jittered. +func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { + return &jitteredBackoffManagerImpl{ + clock: c, + duration: duration, + jitter: jitter, + backoffTimer: c.NewTimer(0), + } +} + +func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { + jitteredPeriod := j.duration + if j.jitter > 0.0 { + jitteredPeriod = Jitter(j.duration, j.jitter) + } + return jitteredPeriod +} + +func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { + j.backoffTimer.Reset(j.getNextBackoff()) + return j.backoffTimer +} + // ExponentialBackoff repeats a condition check with exponential backoff. // // It repeatedly checks the condition and then sleeps, using `backoff.Step()` @@ -503,16 +591,3 @@ func poller(interval, timeout time.Duration) WaitFunc { return ch }) } - -// resetOrReuseTimer avoids allocating a new timer if one is already in use. -// Not safe for multiple threads. 
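Since JitterUntil is now a thin wrapper over BackoffUntil with a jittered manager, callers can substitute the exponential manager to back off from an unhealthy upstream. A minimal usage sketch, assuming the wait and clock packages at this revision; the poll body is illustrative:

import (
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/wait"
)

func pollWithBackoff(stopCh <-chan struct{}) {
	// Jittered exponential backoff: start at 500ms, double up to a 30s
	// cap, and reset to the initial duration after 2 minutes without
	// any backoff being requested.
	manager := wait.NewExponentialBackoffManager(
		500*time.Millisecond, 30*time.Second, 2*time.Minute,
		2.0, 1.0, &clock.RealClock{})

	// sliding=true: the next period is computed after f returns.
	wait.BackoffUntil(func() {
		// poll the possibly-unhealthy upstream here
	}, manager, true, stopCh)
}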
-func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer { - if t == nil { - return time.NewTimer(d) - } - if !t.Stop() && !sawTimeout { - <-t.C - } - t.Reset(d) - return t -} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 5db8240458..779ab425d8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -154,7 +154,7 @@ func (l *Lifecycle) Admit(ctx context.Context, a admission.Attributes, o admissi // refuse to operate on non-existent namespaces if !exists || forceLiveLookup { // as a last resort, make a call directly to storage - namespace, err = l.client.CoreV1().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) + namespace, err = l.client.CoreV1().Namespaces().Get(context.TODO(), a.GetNamespace(), metav1.GetOptions{}) switch { case errors.IsNotFound(err): return err diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go index 3d6f022e2f..1d337fb435 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -24,6 +24,7 @@ import ( "time" jsonpatch "github.com/evanphx/json-patch" + apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/klog" @@ -235,7 +236,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admiss defer cancel() } - r := client.Post().Context(ctx).Body(request) + r := client.Post().Body(request) // if the context has a deadline, set it as a parameter to inform the backend if deadline, hasDeadline := ctx.Deadline(); hasDeadline { @@ -250,7 +251,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admiss } } - if err := r.Do().Into(response); err != nil { + if err := r.Do(ctx).Into(response); err != nil { return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} } trace.Step("Request completed") diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go index eb7d5ec4c8..183be7b39a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go @@ -17,6 +17,7 @@ limitations under the License. 
package namespace import ( + "context" "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -76,7 +77,7 @@ func (m *Matcher) GetNamespaceLabels(attr admission.Attributes) (map[string]stri } if apierrors.IsNotFound(err) { // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - namespace, err = m.Client.CoreV1().Namespaces().Get(namespaceName, metav1.GetOptions{}) + namespace, err = m.Client.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go index 781c4ea3ec..ab6b039248 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/api/admissionregistration/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -196,7 +196,7 @@ func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWeb defer cancel() } - r := client.Post().Context(ctx).Body(request) + r := client.Post().Body(request) // if the context has a deadline, set it as a parameter to inform the backend if deadline, hasDeadline := ctx.Deadline(); hasDeadline { @@ -211,7 +211,7 @@ func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWeb } } - if err := r.Do().Into(response); err != nil { + if err := r.Do(ctx).Into(response); err != nil { return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} } trace.Step("Request completed") diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go index 50f0139592..dbe745718e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go @@ -42,8 +42,8 @@ var ( }, []string{"status"}, ) - fetchCount = metrics.NewGaugeVec( - &metrics.GaugeOpts{ + fetchCount = metrics.NewCounterVec( + &metrics.CounterOpts{ Namespace: "authentication", Subsystem: "token_cache", Name: "fetch_total", @@ -51,13 +51,14 @@ var ( }, []string{"status"}, ) - blockCount = metrics.NewGauge( + activeFetchCount = metrics.NewGaugeVec( &metrics.GaugeOpts{ Namespace: "authentication", Subsystem: "token_cache", - Name: "block_count", + Name: "active_fetch_count", StabilityLevel: metrics.ALPHA, }, + []string{"status"}, ) ) @@ -66,7 +67,7 @@ func init() { requestLatency, requestCount, fetchCount, - blockCount, + activeFetchCount, ) } @@ -74,9 +75,11 @@ const ( hitTag = "hit" missTag = "miss" - fetchActiveTag = "active" fetchFailedTag = "error" fetchOkTag = "ok" + + fetchInFlightTag = "in_flight" + fetchBlockedTag = "blocked" ) type statsCollector struct{} @@ -101,12 +104,12 @@ func (statsCollector) authenticating() func(hit bool) { } func (statsCollector) blocking() func() { - blockCount.Inc() - return blockCount.Dec + activeFetchCount.WithLabelValues(fetchBlockedTag).Inc() + return activeFetchCount.WithLabelValues(fetchBlockedTag).Dec } func (statsCollector) fetching() func(ok bool) { - 
fetchCount.WithLabelValues(fetchActiveTag).Inc() + activeFetchCount.WithLabelValues(fetchInFlightTag).Inc() return func(ok bool) { var tag string if ok { @@ -115,6 +118,8 @@ func (statsCollector) fetching() func(ok bool) { tag = fetchFailedTag } - fetchCount.WithLabelValues(tag).Dec() + fetchCount.WithLabelValues(tag).Inc() + + activeFetchCount.WithLabelValues(fetchInFlightTag).Dec() } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 286e4aad5c..dd9b5e9d97 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -137,31 +137,26 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int if len(name) == 0 { _, name, _ = scope.Namer.ObjectName(obj) } - admissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo) - if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) { - err = mutatingAdmission.Admit(ctx, admissionAttributes, scope) - if err != nil { - scope.err(err, w, req) - return - } - } - - if scope.FieldManager != nil { - liveObj, err := scope.Creater.New(scope.Kind) - if err != nil { - scope.err(fmt.Errorf("failed to create new object (Create for %v): %v", scope.Kind, err), w, req) - return - } - - obj, err = scope.FieldManager.Update(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent())) - if err != nil { - scope.err(fmt.Errorf("failed to update object (Create for %v) managed fields: %v", scope.Kind, err), w, req) - return - } - } trace.Step("About to store object in database") result, err := finishRequest(timeout, func() (runtime.Object, error) { + if scope.FieldManager != nil { + liveObj, err := scope.Creater.New(scope.Kind) + if err != nil { + return nil, fmt.Errorf("failed to create new object (Create for %v): %v", scope.Kind, err) + } + obj, err = scope.FieldManager.Update(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent())) + if err != nil { + return nil, fmt.Errorf("failed to update object (Create for %v) managed fields: %v", scope.Kind, err) + } + } + + admissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo) + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) { + if err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil { + return nil, err + } + } return r.Create( ctx, name, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD index 0661920ccd..b2b1827545 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/BUILD @@ -67,9 +67,13 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + 
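The stats.go change above corrects a metric-type mismatch: fetch_total becomes a monotonically increasing counter (the old code Dec()'d it like a gauge), while the new active_fetch_count gauge tracks in-flight and blocked fetches that genuinely go up and down. A sketch of the same split, assuming k8s.io/component-base/metrics as used above (names illustrative, registration omitted):

import "k8s.io/component-base/metrics"

var (
	// Counter: completed fetches by outcome; only ever incremented.
	fetchTotal = metrics.NewCounterVec(
		&metrics.CounterOpts{Name: "example_fetch_total"},
		[]string{"status"},
	)
	// Gauge: fetches currently in progress; incremented and decremented.
	activeFetches = metrics.NewGaugeVec(
		&metrics.GaugeOpts{Name: "example_active_fetch_count"},
		[]string{"status"},
	)
)

func fetchOnce(do func() error) error {
	activeFetches.WithLabelValues("in_flight").Inc()
	defer activeFetches.WithLabelValues("in_flight").Dec()
	if err := do(); err != nil {
		fetchTotal.WithLabelValues("error").Inc()
		return err
	}
	fetchTotal.WithLabelValues("ok").Inc()
	return nil
}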
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto/testing:go_default_library", "//vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath:go_default_library", + "//vendor/sigs.k8s.io/structured-merge-diff/v3/merge:go_default_library", + "//vendor/sigs.k8s.io/structured-merge-diff/v3/typed:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go index 895642c117..8e92699532 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/capmanagers.go @@ -19,7 +19,6 @@ package fieldmanager import ( "fmt" "sort" - "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -78,15 +77,15 @@ func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Mana // If we have more than the maximum, sort the update entries by time, oldest first. sort.Slice(updaters, func(i, j int) bool { - iTime, jTime, nTime := managed.Times()[updaters[i]], managed.Times()[updaters[j]], &metav1.Time{Time: time.Time{}} - if iTime == nil { - iTime = nTime + iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0) + if iTime != nil { + iSeconds = iTime.Unix() } - if jTime == nil { - jTime = nTime + if jTime != nil { + jSeconds = jTime.Unix() } - if !iTime.Equal(jTime) { - return iTime.Before(jTime) + if iSeconds != jSeconds { + return iSeconds < jSeconds } return updaters[i] < updaters[j] }) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go index 1a9947f407..5ec1750362 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go @@ -35,7 +35,7 @@ const DefaultMaxUpdateManagers int = 10 // DefaultTrackOnCreateProbability defines the default probability that the field management of an object // starts being tracked from the object's creation, instead of from the first time the object is applied to. -const DefaultTrackOnCreateProbability float32 = 0.5 +const DefaultTrackOnCreateProbability float32 = 0.1 // Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. 
type Managed interface { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go index bf3b0a7795..4778fa7925 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "sort" - "time" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -194,15 +193,15 @@ func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) return p.Operation < q.Operation } - ntime := &metav1.Time{Time: time.Time{}} - if p.Time == nil { - p.Time = ntime + pSeconds, qSeconds := int64(0), int64(0) + if p.Time != nil { + pSeconds = p.Time.Unix() } - if q.Time == nil { - q.Time = ntime + if q.Time != nil { + qSeconds = q.Time.Unix() } - if !p.Time.Equal(q.Time) { - return p.Time.Before(q.Time) + if pSeconds != qSeconds { + return pSeconds < qSeconds } if p.Manager != q.Manager { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go index 7a8e6deb5f..7b87009a16 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/typeconverter.go @@ -49,11 +49,12 @@ var _ TypeConverter = DeducedTypeConverter{} // ObjectToTyped converts an object into a TypedValue with a "deduced type". func (DeducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { - u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return nil, err + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + default: + return typed.DeducedParseableType.FromStructured(obj) } - return typed.DeducedParseableType.FromUnstructured(u) } // TypedToObject transforms the typed value into a runtime.Object. 
That @@ -80,16 +81,17 @@ func NewTypeConverter(models proto.Models, preserveUnknownFields bool) (TypeConv } func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { - u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return nil, err - } gvk := obj.GetObjectKind().GroupVersionKind() t := c.parser.Type(gvk) if t == nil { return nil, newNoCorrespondingTypeError(gvk) } - return t.FromUnstructured(u) + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent()) + default: + return t.FromStructured(obj) + } } func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go index 0de4cdf169..4f61099039 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go @@ -99,13 +99,13 @@ func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) if err != nil { // Return newObj and just by-pass fields update. This really shouldn't happen. - klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed new object: %v", err) + klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed new object of type %v: %v", newObjVersioned.GetObjectKind().GroupVersionKind(), err) return newObj, managed, nil } liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) if err != nil { // Return newObj and just by-pass fields update. This really shouldn't happen. 
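Both ObjectToTyped implementations above now branch on the concrete object: unstructured content is already a map[string]interface{} and goes straight to FromUnstructured, while structured objects take the new reflection path via FromStructured instead of a JSON round trip through ToUnstructured. A sketch of the dispatch under a hypothetical local interface (the concrete receiver in the patch is the structured-merge-diff parseable type):

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/structured-merge-diff/v3/typed"
)

// parseable abstracts the two entry points used above.
type parseable interface {
	FromUnstructured(interface{}) (*typed.TypedValue, error)
	FromStructured(interface{}) (*typed.TypedValue, error)
}

func toTyped(t parseable, obj runtime.Object) (*typed.TypedValue, error) {
	switch o := obj.(type) {
	case *unstructured.Unstructured:
		// already plain maps and scalars; no JSON round trip needed
		return t.FromUnstructured(o.UnstructuredContent())
	default:
		// walk the struct with reflection
		return t.FromStructured(obj)
	}
}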
- klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed live object: %v", err) + klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed live object of type %v: %v", liveObjVersioned.GetObjectKind().GroupVersionKind(), err) return newObj, managed, nil } apiVersion := fieldpath.APIVersion(f.groupVersion.String()) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index 9bf2467cc8..37579820a4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -291,12 +291,17 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag var versionedDeleteOptions runtime.Object var versionedDeleterObject interface{} + deleteReturnsDeletedObject := false if isGracefulDeleter { versionedDeleteOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind("DeleteOptions")) if err != nil { return nil, err } versionedDeleterObject = indirectArbitraryPointer(versionedDeleteOptions) + + if mayReturnFullObjectDeleter, ok := storage.(rest.MayReturnFullObjectDeleter); ok { + deleteReturnsDeletedObject = mayReturnFullObjectDeleter.DeleteReturnsDeletedObject() + } } versionedStatusPtr, err := a.group.Creater.New(optionsExternalVersion.WithKind("Status")) @@ -769,15 +774,19 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if isSubresource { doc = "delete " + subresource + " of" + article + kind } + deleteReturnType := versionedStatus + if deleteReturnsDeletedObject { + deleteReturnType = producedObject + } handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteResource(gracefulDeleter, isGracefulDeleter, reqScope, admit)) route := ws.DELETE(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Writes(versionedStatus). - Returns(http.StatusOK, "OK", versionedStatus). - Returns(http.StatusAccepted, "Accepted", versionedStatus) + Writes(deleteReturnType). + Returns(http.StatusOK, "OK", deleteReturnType). 
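The delete route now documents the produced object instead of Status when the storage opts in; the opt-in hook is the rest.MayReturnFullObjectDeleter interface added further down in rest.go. A minimal sketch of a store opting in (ExampleREST is illustrative):

import "k8s.io/apiserver/pkg/registry/generic/registry"

// ExampleREST wraps a generic registry store for a resource whose
// DELETE should return the deleted object rather than a Status.
type ExampleREST struct {
	*registry.Store
}

// DeleteReturnsDeletedObject satisfies rest.MayReturnFullObjectDeleter,
// so the installer advertises the object schema on the DELETE route.
func (r *ExampleREST) DeleteReturnsDeletedObject() bool {
	return true
}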
+ Returns(http.StatusAccepted, "Accepted", deleteReturnType) if isGracefulDeleter { route.Reads(versionedDeleterObject) route.ParameterNamed("body").Required(false) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD index 8d13a34ead..8abb3d1a61 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD @@ -3,13 +3,6 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["metrics_test.go"], - embed = [":go_default_library"], ) go_library( @@ -20,7 +13,6 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/features:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index f4e02fbb6a..c79efdef4e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/types" - utilnet "k8s.io/apimachinery/pkg/util/net" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" @@ -65,14 +64,14 @@ var ( requestCounter = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ Name: "apiserver_request_total", - Help: "Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, client, and HTTP response contentType and code.", + Help: "Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response contentType and code.", StabilityLevel: compbasemetrics.ALPHA, }, // The label_name contentType doesn't follow the label_name convention defined here: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/instrumentation.md // But changing it would break backwards compatibility. Future label_names // should be all lowercase and separated by underscores. 
- []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "client", "contentType", "code"}, + []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "contentType", "code"}, ) longRunningRequestGauge = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ @@ -256,9 +255,8 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component, contentType string, httpCode, respSize int, elapsed time.Duration) { reportedVerb := cleanVerb(verb, req) dryRun := cleanDryRun(req.URL) - client := cleanUserAgent(utilnet.GetHTTPClient(req)) elapsedSeconds := elapsed.Seconds() - requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, client, contentType, codeToString(httpCode)).Inc() + requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, contentType, codeToString(httpCode)).Inc() requestLatencies.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component).Observe(elapsedSeconds) // We are only interested in response sizes of read requests. if verb == "GET" || verb == "LIST" { @@ -376,19 +374,6 @@ func cleanDryRun(u *url.URL) string { return strings.Join(utilsets.NewString(dryRun...).List(), ",") } -func cleanUserAgent(ua string) string { - // We collapse all "web browser"-type user agents into one "browser" to reduce metric cardinality. - if strings.HasPrefix(ua, "Mozilla/") { - return "Browser" - } - // If an old "kubectl.exe" has passed us its full path, we discard the path portion. - if kubectlExeRegexp.MatchString(ua) { - // avoid an allocation - ua = kubectlExeRegexp.ReplaceAllString(ua, "$1") - } - return ua -} - // ResponseWriterDelegator interface wraps http.ResponseWriter to additionally record content-length, status-code, etc. type ResponseWriterDelegator struct { http.ResponseWriter diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 8f40751a38..64bb3d57c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -140,6 +140,12 @@ const ( // // Deprecates and removes SelfLink from ObjectMeta and ListMeta. RemoveSelfLink featuregate.Feature = "RemoveSelfLink" + + // owner: @shaloulcy + // alpha: v1.18 + // + // Allows label and field based indexes in apiserver watch cache to accelerate list operations. 
+ SelectorIndex featuregate.Feature = "SelectorIndex" ) func init() { @@ -165,4 +171,5 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, APIPriorityAndFairness: {Default: false, PreRelease: featuregate.Alpha}, RemoveSelfLink: {Default: false, PreRelease: featuregate.Alpha}, + SelectorIndex: {Default: false, PreRelease: featuregate.Alpha}, } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD index d89d10356f..5bccea16fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD @@ -23,6 +23,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/options.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/options.go index 907928a356..577192b626 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/options.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/options.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/client-go/tools/cache" ) // RESTOptions is set of configuration options to generic registries. @@ -49,4 +50,5 @@ type StoreOptions struct { RESTOptions RESTOptionsGetter TriggerFunc storage.IndexerFuncs AttrFunc storage.AttrFunc + Indexers *cache.Indexers } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index 993f21200f..223b630fc5 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -21,6 +21,7 @@ import ( "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/client-go/tools/cache" ) // StorageDecorator is a function signature for producing a storage.Interface @@ -32,7 +33,8 @@ type StorageDecorator func( newFunc func() runtime.Object, newListFunc func() runtime.Object, getAttrsFunc storage.AttrFunc, - trigger storage.IndexerFuncs) (storage.Interface, factory.DestroyFunc, error) + trigger storage.IndexerFuncs, + indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) // UndecoratedStorage returns the given a new storage from the given config // without any decoration. 
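SelectorIndex follows the usual gate lifecycle: a featuregate.Feature constant plus an Alpha, default-off entry in defaultKubernetesFeatureGates. A minimal runtime check, assuming the standard apiserver feature helpers:

import (
	"k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// selectorIndexEnabled is false unless the server runs with
// --feature-gates=SelectorIndex=true.
func selectorIndexEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.SelectorIndex)
}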
@@ -43,7 +45,8 @@ func UndecoratedStorage( newFunc func() runtime.Object, newListFunc func() runtime.Object, getAttrsFunc storage.AttrFunc, - trigger storage.IndexerFuncs) (storage.Interface, factory.DestroyFunc, error) { + trigger storage.IndexerFuncs, + indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { return NewRawStorage(config) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go index 9ea6f98970..8f9c981dd8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go @@ -161,6 +161,11 @@ type GracefulDeleter interface { Delete(ctx context.Context, name string, deleteValidation ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) } +// MayReturnFullObjectDeleter may return the deleted object (instead of a simple status) on deletion. +type MayReturnFullObjectDeleter interface { + DeleteReturnsDeletedObject() bool +} + // CollectionDeleter is an object that can delete a collection // of RESTful resources. type CollectionDeleter interface { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/interfaces.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/interfaces.go index 7e8ae5241e..31d4b2a285 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/interfaces.go @@ -87,6 +87,12 @@ var Everything = SelectionPredicate{ Field: fields.Everything(), } +// MatchValue defines a pair (<index name>, <value for that index>). +type MatchValue struct { + IndexName string + Value string +} + // Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update // that is guaranteed to succeed. // See the comment for GuaranteedUpdate for more details. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go index 66d8d1f958..7370518e39 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go @@ -74,6 +74,7 @@ type SelectionPredicate struct { Label labels.Selector Field fields.Selector GetAttrs AttrFunc + IndexLabels []string IndexFields []string Limit int64 Continue string @@ -128,3 +129,31 @@ func (s *SelectionPredicate) MatchesSingle() (string, bool) { func (s *SelectionPredicate) Empty() bool { return s.Label.Empty() && s.Field.Empty() } + +// For any index defined by IndexFields, if a matcher can match only (a subset) +// of objects that return <value> for a given index, a pair (<index name>, <value>) +// will be returned. +func (s *SelectionPredicate) MatcherIndex() []MatchValue { + var result []MatchValue + for _, field := range s.IndexFields { + if value, ok := s.Field.RequiresExactMatch(field); ok { + result = append(result, MatchValue{IndexName: FieldIndex(field), Value: value}) + } + } + for _, label := range s.IndexLabels { + if value, ok := s.Label.RequiresExactMatch(label); ok { + result = append(result, MatchValue{IndexName: LabelIndex(label), Value: value}) + } + } + return result +} + +// LabelIndex adds a prefix for a label index. +func LabelIndex(label string) string { + return "l:" + label +} + +// FieldIndex adds a prefix for a field index.
+func FieldIndex(field string) string { + return "f:" + field +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD index e374e481bd..dc2839f18d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD @@ -36,6 +36,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go index 53c5d2285b..7410d1959e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go @@ -25,6 +25,7 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/authenticator" @@ -41,7 +42,7 @@ const retryBackoff = 500 * time.Millisecond var _ authenticator.Token = (*WebhookTokenAuthenticator)(nil) type tokenReviewer interface { - CreateContext(ctx context.Context, review *authenticationv1.TokenReview) (*authenticationv1.TokenReview, error) + Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, error) } type WebhookTokenAuthenticator struct { @@ -101,7 +102,7 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token auds authenticator.Audiences ) webhook.WithExponentialBackoff(ctx, w.initialBackoff, func() error { - result, err = w.tokenReview.CreateContext(ctx, r) + result, err = w.tokenReview.Create(ctx, r, metav1.CreateOptions{}) return err }, webhook.DefaultShouldRetry) if err != nil { @@ -196,9 +197,9 @@ type tokenReviewV1Client struct { w *webhook.GenericWebhook } -func (t *tokenReviewV1Client) CreateContext(ctx context.Context, review *authenticationv1.TokenReview) (*authenticationv1.TokenReview, error) { +func (t *tokenReviewV1Client) Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, error) { result := &authenticationv1.TokenReview{} - err := t.w.RestClient.Post().Context(ctx).Body(review).Do().Into(result) + err := t.w.RestClient.Post().Body(review).Do(ctx).Into(result) return result, err } @@ -206,10 +207,10 @@ type tokenReviewV1beta1Client struct { w *webhook.GenericWebhook } -func (t *tokenReviewV1beta1Client) CreateContext(ctx context.Context, review *authenticationv1.TokenReview) (*authenticationv1.TokenReview, error) { +func (t *tokenReviewV1beta1Client) Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, error) { v1beta1Review := 
&authenticationv1beta1.TokenReview{Spec: v1SpecToV1beta1Spec(&review.Spec)} v1beta1Result := &authenticationv1beta1.TokenReview{} - err := t.w.RestClient.Post().Context(ctx).Body(v1beta1Review).Do().Into(v1beta1Result) + err := t.w.RestClient.Post().Body(v1beta1Review).Do(ctx).Into(v1beta1Result) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD index 2c15fa9763..b3208931c7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD @@ -35,6 +35,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/authorization/v1:go_default_library", "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/cache:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index 3839d483ee..cc8b5e78bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -27,6 +27,7 @@ import ( authorizationv1 "k8s.io/api/authorization/v1" authorizationv1beta1 "k8s.io/api/authorization/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/cache" @@ -47,7 +48,7 @@ const ( var _ authorizer.Authorizer = (*WebhookAuthorizer)(nil) type subjectAccessReviewer interface { - CreateContext(context.Context, *authorizationv1.SubjectAccessReview) (*authorizationv1.SubjectAccessReview, error) + Create(context.Context, *authorizationv1.SubjectAccessReview, metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) } type WebhookAuthorizer struct { @@ -189,7 +190,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri err error ) webhook.WithExponentialBackoff(ctx, w.initialBackoff, func() error { - result, err = w.subjectAccessReview.CreateContext(ctx, r) + result, err = w.subjectAccessReview.Create(ctx, r, metav1.CreateOptions{}) return err }, webhook.DefaultShouldRetry) if err != nil { @@ -287,9 +288,9 @@ type subjectAccessReviewV1Client struct { w *webhook.GenericWebhook } -func (t *subjectAccessReviewV1Client) CreateContext(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview) (*authorizationv1.SubjectAccessReview, error) { +func (t *subjectAccessReviewV1Client) Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, _ metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) { result := &authorizationv1.SubjectAccessReview{} - err := t.w.RestClient.Post().Context(ctx).Body(subjectAccessReview).Do().Into(result) + err := t.w.RestClient.Post().Body(subjectAccessReview).Do(ctx).Into(result) return result, err } @@ -297,10 +298,10 @@ type subjectAccessReviewV1beta1Client struct { w *webhook.GenericWebhook } -func (t *subjectAccessReviewV1beta1Client) CreateContext(ctx context.Context, subjectAccessReview 
*authorizationv1.SubjectAccessReview) (*authorizationv1.SubjectAccessReview, error) { +func (t *subjectAccessReviewV1beta1Client) Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, _ metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) { v1beta1Review := &authorizationv1beta1.SubjectAccessReview{Spec: v1SpecToV1beta1Spec(&subjectAccessReview.Spec)} v1beta1Result := &authorizationv1beta1.SubjectAccessReview{} - err := t.w.RestClient.Post().Context(ctx).Body(v1beta1Review).Do().Into(v1beta1Result) + err := t.w.RestClient.Post().Body(v1beta1Review).Do(ctx).Into(v1beta1Result) if err == nil { subjectAccessReview.Status = v1beta1StatusToV1Status(&v1beta1Result.Status) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go index 5d89457cca..dc12f9a296 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -17,6 +17,7 @@ limitations under the License. package discovery import ( + "context" "encoding/json" "fmt" "net/url" @@ -155,7 +156,7 @@ func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.API func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) { // Get the groupVersions exposed at /api v := &metav1.APIVersions{} - err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v) + err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do(context.TODO()).Into(v) apiGroup := metav1.APIGroup{} if err == nil && len(v.Versions) != 0 { apiGroup = apiVersionsToAPIGroup(v) @@ -166,7 +167,7 @@ func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err // Get the groupVersions exposed at /apis apiGroupList = &metav1.APIGroupList{} - err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList) + err = d.restClient.Get().AbsPath("/apis").Do(context.TODO()).Into(apiGroupList) if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { return nil, err } @@ -196,7 +197,7 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r resources = &metav1.APIResourceList{ GroupVersion: groupVersion, } - err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources) + err = d.restClient.Get().AbsPath(url.String()).Do(context.TODO()).Into(resources) if err != nil { // ignore 403 or 404 error to be compatible with an v1.0 server. if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { @@ -405,7 +406,7 @@ func ServerPreferredNamespacedResources(d DiscoveryInterface) ([]*metav1.APIReso // ServerVersion retrieves and parses the server's version (git version). func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { - body, err := d.restClient.Get().AbsPath("/version").Do().Raw() + body, err := d.restClient.Get().AbsPath("/version").Do(context.TODO()).Raw() if err != nil { return nil, err } @@ -419,12 +420,12 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { // OpenAPISchema fetches the open api schema using a rest client and parses the proto. 
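
[Editor's note] The token-review and subject-access-review webhook hunks above are two instances of the same client-go signature migration: the bespoke `CreateContext(ctx, obj)` method becomes the generated-client-style `Create(ctx, obj, metav1.CreateOptions{})`, and the REST request carries its context via `Do(ctx)` instead of a separate `.Context(ctx)` builder call. A minimal sketch of the new interface shape follows; `fakeReviewer` is a hypothetical stand-in for the REST-backed `tokenReviewV1Client` / `tokenReviewV1beta1Client` in the patch, not code from this change.

    package main

    import (
        "context"
        "fmt"

        authenticationv1 "k8s.io/api/authentication/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // tokenReviewer mirrors the post-migration interface: context first,
    // CreateOptions last, matching generated client-go clients.
    type tokenReviewer interface {
        Create(ctx context.Context, review *authenticationv1.TokenReview, opts metav1.CreateOptions) (*authenticationv1.TokenReview, error)
    }

    // fakeReviewer is a hypothetical in-memory implementation used only
    // to show the call shape; the real clients POST to the webhook.
    type fakeReviewer struct{}

    func (fakeReviewer) Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, error) {
        out := review.DeepCopy()
        out.Status.Authenticated = true
        return out, nil
    }

    func main() {
        var r tokenReviewer = fakeReviewer{}
        result, err := r.Create(context.TODO(), &authenticationv1.TokenReview{
            Spec: authenticationv1.TokenReviewSpec{Token: "opaque-token"},
        }, metav1.CreateOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println("authenticated:", result.Status.Authenticated)
    }

Note that the callers (`AuthenticateToken`, `Authorize`) still wrap the call in `webhook.WithExponentialBackoff(ctx, ...)`, so the context now governs both the retry loop and each individual request.
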
func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { - data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do().Raw() + data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do(context.TODO()).Raw() if err != nil { if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { // single endpoint not found/registered in old server, try to fetch old endpoint // TODO: remove this when kubectl/client-go don't work with 1.9 server - data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() + data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do(context.TODO()).Raw() if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/simple.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/simple.go index 8a026788ae..d61504b92c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/simple.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/simple.go @@ -17,6 +17,7 @@ limitations under the License. package dynamic import ( + "context" "fmt" "k8s.io/apimachinery/pkg/api/meta" @@ -111,7 +112,7 @@ func (c *dynamicResourceClient) Create(obj *unstructured.Unstructured, opts meta AbsPath(append(c.makeURLSegments(name), subresources...)...). Body(outBytes). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } @@ -146,7 +147,7 @@ func (c *dynamicResourceClient) Update(obj *unstructured.Unstructured, opts meta AbsPath(append(c.makeURLSegments(name), subresources...)...). Body(outBytes). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } @@ -182,7 +183,7 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt AbsPath(append(c.makeURLSegments(name), "status")...). Body(outBytes). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } @@ -214,7 +215,7 @@ func (c *dynamicResourceClient) Delete(name string, opts *metav1.DeleteOptions, Delete(). AbsPath(append(c.makeURLSegments(name), subresources...)...). Body(deleteOptionsByte). - Do() + Do(context.TODO()) return result.Error() } @@ -232,7 +233,7 @@ func (c *dynamicResourceClient) DeleteCollection(opts *metav1.DeleteOptions, lis AbsPath(c.makeURLSegments("")...). Body(deleteOptionsByte). SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). 
- Do() + Do(context.TODO()) return result.Error() } @@ -240,7 +241,7 @@ func (c *dynamicResourceClient) Get(name string, opts metav1.GetOptions, subreso if len(name) == 0 { return nil, fmt.Errorf("name is required") } - result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do() + result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } @@ -256,7 +257,7 @@ func (c *dynamicResourceClient) Get(name string, opts metav1.GetOptions, subreso } func (c *dynamicResourceClient) List(opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { - result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do() + result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } @@ -283,7 +284,7 @@ func (c *dynamicResourceClient) Watch(opts metav1.ListOptions) (watch.Interface, opts.Watch = true return c.client.client.Get().AbsPath(c.makeURLSegments("")...). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Watch() + Watch(context.TODO()) } func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { @@ -295,7 +296,7 @@ func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []by AbsPath(append(c.makeURLSegments(name), subresources...)...). Body(data). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go index 4fadd9a216..b768f6f7f3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. 
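
[Editor's note] The discovery and dynamic client hunks above plumb a context through every REST call: `Do()` becomes `Do(ctx)`, `Watch()` becomes `Watch(ctx)`, and call sites whose exported signatures do not yet accept a context (e.g. `ServerVersion`, the dynamic `Get`/`List`) pass `context.TODO()` as a placeholder. A minimal sketch of the post-migration call shape, assuming a reachable cluster via the default kubeconfig; the `/version` path mirrors the ServerVersion hunk.

    package main

    import (
        "context"
        "fmt"
        "time"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Sketch only: assumes a kubeconfig at the default location.
        config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        client, err := kubernetes.NewForConfig(config)
        if err != nil {
            panic(err)
        }

        // Post-migration shape: the context rides on Do(ctx), so a
        // deadline or cancellation applies to the request itself.
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()
        body, err := client.Discovery().RESTClient().Get().AbsPath("/version").Do(ctx).Raw()
        if err != nil {
            panic(err)
        }
        fmt.Println(string(body))
    }

The `context.TODO()` occurrences in the vendored code are deliberate markers: they keep the generated and convenience APIs source-compatible at this vintage while flagging where a caller-supplied context should eventually flow.
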
package v1 import ( + "context" time "time" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" @@ -60,13 +61,13 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(options) + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(options) + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(context.TODO(), options) }, }, &admissionregistrationv1.MutatingWebhookConfiguration{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go index 1c648e6081..8ddcdf2d90 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" @@ -60,13 +61,13 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(options) + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(options) + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(context.TODO(), options) }, }, &admissionregistrationv1.ValidatingWebhookConfiguration{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index a06c406c2c..12c8ec1fbd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(options) + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(options) + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(context.TODO(), options) }, }, &admissionregistrationv1beta1.MutatingWebhookConfiguration{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 3b7fafd29c..05eb05097f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(options) + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(options) + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(context.TODO(), options) }, }, &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go index 2f69e0df01..31e2b74d0f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().ControllerRevisions(namespace).List(options) + return client.AppsV1().ControllerRevisions(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().ControllerRevisions(namespace).Watch(options) + return client.AppsV1().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, &appsv1.ControllerRevision{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go index db649ccbf2..da7fe9509b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().DaemonSets(namespace).List(options) + return client.AppsV1().DaemonSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().DaemonSets(namespace).Watch(options) + return client.AppsV1().DaemonSets(namespace).Watch(context.TODO(), options) }, }, &appsv1.DaemonSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/deployment.go index 71cd002733..bd639bb3d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().Deployments(namespace).List(options) + return client.AppsV1().Deployments(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().Deployments(namespace).Watch(options) + return client.AppsV1().Deployments(namespace).Watch(context.TODO(), options) }, }, &appsv1.Deployment{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go index 6ee7a0537e..6d81a471a4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().ReplicaSets(namespace).List(options) + return client.AppsV1().ReplicaSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().ReplicaSets(namespace).Watch(options) + return client.AppsV1().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, &appsv1.ReplicaSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go index 385e653660..c99bbb73ed 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().StatefulSets(namespace).List(options) + return client.AppsV1().StatefulSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().StatefulSets(namespace).Watch(options) + return client.AppsV1().StatefulSets(namespace).Watch(context.TODO(), options) }, }, &appsv1.StatefulSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go index c7d3e30e04..cb36bd7fd8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().ControllerRevisions(namespace).List(options) + return client.AppsV1beta1().ControllerRevisions(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().ControllerRevisions(namespace).Watch(options) + return client.AppsV1beta1().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, &appsv1beta1.ControllerRevision{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go index 03bafca6ba..e02a13c2f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().Deployments(namespace).List(options) + return client.AppsV1beta1().Deployments(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().Deployments(namespace).Watch(options) + return client.AppsV1beta1().Deployments(namespace).Watch(context.TODO(), options) }, }, &appsv1beta1.Deployment{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go index e4d1b46fa6..b845cc99c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().StatefulSets(namespace).List(options) + return client.AppsV1beta1().StatefulSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().StatefulSets(namespace).Watch(options) + return client.AppsV1beta1().StatefulSets(namespace).Watch(context.TODO(), options) }, }, &appsv1beta1.StatefulSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go index 975e81077e..4d0e91320b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().ControllerRevisions(namespace).List(options) + return client.AppsV1beta2().ControllerRevisions(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().ControllerRevisions(namespace).Watch(options) + return client.AppsV1beta2().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.ControllerRevision{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go index 99f17fa6c4..280e2fe465 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().DaemonSets(namespace).List(options) + return client.AppsV1beta2().DaemonSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().DaemonSets(namespace).Watch(options) + return client.AppsV1beta2().DaemonSets(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.DaemonSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go index b25da82bde..67bdb79720 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().Deployments(namespace).List(options) + return client.AppsV1beta2().Deployments(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().Deployments(namespace).Watch(options) + return client.AppsV1beta2().Deployments(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.Deployment{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go index 6ce7fcfd0d..85d12bb65d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().ReplicaSets(namespace).List(options) + return client.AppsV1beta2().ReplicaSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().ReplicaSets(namespace).Watch(options) + return client.AppsV1beta2().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.ReplicaSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go index e77bb2f8fd..2fab6f7b2b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().StatefulSets(namespace).List(options) + return client.AppsV1beta2().StatefulSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().StatefulSets(namespace).Watch(options) + return client.AppsV1beta2().StatefulSets(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.StatefulSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go index 69778ad2cf..ef178c3aa8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredAuditSinkInformer(client kubernetes.Interface, resyncPeriod time if tweakListOptions != nil { tweakListOptions(&options) } - return client.AuditregistrationV1alpha1().AuditSinks().List(options) + return client.AuditregistrationV1alpha1().AuditSinks().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AuditregistrationV1alpha1().AuditSinks().Watch(options) + return client.AuditregistrationV1alpha1().AuditSinks().Watch(context.TODO(), options) }, }, &auditregistrationv1alpha1.AuditSink{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go index 205e4ecd79..44f041e906 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -61,13 +62,13 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(options) + return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(options) + return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, &autoscalingv1.HorizontalPodAutoscaler{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go index 4627c5a0b5..6385a2a190 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. package v2beta1 import ( + "context" time "time" autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" @@ -61,13 +62,13 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(options) + return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(options) + return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, &autoscalingv2beta1.HorizontalPodAutoscaler{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go index b4863f9b74..f1ac3f0737 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. 
package v2beta2 import ( + "context" time "time" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -61,13 +62,13 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).List(options) + return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Watch(options) + return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, &autoscalingv2beta2.HorizontalPodAutoscaler{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v1/job.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v1/job.go index 30d41104ad..4992f52286 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v1/job.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v1/job.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" batchv1 "k8s.io/api/batch/v1" @@ -61,13 +62,13 @@ func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyn if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV1().Jobs(namespace).List(options) + return client.BatchV1().Jobs(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV1().Jobs(namespace).Watch(options) + return client.BatchV1().Jobs(namespace).Watch(context.TODO(), options) }, }, &batchv1.Job{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go index 0b7598e0f8..820c93eaaa 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" batchv1beta1 "k8s.io/api/batch/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV1beta1().CronJobs(namespace).List(options) + return client.BatchV1beta1().CronJobs(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV1beta1().CronJobs(namespace).Watch(options) + return client.BatchV1beta1().CronJobs(namespace).Watch(context.TODO(), options) }, }, &batchv1beta1.CronJob{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go index 20cf7d498d..5f5b870d4b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go @@ -19,6 +19,7 @@ limitations under the License. 
package v2alpha1 import ( + "context" time "time" batchv2alpha1 "k8s.io/api/batch/v2alpha1" @@ -61,13 +62,13 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV2alpha1().CronJobs(namespace).List(options) + return client.BatchV2alpha1().CronJobs(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV2alpha1().CronJobs(namespace).Watch(options) + return client.BatchV2alpha1().CronJobs(namespace).Watch(context.TODO(), options) }, }, &batchv2alpha1.CronJob{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go index 6472d20e29..4e167ab8b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.CertificatesV1beta1().CertificateSigningRequests().List(options) + return client.CertificatesV1beta1().CertificateSigningRequests().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CertificatesV1beta1().CertificateSigningRequests().Watch(options) + return client.CertificatesV1beta1().CertificateSigningRequests().Watch(context.TODO(), options) }, }, &certificatesv1beta1.CertificateSigningRequest{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/coordination/v1/lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/coordination/v1/lease.go index b8a3de471c..e538923a86 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/coordination/v1/lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/coordination/v1/lease.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" coordinationv1 "k8s.io/api/coordination/v1" @@ -61,13 +62,13 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1().Leases(namespace).List(options) + return client.CoordinationV1().Leases(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1().Leases(namespace).Watch(options) + return client.CoordinationV1().Leases(namespace).Watch(context.TODO(), options) }, }, &coordinationv1.Lease{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go index bb59be13e1..5a6959c0ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1beta1().Leases(namespace).List(options) + return client.CoordinationV1beta1().Leases(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1beta1().Leases(namespace).Watch(options) + return client.CoordinationV1beta1().Leases(namespace).Watch(context.TODO(), options) }, }, &coordinationv1beta1.Lease{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go index a5ae6fc496..ccdee535bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -60,13 +61,13 @@ func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPerio if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ComponentStatuses().List(options) + return client.CoreV1().ComponentStatuses().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ComponentStatuses().Watch(options) + return client.CoreV1().ComponentStatuses().Watch(context.TODO(), options) }, }, &corev1.ComponentStatus{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/configmap.go index 48cb1a48e4..6253581784 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/configmap.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ConfigMaps(namespace).List(options) + return client.CoreV1().ConfigMaps(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ConfigMaps(namespace).Watch(options) + return client.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), options) }, }, &corev1.ConfigMap{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/endpoints.go index 77fa8cf8a0..cd0f25b7f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/endpoints.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Endpoints(namespace).List(options) + return client.CoreV1().Endpoints(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Endpoints(namespace).Watch(options) + return client.CoreV1().Endpoints(namespace).Watch(context.TODO(), options) }, }, &corev1.Endpoints{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/event.go index 52f4911c1d..8825e9b7a4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/event.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Events(namespace).List(options) + return client.CoreV1().Events(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Events(namespace).Watch(options) + return client.CoreV1().Events(namespace).Watch(context.TODO(), options) }, }, &corev1.Event{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/limitrange.go index 7499e1869d..4cbfda1f7a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/limitrange.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/limitrange.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().LimitRanges(namespace).List(options) + return client.CoreV1().LimitRanges(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().LimitRanges(namespace).Watch(options) + return client.CoreV1().LimitRanges(namespace).Watch(context.TODO(), options) }, }, &corev1.LimitRange{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/namespace.go index 57a073355a..506f930a7d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/namespace.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/namespace.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -60,13 +61,13 @@ func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Namespaces().List(options) + return client.CoreV1().Namespaces().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Namespaces().Watch(options) + return client.CoreV1().Namespaces().Watch(context.TODO(), options) }, }, &corev1.Namespace{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/node.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/node.go index d9b85f83c2..9939fc2cb6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/node.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/node.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -60,13 +61,13 @@ func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Dura if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Nodes().List(options) + return client.CoreV1().Nodes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Nodes().Watch(options) + return client.CoreV1().Nodes().Watch(context.TODO(), options) }, }, &corev1.Node{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go index a50bcfc663..c82445997c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -60,13 +61,13 @@ func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PersistentVolumes().List(options) + return client.CoreV1().PersistentVolumes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PersistentVolumes().Watch(options) + return client.CoreV1().PersistentVolumes().Watch(context.TODO(), options) }, }, &corev1.PersistentVolume{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go index 3fb5e5f6cc..7a7df1cff8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, names if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PersistentVolumeClaims(namespace).List(options) + return client.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PersistentVolumeClaims(namespace).Watch(options) + return client.CoreV1().PersistentVolumeClaims(namespace).Watch(context.TODO(), options) }, }, &corev1.PersistentVolumeClaim{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/pod.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/pod.go index 57aadd9458..5c713a9b6f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/pod.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/pod.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyn if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Pods(namespace).List(options) + return client.CoreV1().Pods(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Pods(namespace).Watch(options) + return client.CoreV1().Pods(namespace).Watch(context.TODO(), options) }, }, &corev1.Pod{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go index ff47094fb8..2a16e910db 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PodTemplates(namespace).List(options) + return client.CoreV1().PodTemplates(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PodTemplates(namespace).Watch(options) + return client.CoreV1().PodTemplates(namespace).Watch(context.TODO(), options) }, }, &corev1.PodTemplate{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go index 903fe3fbad..930beb4cd5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredReplicationControllerInformer(client kubernetes.Interface, names if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ReplicationControllers(namespace).List(options) + return client.CoreV1().ReplicationControllers(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ReplicationControllers(namespace).Watch(options) + return client.CoreV1().ReplicationControllers(namespace).Watch(context.TODO(), options) }, }, &corev1.ReplicationController{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go index 27ae53ccb4..619262a612 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ResourceQuotas(namespace).List(options) + return client.CoreV1().ResourceQuotas(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ResourceQuotas(namespace).Watch(options) + return client.CoreV1().ResourceQuotas(namespace).Watch(context.TODO(), options) }, }, &corev1.ResourceQuota{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/secret.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/secret.go index e13776b2b1..a6be070693 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/secret.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, re if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Secrets(namespace).List(options) + return client.CoreV1().Secrets(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Secrets(namespace).Watch(options) + return client.CoreV1().Secrets(namespace).Watch(context.TODO(), options) }, }, &corev1.Secret{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/service.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/service.go index 1c758668c4..3d9ecc6e95 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/service.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/service.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Services(namespace).List(options) + return client.CoreV1().Services(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Services(namespace).Watch(options) + return client.CoreV1().Services(namespace).Watch(context.TODO(), options) }, }, &corev1.Service{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go index c701b8f1e6..44371c9fa4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace st if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ServiceAccounts(namespace).List(options) + return client.CoreV1().ServiceAccounts(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ServiceAccounts(namespace).Watch(options) + return client.CoreV1().ServiceAccounts(namespace).Watch(context.TODO(), options) }, }, &corev1.ServiceAccount{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go index a545ce1551..c5e383c0b2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1alpha1 import ( + "context" time "time" discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" @@ -61,13 +62,13 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.DiscoveryV1alpha1().EndpointSlices(namespace).List(options) + return client.DiscoveryV1alpha1().EndpointSlices(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.DiscoveryV1alpha1().EndpointSlices(namespace).Watch(options) + return client.DiscoveryV1alpha1().EndpointSlices(namespace).Watch(context.TODO(), options) }, }, &discoveryv1alpha1.EndpointSlice{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go index f658866c2a..69ae38a91a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" discoveryv1beta1 "k8s.io/api/discovery/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.DiscoveryV1beta1().EndpointSlices(namespace).List(options) + return client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(options) + return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(context.TODO(), options) }, }, &discoveryv1beta1.EndpointSlice{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/events/v1beta1/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/events/v1beta1/event.go index 0ac6fa2827..025f6a5cf3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/events/v1beta1/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/events/v1beta1/event.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" eventsv1beta1 "k8s.io/api/events/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.EventsV1beta1().Events(namespace).List(options) + return client.EventsV1beta1().Events(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.EventsV1beta1().Events(namespace).Watch(options) + return client.EventsV1beta1().Events(namespace).Watch(context.TODO(), options) }, }, &eventsv1beta1.Event{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go index 80e84eba80..050080a598 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().DaemonSets(namespace).List(options) + return client.ExtensionsV1beta1().DaemonSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(options) + return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.DaemonSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go index cef4b8150f..1b16c5cc91 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().Deployments(namespace).List(options) + return client.ExtensionsV1beta1().Deployments(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().Deployments(namespace).Watch(options) + return client.ExtensionsV1beta1().Deployments(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.Deployment{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go index 72a88f3138..f01a887617 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().Ingresses(namespace).List(options) + return client.ExtensionsV1beta1().Ingresses(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().Ingresses(namespace).Watch(options) + return client.ExtensionsV1beta1().Ingresses(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.Ingress{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go index 92f4f04007..4a924619fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(options) + return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(options) + return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.NetworkPolicy{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go index 6f91e54582..11be2751cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPer if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().PodSecurityPolicies().List(options) + return client.ExtensionsV1beta1().PodSecurityPolicies().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(options) + return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(context.TODO(), options) }, }, &extensionsv1beta1.PodSecurityPolicy{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go index e8847aa2cf..f7e224bcfb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().ReplicaSets(namespace).List(options) + return client.ExtensionsV1beta1().ReplicaSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(options) + return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.ReplicaSet{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go index af1d874c72..9a4a904481 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1alpha1 import ( + "context" time "time" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().FlowSchemas().List(options) + return client.FlowcontrolV1alpha1().FlowSchemas().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().FlowSchemas().Watch(options) + return client.FlowcontrolV1alpha1().FlowSchemas().Watch(context.TODO(), options) }, }, &flowcontrolv1alpha1.FlowSchema{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go index c145b7d411..b81f5c9c36 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().List(options) + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().Watch(options) + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, &flowcontrolv1alpha1.PriorityLevelConfiguration{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go index c2255c0dfd..a75c9ac21f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" networkingv1 "k8s.io/api/networking/v1" @@ -61,13 +62,13 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1().NetworkPolicies(namespace).List(options) + return client.NetworkingV1().NetworkPolicies(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1().NetworkPolicies(namespace).Watch(options) + return client.NetworkingV1().NetworkPolicies(namespace).Watch(context.TODO(), options) }, }, &networkingv1.NetworkPolicy{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go index 8abd00e17b..8800d6c9cd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" networkingv1beta1 "k8s.io/api/networking/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1beta1().Ingresses(namespace).List(options) + return client.NetworkingV1beta1().Ingresses(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1beta1().Ingresses(namespace).Watch(options) + return client.NetworkingV1beta1().Ingresses(namespace).Watch(context.TODO(), options) }, }, &networkingv1beta1.Ingress{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go index 31edf930ae..d314a9573c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.NodeV1alpha1().RuntimeClasses().List(options) + return client.NodeV1alpha1().RuntimeClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NodeV1alpha1().RuntimeClasses().Watch(options) + return client.NodeV1alpha1().RuntimeClasses().Watch(context.TODO(), options) }, }, &nodev1alpha1.RuntimeClass{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go index 6972993ad8..07619b2306 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" nodev1beta1 "k8s.io/api/node/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.NodeV1beta1().RuntimeClasses().List(options) + return client.NodeV1beta1().RuntimeClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NodeV1beta1().RuntimeClasses().Watch(options) + return client.NodeV1beta1().RuntimeClasses().Watch(context.TODO(), options) }, }, &nodev1beta1.RuntimeClass{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go index dce61f7f18..4530343ecc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespa if tweakListOptions != nil { tweakListOptions(&options) } - return client.PolicyV1beta1().PodDisruptionBudgets(namespace).List(options) + return client.PolicyV1beta1().PodDisruptionBudgets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(options) + return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(context.TODO(), options) }, }, &policyv1beta1.PodDisruptionBudget{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go index 7ce5684fb0..b87d23434e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPer if tweakListOptions != nil { tweakListOptions(&options) } - return client.PolicyV1beta1().PodSecurityPolicies().List(options) + return client.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.PolicyV1beta1().PodSecurityPolicies().Watch(options) + return client.PolicyV1beta1().PodSecurityPolicies().Watch(context.TODO(), options) }, }, &policyv1beta1.PodSecurityPolicy{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go index b8096e6bca..0572be264b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" rbacv1 "k8s.io/api/rbac/v1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().ClusterRoles().List(options) + return client.RbacV1().ClusterRoles().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().ClusterRoles().Watch(options) + return client.RbacV1().ClusterRoles().Watch(context.TODO(), options) }, }, &rbacv1.ClusterRole{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go index 5ef3407c4c..51026c0558 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" rbacv1 "k8s.io/api/rbac/v1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().ClusterRoleBindings().List(options) + return client.RbacV1().ClusterRoleBindings().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().ClusterRoleBindings().Watch(options) + return client.RbacV1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, &rbacv1.ClusterRoleBinding{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/role.go index 2d98874e5d..986a5f29f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/role.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" rbacv1 "k8s.io/api/rbac/v1" @@ -61,13 +62,13 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().Roles(namespace).List(options) + return client.RbacV1().Roles(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().Roles(namespace).Watch(options) + return client.RbacV1().Roles(namespace).Watch(context.TODO(), options) }, }, &rbacv1.Role{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go index a97107de1a..0264049fb0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" rbacv1 "k8s.io/api/rbac/v1" @@ -61,13 +62,13 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().RoleBindings(namespace).List(options) + return client.RbacV1().RoleBindings(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().RoleBindings(namespace).Watch(options) + return client.RbacV1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, &rbacv1.RoleBinding{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go index 58c9c41259..70d9885f0a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().ClusterRoles().List(options) + return client.RbacV1alpha1().ClusterRoles().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().ClusterRoles().Watch(options) + return client.RbacV1alpha1().ClusterRoles().Watch(context.TODO(), options) }, }, &rbacv1alpha1.ClusterRole{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go index 759c716bf8..8c18f67928 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().ClusterRoleBindings().List(options) + return client.RbacV1alpha1().ClusterRoleBindings().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().ClusterRoleBindings().Watch(options) + return client.RbacV1alpha1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, &rbacv1alpha1.ClusterRoleBinding{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go index 1d1f99f064..7dc4551d92 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1alpha1 import ( + "context" time "time" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -61,13 +62,13 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().Roles(namespace).List(options) + return client.RbacV1alpha1().Roles(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().Roles(namespace).Watch(options) + return client.RbacV1alpha1().Roles(namespace).Watch(context.TODO(), options) }, }, &rbacv1alpha1.Role{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go index 9fcb01d3ab..d49ec8b362 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -61,13 +62,13 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().RoleBindings(namespace).List(options) + return client.RbacV1alpha1().RoleBindings(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().RoleBindings(namespace).Watch(options) + return client.RbacV1alpha1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, &rbacv1alpha1.RoleBinding{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go index b82c1c740a..e50e1d3935 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" rbacv1beta1 "k8s.io/api/rbac/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().ClusterRoles().List(options) + return client.RbacV1beta1().ClusterRoles().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().ClusterRoles().Watch(options) + return client.RbacV1beta1().ClusterRoles().Watch(context.TODO(), options) }, }, &rbacv1beta1.ClusterRole{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go index d662e7f563..a7ea4cd38d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" rbacv1beta1 "k8s.io/api/rbac/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().ClusterRoleBindings().List(options) + return client.RbacV1beta1().ClusterRoleBindings().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().ClusterRoleBindings().Watch(options) + return client.RbacV1beta1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, &rbacv1beta1.ClusterRoleBinding{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go index b885beb272..e56961e81e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" rbacv1beta1 "k8s.io/api/rbac/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().Roles(namespace).List(options) + return client.RbacV1beta1().Roles(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().Roles(namespace).Watch(options) + return client.RbacV1beta1().Roles(namespace).Watch(context.TODO(), options) }, }, &rbacv1beta1.Role{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go index 63d9d7264e..d893882db3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" rbacv1beta1 "k8s.io/api/rbac/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().RoleBindings(namespace).List(options) + return client.RbacV1beta1().RoleBindings(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().RoleBindings(namespace).Watch(options) + return client.RbacV1beta1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, &rbacv1beta1.RoleBinding{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go index a9ee6289e4..730616b4a5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" schedulingv1 "k8s.io/api/scheduling/v1" @@ -60,13 +61,13 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1().PriorityClasses().List(options) + return client.SchedulingV1().PriorityClasses().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1().PriorityClasses().Watch(options) + return client.SchedulingV1().PriorityClasses().Watch(context.TODO(), options) }, }, &schedulingv1.PriorityClass{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go index cd90dd7654..f82b664369 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1alpha1().PriorityClasses().List(options) + return client.SchedulingV1alpha1().PriorityClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1alpha1().PriorityClasses().Watch(options) + return client.SchedulingV1alpha1().PriorityClasses().Watch(context.TODO(), options) }, }, &schedulingv1alpha1.PriorityClass{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go index 3c7d90938f..fc7848891e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1beta1().PriorityClasses().List(options) + return client.SchedulingV1beta1().PriorityClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1beta1().PriorityClasses().Watch(options) + return client.SchedulingV1beta1().PriorityClasses().Watch(context.TODO(), options) }, }, &schedulingv1beta1.PriorityClass{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go index 33fcf2359e..8c10b16c85 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" @@ -61,13 +62,13 @@ func NewFilteredPodPresetInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.SettingsV1alpha1().PodPresets(namespace).List(options) + return client.SettingsV1alpha1().PodPresets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SettingsV1alpha1().PodPresets(namespace).Watch(options) + return client.SettingsV1alpha1().PodPresets(namespace).Watch(context.TODO(), options) }, }, &settingsv1alpha1.PodPreset{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/csinode.go index eed947c4a6..96416967fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/csinode.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/csinode.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" storagev1 "k8s.io/api/storage/v1" @@ -60,13 +61,13 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().CSINodes().List(options) + return client.StorageV1().CSINodes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().CSINodes().Watch(options) + return client.StorageV1().CSINodes().Watch(context.TODO(), options) }, }, &storagev1.CSINode{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go index b4609b4d2f..8cde79d9a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" storagev1 "k8s.io/api/storage/v1" @@ -60,13 +61,13 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().StorageClasses().List(options) + return client.StorageV1().StorageClasses().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().StorageClasses().Watch(options) + return client.StorageV1().StorageClasses().Watch(context.TODO(), options) }, }, &storagev1.StorageClass{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go index 7ca3b86f22..be605ff48c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" storagev1 "k8s.io/api/storage/v1" @@ -60,13 +61,13 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().VolumeAttachments().List(options) + return client.StorageV1().VolumeAttachments().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().VolumeAttachments().Watch(options) + return client.StorageV1().VolumeAttachments().Watch(context.TODO(), options) }, }, &storagev1.VolumeAttachment{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go index e169c8a29c..445496dade 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" storagev1alpha1 "k8s.io/api/storage/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1alpha1().VolumeAttachments().List(options) + return client.StorageV1alpha1().VolumeAttachments().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1alpha1().VolumeAttachments().Watch(options) + return client.StorageV1alpha1().VolumeAttachments().Watch(context.TODO(), options) }, }, &storagev1alpha1.VolumeAttachment{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go index 7f7cb216df..f138a915b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().CSIDrivers().List(options) + return client.StorageV1beta1().CSIDrivers().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().CSIDrivers().Watch(options) + return client.StorageV1beta1().CSIDrivers().Watch(context.TODO(), options) }, }, &storagev1beta1.CSIDriver{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go index 218bb11831..6ba63172a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().CSINodes().List(options) + return client.StorageV1beta1().CSINodes().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().CSINodes().Watch(options) + return client.StorageV1beta1().CSINodes().Watch(context.TODO(), options) }, }, &storagev1beta1.CSINode{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go index ed898a77b8..a6582bf3d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().StorageClasses().List(options) + return client.StorageV1beta1().StorageClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().StorageClasses().Watch(options) + return client.StorageV1beta1().StorageClasses().Watch(context.TODO(), options) }, }, &storagev1beta1.StorageClass{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go index c75fc06b15..e894246349 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().VolumeAttachments().List(options) + return client.StorageV1beta1().VolumeAttachments().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().VolumeAttachments().Watch(options) + return client.StorageV1beta1().VolumeAttachments().Watch(context.TODO(), options) }, }, &storagev1beta1.VolumeAttachment{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go index 6e09faf110..3ddf59ee8b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var mutatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: " var mutatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1", Kind: "MutatingWebhookConfiguration"} // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *FakeMutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &admissionregistrationv1.MutatingWebhookConfiguration{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeMutatingWebhookConfigurations) Get(name string, options v1.GetOptio } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *FakeMutatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistrationv1.MutatingWebhookConfigurationList, err error) { +func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *admissionregistrationv1.MutatingWebhookConfigurationList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &admissionregistrationv1.MutatingWebhookConfigurationList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeMutatingWebhookConfigurations) List(opts v1.ListOptions) (result *a } // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. 
-func (c *FakeMutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &admissionregistrationv1.MutatingWebhookConfiguration{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeMutatingWebhookConfigurations) Create(mutatingWebhookConfiguration } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &admissionregistrationv1.MutatingWebhookConfiguration{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeMutatingWebhookConfigurations) Update(mutatingWebhookConfiguration } // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeMutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(mutatingwebhookconfigurationsResource, name), &admissionregistrationv1.MutatingWebhookConfiguration{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOptions) _, err := c.Fake.Invokes(action, &admissionregistrationv1.MutatingWebhookConfigurationList{}) @@ -110,7 +112,7 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteO } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. 
-func (c *FakeMutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &admissionregistrationv1.MutatingWebhookConfiguration{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go index a5f8d20456..88fddd04b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var validatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: var validatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingWebhookConfiguration"} // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *FakeValidatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &admissionregistrationv1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeValidatingWebhookConfigurations) Get(name string, options v1.GetOpt } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *FakeValidatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistrationv1.ValidatingWebhookConfigurationList, err error) { +func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *admissionregistrationv1.ValidatingWebhookConfigurationList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &admissionregistrationv1.ValidatingWebhookConfigurationList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeValidatingWebhookConfigurations) List(opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. 
-func (c *FakeValidatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Create(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &admissionregistrationv1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeValidatingWebhookConfigurations) Create(validatingWebhookConfigurat } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Update(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &admissionregistrationv1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeValidatingWebhookConfigurations) Update(validatingWebhookConfigurat } // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeValidatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(validatingwebhookconfigurationsResource, name), &admissionregistrationv1.ValidatingWebhookConfiguration{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOptions) _, err := c.Fake.Invokes(action, &admissionregistrationv1.ValidatingWebhookConfigurationList{}) @@ -110,7 +112,7 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.Delet } // Patch applies the patch and returns the patched validatingWebhookConfiguration. 
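The fake clientset hunks here change in lockstep with the typed clients: every verb gains a leading context, and Create/Update/Patch gain explicit options structs, while Delete and DeleteCollection still take *v1.DeleteOptions at this snapshot of the API. A hedged sketch of test code against the updated fakes (object names are assumptions):

package main

import (
	"context"
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	ctx := context.TODO()

	cfg := &admissionregistrationv1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-webhook"},
	}

	// Create now takes a context plus an explicit CreateOptions value.
	if _, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().
		Create(ctx, cfg, metav1.CreateOptions{}); err != nil {
		fmt.Println("create:", err)
	}

	// Delete keeps pointer options in this vintage, matching the hunks above.
	if err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().
		Delete(ctx, "example-webhook", &metav1.DeleteOptions{}); err != nil {
		fmt.Println("delete:", err)
	}
}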
-func (c *FakeValidatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &admissionregistrationv1.ValidatingWebhookConfiguration{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go index 1f5e5e3803..e9787bf408 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/admissionregistration/v1" @@ -37,14 +38,14 @@ type MutatingWebhookConfigurationsGetter interface { // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. type MutatingWebhookConfigurationInterface interface { - Create(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) - Update(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) - List(opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) + Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.MutatingWebhookConfiguration, error) + Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.MutatingWebhookConfiguration, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) MutatingWebhookConfigurationExpansion } @@ -61,19 +62,19 @@ func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatin } // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. 
-func (c *mutatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { result = &v1.MutatingWebhookConfiguration{} err = c.client.Get(). Resource("mutatingwebhookconfigurations"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { +func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *mutatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *mutatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.In Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { result = &v1.MutatingWebhookConfiguration{} err = c.client.Post(). Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(mutatingWebhookConfiguration). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { result = &v1.MutatingWebhookConfiguration{} err = c.client.Put(). Resource("mutatingwebhookconfigurations"). Name(mutatingWebhookConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). 
Body(mutatingWebhookConfiguration). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { +func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("mutatingwebhookconfigurations"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *mutatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteO VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { result = &v1.MutatingWebhookConfiguration{} err = c.client.Patch(pt). Resource("mutatingwebhookconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go index 7987b6e308..1a9c1b4ac6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/admissionregistration/v1" @@ -37,14 +38,14 @@ type ValidatingWebhookConfigurationsGetter interface { // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
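For callers, the practical effect of the interface change above is that every request now carries a context through Do(ctx) or Watch(ctx), so deadlines and cancellation propagate to the underlying HTTP call. A hypothetical migration sketch, assuming an existing kubernetes.Interface named client:

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func printWebhookConfig(client kubernetes.Interface, name string) error {
	// The context rides through Do(ctx), so a deadline or cancellation
	// here aborts the in-flight request instead of blocking on it.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	cfg, err := client.AdmissionregistrationV1().
		MutatingWebhookConfigurations().
		Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println(cfg.Name)
	return nil
}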
type ValidatingWebhookConfigurationInterface interface { - Create(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) - Update(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) - List(opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) + Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.ValidatingWebhookConfiguration, error) + Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.ValidatingWebhookConfiguration, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) ValidatingWebhookConfigurationExpansion } @@ -61,19 +62,19 @@ func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *valid } // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { result = &v1.ValidatingWebhookConfiguration{} err = c.client.Get(). Resource("validatingwebhookconfigurations"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { +func (c *validatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *validatingWebhookConfigurations) List(opts metav1.ListOptions) (result Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. 
-func (c *validatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *validatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch. Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { result = &v1.ValidatingWebhookConfiguration{} err = c.client.Post(). Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(validatingWebhookConfiguration). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { result = &v1.ValidatingWebhookConfiguration{} err = c.client.Put(). Resource("validatingwebhookconfigurations"). Name(validatingWebhookConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(validatingWebhookConfiguration). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { +func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("validatingwebhookconfigurations"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *validatingWebhookConfigurations) DeleteCollection(options *metav1.Delet VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched validatingWebhookConfiguration. 
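Create and Update above also begin forwarding their new options structs via VersionedParams, which is what makes per-request features such as server-side dry-run reachable from the typed client. A hedged sketch of that, assuming a kubernetes.Interface; the field-manager string is a made-up example:

package example

import (
	"context"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func dryRunCreate(client kubernetes.Interface, cfg *admissionregistrationv1.ValidatingWebhookConfiguration) error {
	_, err := client.AdmissionregistrationV1().
		ValidatingWebhookConfigurations().
		Create(context.TODO(), cfg, metav1.CreateOptions{
			DryRun:       []string{metav1.DryRunAll}, // validated server-side, not persisted
			FieldManager: "example-controller",       // hypothetical manager name
		})
	return err
}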
-func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { result = &v1.ValidatingWebhookConfiguration{} err = c.client.Patch(pt). Resource("validatingwebhookconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index d2177bad52..451de685ac 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var mutatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: " var mutatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingWebhookConfiguration"} // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *FakeMutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeMutatingWebhookConfigurations) Get(name string, options v1.GetOptio } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *FakeMutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { +func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1beta1.MutatingWebhookConfigurationList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeMutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v } // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *FakeMutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. 
InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeMutatingWebhookConfigurations) Create(mutatingWebhookConfiguration } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeMutatingWebhookConfigurations) Update(mutatingWebhookConfiguration } // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeMutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.MutatingWebhookConfigurationList{}) @@ -110,7 +112,7 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteO } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index 6be2b39386..4bddb9ad2f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var validatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: var validatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingWebhookConfiguration"} // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *FakeValidatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeValidatingWebhookConfigurations) Get(name string, options v1.GetOpt } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *FakeValidatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { +func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1beta1.ValidatingWebhookConfigurationList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeValidatingWebhookConfigurations) List(opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *FakeValidatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
-func (c *FakeValidatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeValidatingWebhookConfigurations) Create(validatingWebhookConfigurat } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeValidatingWebhookConfigurations) Update(validatingWebhookConfigurat } // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeValidatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.ValidatingWebhookConfigurationList{}) @@ -110,7 +112,7 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.Delet } // Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 4524896cd6..4643325a39 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" @@ -37,14 +38,14 @@ type MutatingWebhookConfigurationsGetter interface { // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. type MutatingWebhookConfigurationInterface interface { - Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) - Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) - List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) + Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.MutatingWebhookConfiguration, error) + Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.MutatingWebhookConfiguration, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) MutatingWebhookConfigurationExpansion } @@ -61,19 +62,19 @@ func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mu } // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { result = &v1beta1.MutatingWebhookConfiguration{} err = c.client.Get(). Resource("mutatingwebhookconfigurations"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). 
Into(result) return } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { +func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1bet Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interf Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { result = &v1beta1.MutatingWebhookConfiguration{} err = c.client.Post(). Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(mutatingWebhookConfiguration). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { result = &v1beta1.MutatingWebhookConfiguration{} err = c.client.Put(). Resource("mutatingwebhookconfigurations"). Name(mutatingWebhookConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(mutatingWebhookConfiguration). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { +func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("mutatingwebhookconfigurations"). Name(name). 
Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptio VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { +func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { result = &v1beta1.MutatingWebhookConfiguration{} err = c.client.Patch(pt). Resource("mutatingwebhookconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 7e711b3000..26af233d00 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" @@ -37,14 +38,14 @@ type ValidatingWebhookConfigurationsGetter interface { // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
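The Patch rewrite above does two things: it adds metav1.PatchOptions to the signature, and it moves Name() ahead of SubResource(). The reordering appears cosmetic, since the request builder assembles the path from its stored fields; only the signature change is visible to callers, as in this hypothetical call:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func labelWebhookConfig(client kubernetes.Interface, name string) error {
	// JSON merge patch adding a hypothetical label.
	patch := []byte(`{"metadata":{"labels":{"stage":"canary"}}}`)
	_, err := client.AdmissionregistrationV1beta1().
		MutatingWebhookConfigurations().
		Patch(context.TODO(), name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}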
type ValidatingWebhookConfigurationInterface interface { - Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) - Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) - List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) + Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) ValidatingWebhookConfigurationExpansion } @@ -61,19 +62,19 @@ func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) * } // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { result = &v1beta1.ValidatingWebhookConfiguration{} err = c.client.Get(). Resource("validatingwebhookconfigurations"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { +func (c *validatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1b Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. 
-func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Inte Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { result = &v1beta1.ValidatingWebhookConfiguration{} err = c.client.Post(). Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(validatingWebhookConfiguration). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { result = &v1beta1.ValidatingWebhookConfiguration{} err = c.client.Put(). Resource("validatingwebhookconfigurations"). Name(validatingWebhookConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(validatingWebhookConfiguration). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { +func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("validatingwebhookconfigurations"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOpt VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched validatingWebhookConfiguration. 
-func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { +func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { result = &v1beta1.ValidatingWebhookConfiguration{} err = c.client.Patch(pt). Resource("validatingwebhookconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index e28e4d2a3f..d3321e33aa 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/apps/v1" @@ -37,14 +38,14 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. type ControllerRevisionInterface interface { - Create(*v1.ControllerRevision) (*v1.ControllerRevision, error) - Update(*v1.ControllerRevision) (*v1.ControllerRevision, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ControllerRevision, error) - List(opts metav1.ListOptions) (*v1.ControllerRevisionList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) + Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (*v1.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (*v1.ControllerRevision, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ControllerRevision, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ControllerRevisionList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) ControllerRevisionExpansion } @@ -63,20 +64,20 @@ func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisi } // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { +func (c *controllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { result = &v1.ControllerRevision{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). 
Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { +func (c *controllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.Controll Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *controllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, e Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { +func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { result = &v1.ControllerRevision{} err = c.client.Post(). Namespace(c.ns). Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { +func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { result = &v1.ControllerRevision{} err = c.client.Put(). Namespace(c.ns). Resource("controllerrevisions"). Name(controllerRevision.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) error { +func (c *controllerRevisions) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
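Two timeout mechanisms now coexist in the generated List, Watch, and DeleteCollection bodies: the pre-existing translation of TimeoutSeconds into Timeout(...) on the request, and the new context threaded into Do(ctx). Whichever bound expires first ends the call. A hypothetical illustration:

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func listRevisions(client kubernetes.Interface, ns string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	sixty := int64(60)
	// The 5s ctx deadline above and the 60s timeout the generated code
	// derives from TimeoutSeconds both apply; the shorter one wins here.
	_, err := client.AppsV1().ControllerRevisions(ns).
		List(ctx, metav1.ListOptions{TimeoutSeconds: &sixty})
	return err
}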
-func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *controllerRevisions) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, li VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) { +func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { result = &v1.ControllerRevision{} err = c.client.Patch(pt). Namespace(c.ns). Resource("controllerrevisions"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index a535cdabe6..4c78281ece 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/apps/v1" @@ -37,15 +38,15 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. 
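The DaemonSet interface that follows also regenerates its status subresource method: UpdateStatus gains the same ctx and metav1.UpdateOptions parameters as Update. A hypothetical caller sketch of the new shape:

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func markObserved(client kubernetes.Interface, ds *appsv1.DaemonSet) error {
	ds.Status.ObservedGeneration = ds.Generation
	// PUT .../daemonsets/<name>/status, with the context threaded through.
	_, err := client.AppsV1().DaemonSets(ds.Namespace).
		UpdateStatus(context.TODO(), ds, metav1.UpdateOptions{})
	return err
}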
type DaemonSetInterface interface { - Create(*v1.DaemonSet) (*v1.DaemonSet, error) - Update(*v1.DaemonSet) (*v1.DaemonSet, error) - UpdateStatus(*v1.DaemonSet) (*v1.DaemonSet, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.DaemonSet, error) - List(opts metav1.ListOptions) (*v1.DaemonSetList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) + Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error) + Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) + UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DaemonSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.DaemonSetList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) DaemonSetExpansion } @@ -64,20 +65,20 @@ func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { } // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { +func (c *daemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { +func (c *daemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, er Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *daemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a daemonSet and creates it. 
Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { +func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Post(). Namespace(c.ns). Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { +func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { +func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error { +func (c *daemonSets) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *daemonSets) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) { +func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { result = &v1.DaemonSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("daemonsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index f9799a4539..c7c32a035b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/apps/v1" @@ -38,17 +39,17 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(*v1.Deployment) (*v1.Deployment, error) - Update(*v1.Deployment) (*v1.Deployment, error) - UpdateStatus(*v1.Deployment) (*v1.Deployment, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Deployment, error) - List(opts metav1.ListOptions) (*v1.DeploymentList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) - GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error) + Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Deployment, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.DeploymentList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) + GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) DeploymentExpansion } @@ -68,20 +69,20 @@ func newDeployments(c *AppsV1Client, namespace string) *deployments { } // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.Deployment, err error) { +func (c *deployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Deployments that match those selectors. 
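For orientation, every method on the regenerated clients now takes the caller's context as its first argument, so deadlines and cancellation propagate down to the underlying HTTP request via Do(ctx). A minimal caller sketch against this snapshot of the API, assuming a clientset built elsewhere with kubernetes.NewForConfig (namespace and names are illustrative):

	// imports assumed: "context", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1",
	// "k8s.io/client-go/kubernetes"
	func listWebDeployments(ctx context.Context, clientset kubernetes.Interface) error {
		// Get and List both thread ctx through to the request.
		if _, err := clientset.AppsV1().Deployments("default").Get(ctx, "web", metav1.GetOptions{}); err != nil {
			return err
		}
		_, err := clientset.AppsV1().Deployments("default").List(ctx, metav1.ListOptions{
			LabelSelector: "app=web", // illustrative selector
		})
		return err
	}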
-func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) { +func (c *deployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -92,13 +93,13 @@ func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *deployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -109,63 +110,65 @@ func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) { +func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Post(). Namespace(c.ns). Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) { +func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) { +func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error { +func (c *deployments) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("deployments"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
-func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *deployments) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -176,26 +179,27 @@ func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOption VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) { +func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { result = &v1.Deployment{} err = c.client.Patch(pt). Namespace(c.ns). Resource("deployments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } // GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. -func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *deployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Get(). Namespace(c.ns). @@ -203,21 +207,22 @@ func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) Name(deploymentName). SubResource("scale"). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deploymentName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index eb38bca41b..b1ccd18090 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Ver var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ControllerRevision"} // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &appsv1.ControllerRevision{}) @@ -50,7 +52,7 @@ func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (resul } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *appsv1.ControllerRevisionList, err error) { +func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.ControllerRevisionList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &appsv1.ControllerRevisionList{}) @@ -72,14 +74,14 @@ func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *appsv1.Cont } // Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(controllerRevision *appsv1.ControllerRevision) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts v1.CreateOptions) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &appsv1.ControllerRevision{}) @@ -90,7 +92,7 @@ func (c *FakeControllerRevisions) Create(controllerRevision *appsv1.ControllerRe } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(controllerRevision *appsv1.ControllerRevision) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts v1.UpdateOptions) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &appsv1.ControllerRevision{}) @@ -101,7 +103,7 @@ func (c *FakeControllerRevisions) Update(controllerRevision *appsv1.ControllerRe } // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. 
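The fake clients are regenerated with matching signatures, so unit tests compile against the new interface unchanged; as the hunks here show, the new opts arguments are accepted but not recorded by the underlying test actions. A minimal test sketch, assuming the standard k8s.io/client-go/kubernetes/fake package (names illustrative):

	// imports assumed: "context", "testing", appsv1 "k8s.io/api/apps/v1",
	// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/client-go/kubernetes/fake"
	func TestGetControllerRevision(t *testing.T) {
		rev := &appsv1.ControllerRevision{
			ObjectMeta: metav1.ObjectMeta{Name: "web-1", Namespace: "default"},
		}
		client := fake.NewSimpleClientset(rev) // seeds the in-memory object tracker
		got, err := client.AppsV1().ControllerRevisions("default").Get(context.TODO(), "web-1", metav1.GetOptions{})
		if err != nil || got.Name != "web-1" {
			t.Fatalf("unexpected result: %+v, %v", got, err)
		}
	}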
-func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &appsv1.ControllerRevision{}) @@ -109,7 +111,7 @@ func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) } // DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &appsv1.ControllerRevisionList{}) @@ -117,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li } // Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &appsv1.ControllerRevision{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index c06336e970..fc42495459 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var daemonsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1 var daemonsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"} // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &appsv1.DaemonSet{}) @@ -50,7 +52,7 @@ func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *appsv1 } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *appsv1.DaemonSetList, err error) { +func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.DaemonSetList, err error) { obj, err := c.Fake. 
Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &appsv1.DaemonSetList{}) @@ -72,14 +74,14 @@ func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *appsv1.DaemonSetList } // Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(daemonSet *appsv1.DaemonSet) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.CreateOptions) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &appsv1.DaemonSet{}) @@ -90,7 +92,7 @@ func (c *FakeDaemonSets) Create(daemonSet *appsv1.DaemonSet) (result *appsv1.Dae } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(daemonSet *appsv1.DaemonSet) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.UpdateOptions) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &appsv1.DaemonSet{}) @@ -102,7 +104,7 @@ func (c *FakeDaemonSets) Update(daemonSet *appsv1.DaemonSet) (result *appsv1.Dae // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(daemonSet *appsv1.DaemonSet) (*appsv1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *appsv1.DaemonSet, opts v1.UpdateOptions) (*appsv1.DaemonSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &appsv1.DaemonSet{}) @@ -113,7 +115,7 @@ func (c *FakeDaemonSets) UpdateStatus(daemonSet *appsv1.DaemonSet) (*appsv1.Daem } // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeDaemonSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &appsv1.DaemonSet{}) @@ -121,7 +123,7 @@ func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &appsv1.DaemonSetList{}) @@ -129,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched daemonSet. 
-func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.DaemonSet, err error) { +func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &appsv1.DaemonSet{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index 6a8cb379da..6b243acfe5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,7 +42,7 @@ var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"} // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &appsv1.Deployment{}) @@ -51,7 +53,7 @@ func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *appsv } // List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(opts v1.ListOptions) (result *appsv1.DeploymentList, err error) { +func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.DeploymentList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &appsv1.DeploymentList{}) @@ -73,14 +75,14 @@ func (c *FakeDeployments) List(opts v1.ListOptions) (result *appsv1.DeploymentLi } // Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(deployment *appsv1.Deployment) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *appsv1.Deployment, opts v1.CreateOptions) (result *appsv1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &appsv1.Deployment{}) @@ -91,7 +93,7 @@ func (c *FakeDeployments) Create(deployment *appsv1.Deployment) (result *appsv1. } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. 
-func (c *FakeDeployments) Update(deployment *appsv1.Deployment) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *appsv1.Deployment, opts v1.UpdateOptions) (result *appsv1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &appsv1.Deployment{}) @@ -103,7 +105,7 @@ func (c *FakeDeployments) Update(deployment *appsv1.Deployment) (result *appsv1. // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(deployment *appsv1.Deployment) (*appsv1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *appsv1.Deployment, opts v1.UpdateOptions) (*appsv1.Deployment, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &appsv1.Deployment{}) @@ -114,7 +116,7 @@ func (c *FakeDeployments) UpdateStatus(deployment *appsv1.Deployment) (*appsv1.D } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeDeployments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &appsv1.Deployment{}) @@ -122,7 +124,7 @@ func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeDeployments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &appsv1.DeploymentList{}) @@ -130,7 +132,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &appsv1.Deployment{}) @@ -141,7 +143,7 @@ func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, su } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. -func (c *FakeDeployments) GetScale(deploymentName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &autoscalingv1.Scale{}) @@ -152,7 +154,7 @@ func (c *FakeDeployments) GetScale(deploymentName string, options v1.GetOptions) } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
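GetScale and UpdateScale get the same treatment, with metav1.UpdateOptions added on the write path. A sketch of scaling through the scale subresource (function and names are illustrative):

	func scaleDeployment(ctx context.Context, clientset kubernetes.Interface, replicas int32) error {
		scale, err := clientset.AppsV1().Deployments("default").GetScale(ctx, "web", metav1.GetOptions{})
		if err != nil {
			return err
		}
		scale.Spec.Replicas = replicas // desired replica count
		_, err = clientset.AppsV1().Deployments("default").UpdateScale(ctx, "web", scale, metav1.UpdateOptions{})
		return err
	}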
-func (c *FakeDeployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index e871f82f76..38c7400bad 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,7 +42,7 @@ var replicasetsResource = schema.GroupVersionResource{Group: "apps", Version: "v var replicasetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"} // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &appsv1.ReplicaSet{}) @@ -51,7 +53,7 @@ func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *appsv } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *appsv1.ReplicaSetList, err error) { +func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.ReplicaSetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &appsv1.ReplicaSetList{}) @@ -73,14 +75,14 @@ func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *appsv1.ReplicaSetLi } // Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(replicaSet *appsv1.ReplicaSet) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.CreateOptions) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &appsv1.ReplicaSet{}) @@ -91,7 +93,7 @@ func (c *FakeReplicaSets) Create(replicaSet *appsv1.ReplicaSet) (result *appsv1. } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
-func (c *FakeReplicaSets) Update(replicaSet *appsv1.ReplicaSet) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.UpdateOptions) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &appsv1.ReplicaSet{}) @@ -103,7 +105,7 @@ func (c *FakeReplicaSets) Update(replicaSet *appsv1.ReplicaSet) (result *appsv1. // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(replicaSet *appsv1.ReplicaSet) (*appsv1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts v1.UpdateOptions) (*appsv1.ReplicaSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &appsv1.ReplicaSet{}) @@ -114,7 +116,7 @@ func (c *FakeReplicaSets) UpdateStatus(replicaSet *appsv1.ReplicaSet) (*appsv1.R } // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeReplicaSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &appsv1.ReplicaSet{}) @@ -122,7 +124,7 @@ func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &appsv1.ReplicaSetList{}) @@ -130,7 +132,7 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &appsv1.ReplicaSet{}) @@ -141,7 +143,7 @@ func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, su } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &autoscalingv1.Scale{}) @@ -152,7 +154,7 @@ func (c *FakeReplicaSets) GetScale(replicaSetName string, options v1.GetOptions) } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
-func (c *FakeReplicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index 83e80bff49..2accba210d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,7 +42,7 @@ var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: " var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"} // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &appsv1.StatefulSet{}) @@ -51,7 +53,7 @@ func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *apps } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *appsv1.StatefulSetList, err error) { +func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *appsv1.StatefulSetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &appsv1.StatefulSetList{}) @@ -73,14 +75,14 @@ func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *appsv1.StatefulSet } // Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(statefulSet *appsv1.StatefulSet) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.CreateOptions) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &appsv1.StatefulSet{}) @@ -91,7 +93,7 @@ func (c *FakeStatefulSets) Create(statefulSet *appsv1.StatefulSet) (result *apps } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. 
-func (c *FakeStatefulSets) Update(statefulSet *appsv1.StatefulSet) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.UpdateOptions) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &appsv1.StatefulSet{}) @@ -103,7 +105,7 @@ func (c *FakeStatefulSets) Update(statefulSet *appsv1.StatefulSet) (result *apps // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(statefulSet *appsv1.StatefulSet) (*appsv1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *appsv1.StatefulSet, opts v1.UpdateOptions) (*appsv1.StatefulSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &appsv1.StatefulSet{}) @@ -114,7 +116,7 @@ func (c *FakeStatefulSets) UpdateStatus(statefulSet *appsv1.StatefulSet) (*appsv } // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeStatefulSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &appsv1.StatefulSet{}) @@ -122,7 +124,7 @@ func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error } // DeleteCollection deletes a collection of objects. -func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &appsv1.StatefulSetList{}) @@ -130,7 +132,7 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.StatefulSet, err error) { +func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &appsv1.StatefulSet{}) @@ -141,7 +143,7 @@ func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, s } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeStatefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &autoscalingv1.Scale{}) @@ -152,7 +154,7 @@ func (c *FakeStatefulSets) GetScale(statefulSetName string, options v1.GetOption } // UpdateScale takes the representation of a scale and updates it. 
Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeStatefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index ff3504e78a..2b94003fb0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/apps/v1" @@ -38,17 +39,17 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(*v1.ReplicaSet) (*v1.ReplicaSet, error) - Update(*v1.ReplicaSet) (*v1.ReplicaSet, error) - UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ReplicaSet, error) - List(opts metav1.ListOptions) (*v1.ReplicaSetList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) - GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error) + Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicaSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicaSetList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) + GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ReplicaSetExpansion } @@ -68,20 +69,20 @@ func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { } // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. 
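Patch now takes metav1.PatchOptions ahead of the variadic subresources, and the generated builder sets Name before SubResource, matching the other methods. A hedged sketch of a strategic merge patch against a ReplicaSet (payload and names illustrative):

	// imports assumed: types "k8s.io/apimachinery/pkg/types" plus those above.
	func patchReplicas(ctx context.Context, clientset kubernetes.Interface) error {
		patch := []byte(`{"spec":{"replicas":3}}`)
		_, err := clientset.AppsV1().ReplicaSets("default").Patch(ctx, "web-rs",
			types.StrategicMergePatchType, patch, metav1.PatchOptions{})
		return err
	}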
-func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { +func (c *replicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -92,13 +93,13 @@ func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *replicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -109,63 +110,65 @@ func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Post(). Namespace(c.ns). Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). 
Into(result) return } // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error { +func (c *replicaSets) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *replicaSets) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -176,26 +179,27 @@ func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOption VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) { +func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { result = &v1.ReplicaSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicasets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } // GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Get(). Namespace(c.ns). @@ -203,21 +207,22 @@ func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) Name(replicaSetName). SubResource("scale"). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index c12c470bba..7fd748cfe9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/apps/v1" @@ -38,17 +39,17 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. type StatefulSetInterface interface { - Create(*v1.StatefulSet) (*v1.StatefulSet, error) - Update(*v1.StatefulSet) (*v1.StatefulSet, error) - UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.StatefulSet, error) - List(opts metav1.ListOptions) (*v1.StatefulSetList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) - GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error) + Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StatefulSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.StatefulSetList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) + GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) StatefulSetExpansion } @@ -68,20 +69,20 @@ func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { } // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { +func (c *statefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. 
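Watch also takes the context now, so cancelling ctx tears down the streaming request in addition to the usual Stop call. A minimal sketch with event handling elided:

	func watchStatefulSets(ctx context.Context, clientset kubernetes.Interface) error {
		w, err := clientset.AppsV1().StatefulSets("default").Watch(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		defer w.Stop()
		for {
			select {
			case <-ctx.Done():
				return ctx.Err() // caller cancelled or deadline hit
			case ev, ok := <-w.ResultChan():
				if !ok {
					return nil // server closed the watch
				}
				_ = ev // handle the watch.Event here
			}
		}
	}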
-func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { +func (c *statefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -92,13 +93,13 @@ func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *statefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -109,63 +110,65 @@ func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { +func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Post(). Namespace(c.ns). Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { +func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { +func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error { +func (c *statefulSets) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
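// NOTE (editor): a hedged sketch of the delete paths below. In this snapshot the
// delete options are still passed as a *metav1.DeleteOptions pointer (they become
// value types in later client-go revisions), so callers thread a context plus
// &metav1.DeleteOptions{}. Same assumed imports as above; the selector is invented.
func exampleDeleteStatefulSets(ctx context.Context, clientset kubernetes.Interface) error {
	sts := clientset.AppsV1().StatefulSets("default")
	if err := sts.Delete(ctx, "web", &metav1.DeleteOptions{}); err != nil {
		return err
	}
	// Remove every StatefulSet matching a label selector in one request.
	return sts.DeleteCollection(ctx, &metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: "app=web"})
}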
-func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *statefulSets) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -176,26 +179,27 @@ func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptio VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) { +func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { result = &v1.StatefulSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("statefulsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } // GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Get(). Namespace(c.ns). @@ -203,21 +207,22 @@ func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOption Name(statefulSetName). SubResource("scale"). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 45ddb91592..d1833d0300 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/apps/v1beta1" @@ -37,14 +38,14 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. 
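// NOTE (editor): an illustrative sketch, not part of the patch, of what the new
// ctx parameter buys: cancelling it aborts in-flight requests, and for Watch(ctx)
// it should tear down the stream and close the event channel. Assumes the same
// imports plus "time"; the behavior described is an expectation, not vendored code.
func exampleWatchWithTimeout(parent context.Context, clientset kubernetes.Interface) error {
	ctx, cancel := context.WithTimeout(parent, 2*time.Minute)
	defer cancel()
	w, err := clientset.AppsV1().StatefulSets("default").Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() { // drains until the context expires or Stop is called
		fmt.Println(ev.Type)
	}
	return nil
}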
type ControllerRevisionInterface interface { - Create(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) - Update(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ControllerRevision, error) - List(opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) + Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (*v1beta1.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (*v1beta1.ControllerRevision, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerRevision, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) ControllerRevisionExpansion } @@ -63,20 +64,20 @@ func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerR } // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { +func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { result = &v1beta1.ControllerRevision{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { +func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.Control Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error Resource("controllerrevisions"). 
VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { +func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { result = &v1beta1.ControllerRevision{} err = c.client.Post(). Namespace(c.ns). Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { +func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { result = &v1beta1.ControllerRevision{} err = c.client.Put(). Namespace(c.ns). Resource("controllerrevisions"). Name(controllerRevision.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { +func (c *controllerRevisions) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *controllerRevisions) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { +func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { result = &v1beta1.ControllerRevision{} err = c.client.Patch(pt). Namespace(c.ns). Resource("controllerrevisions"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 05fdcb7a64..64480f32e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/apps/v1beta1" @@ -37,15 +38,15 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) - UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) - List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) + Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) DeploymentExpansion } @@ -64,20 +65,20 @@ func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { } // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { +func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { +func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). 
Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Post(). Namespace(c.ns). Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { +func (c *deployments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("deployments"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *deployments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1 VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). 
+ Do(ctx). Error() } // Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { +func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Patch(pt). Namespace(c.ns). Resource("deployments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index 8e339d78b0..f005c6a756 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Ver var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "ControllerRevision"} // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{}) @@ -50,7 +52,7 @@ func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (resul } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { +func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta1.ControllerRevisionList{}) @@ -72,14 +74,14 @@ func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *v1beta1.Con } // Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
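// NOTE (editor): the fake clients in this file gain the same signatures, so unit
// tests thread a context too. A hedged sketch, assuming the fake clientset from
// k8s.io/client-go/kubernetes/fake and v1beta1 "k8s.io/api/apps/v1beta1":
func exampleFakeControllerRevision(ctx context.Context) error {
	client := fake.NewSimpleClientset()
	cr := &v1beta1.ControllerRevision{ObjectMeta: metav1.ObjectMeta{Name: "web-1"}}
	if _, err := client.AppsV1beta1().ControllerRevisions("default").
		Create(ctx, cr, metav1.CreateOptions{}); err != nil {
		return err
	}
	got, err := client.AppsV1beta1().ControllerRevisions("default").
		Get(ctx, "web-1", metav1.GetOptions{})
	if err == nil {
		fmt.Println(got.Name) // "web-1", served from the fake's object tracker
	}
	return err
}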
-func (c *FakeControllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) @@ -90,7 +92,7 @@ func (c *FakeControllerRevisions) Create(controllerRevision *v1beta1.ControllerR } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) @@ -101,7 +103,7 @@ func (c *FakeControllerRevisions) Update(controllerRevision *v1beta1.ControllerR } // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{}) @@ -109,7 +111,7 @@ func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) } // DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.ControllerRevisionList{}) @@ -117,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li } // Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.ControllerRevision{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index c33baba589..1e76cde078 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "Deployment"} // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) @@ -50,7 +52,7 @@ func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1bet } // List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { +func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) @@ -72,14 +74,14 @@ func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentL } // Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) @@ -90,7 +92,7 @@ func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) @@ -102,7 +104,7 @@ func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) @@ -113,7 +115,7 @@ func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1 } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeDeployments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) @@ -121,7 +123,7 @@ func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeDeployments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) @@ -129,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index 754da5fba6..6c443aedf3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: " var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "StatefulSet"} // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { +func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) @@ -50,7 +52,7 @@ func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1be } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { +func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { obj, err := c.Fake. 
Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta1.StatefulSetList{}) @@ -72,14 +74,14 @@ func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSe } // Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) @@ -90,7 +92,7 @@ func (c *FakeStatefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1b } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) @@ -102,7 +104,7 @@ func (c *FakeStatefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1b // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{}) @@ -113,7 +115,7 @@ func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (*v1be } // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeStatefulSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) @@ -121,7 +123,7 @@ func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error } // DeleteCollection deletes a collection of objects. 
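// NOTE (editor): as the fake DeleteCollection below shows, the recorded action is
// built from the list options only; the *v1.DeleteOptions argument is dropped by
// the test double. A hedged sketch of observing that via the fake's action log
// (same assumed fake-clientset imports as above):
func exampleFakeDeleteCollection(ctx context.Context) error {
	client := fake.NewSimpleClientset()
	err := client.AppsV1beta1().StatefulSets("default").DeleteCollection(ctx,
		&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "app=web"})
	fmt.Println(len(client.Actions())) // one delete-collection action was recorded
	return err
}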
-func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{}) @@ -129,7 +131,7 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { +func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.StatefulSet{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index c4b35b424c..2b941a5fee 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/apps/v1beta1" @@ -37,15 +38,15 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. type StatefulSetInterface interface { - Create(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - Update(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - UpdateStatus(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.StatefulSet, error) - List(opts v1.ListOptions) (*v1beta1.StatefulSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) + Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error) + Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StatefulSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StatefulSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) StatefulSetExpansion } @@ -64,20 +65,20 @@ func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { } // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
-func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { +func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetLis Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Post(). Namespace(c.ns). Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). SubResource("status"). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *statefulSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *statefulSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { +func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("statefulsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index e1d6025155..f16e988382 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" "time" v1beta2 "k8s.io/api/apps/v1beta2" @@ -37,14 +38,14 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. 
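// NOTE (editor): besides the context, the new options structs carry request-scoped
// knobs such as server-side dry-run and a field manager. A hedged sketch, not part
// of the patch; assumes v1beta2 "k8s.io/api/apps/v1beta2" plus the imports above.
func exampleDryRunCreate(ctx context.Context, clientset kubernetes.Interface, d *v1beta2.Deployment) error {
	_, err := clientset.AppsV1beta2().Deployments("default").Create(ctx, d, metav1.CreateOptions{
		DryRun:       []string{metav1.DryRunAll}, // validate and admit server-side without persisting
		FieldManager: "example-manager",          // attribution for managed-fields tracking
	})
	return err
}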
type ControllerRevisionInterface interface { - Create(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) - Update(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.ControllerRevision, error) - List(opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) + Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (*v1beta2.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (*v1beta2.ControllerRevision, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ControllerRevision, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) ControllerRevisionExpansion } @@ -63,20 +64,20 @@ func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerR } // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { +func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { result = &v1beta2.ControllerRevision{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { +func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.Control Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error Resource("controllerrevisions"). 
VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { +func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { result = &v1beta2.ControllerRevision{} err = c.client.Post(). Namespace(c.ns). Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { +func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { result = &v1beta2.ControllerRevision{} err = c.client.Put(). Namespace(c.ns). Resource("controllerrevisions"). Name(controllerRevision.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(controllerRevision). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { +func (c *controllerRevisions) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *controllerRevisions) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { +func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { result = &v1beta2.ControllerRevision{} err = c.client.Patch(pt). Namespace(c.ns). Resource("controllerrevisions"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index f8b7ac2597..6cfedf19fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" "time" v1beta2 "k8s.io/api/apps/v1beta2" @@ -37,15 +38,15 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. type DaemonSetInterface interface { - Create(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - Update(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - UpdateStatus(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.DaemonSet, error) - List(opts v1.ListOptions) (*v1beta2.DaemonSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) + Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error) + Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.DaemonSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DaemonSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) DaemonSetExpansion } @@ -64,20 +65,20 @@ func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { } // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { +func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, e Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). 
Into(result) return } // Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Post(). Namespace(c.ns). Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *daemonSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *daemonSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1. VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched daemonSet. 
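Note: the hunks above, and the Patch hunk just below, apply one mechanical change to every verb in this generated client: a context.Context first parameter, an explicit options struct, and a VersionedParams(&opts, scheme.ParameterCodec) call so that fields such as DryRun and FieldManager are actually encoded as query parameters. A minimal caller-side sketch under the new signatures; the in-cluster config, namespace, and DaemonSet literal are illustrative, not part of this patch. Later notes reuse clientset from this sketch.

    package example

    import (
        "context"
        "fmt"

        appsv1beta2 "k8s.io/api/apps/v1beta2"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func createDaemonSet() error {
        cfg, err := rest.InClusterConfig() // any rest.Config would do; in-cluster is just for the sketch
        if err != nil {
            return err
        }
        clientset, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            return err
        }
        ds := &appsv1beta2.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "node-agent"}}
        // CreateOptions now rides along as query parameters, so a dry-run
        // exercises the server's validation path without persisting anything.
        created, err := clientset.AppsV1beta2().DaemonSets("kube-system").Create(
            context.TODO(), ds, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
        if err != nil {
            return err
        }
        fmt.Println("created (dry-run):", created.Name)
        return nil
    }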
-func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { +func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { result = &v1beta2.DaemonSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("daemonsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 510250b06e..abb09d2ab2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" "time" v1beta2 "k8s.io/api/apps/v1beta2" @@ -37,15 +38,15 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(*v1beta2.Deployment) (*v1beta2.Deployment, error) - Update(*v1beta2.Deployment) (*v1beta2.Deployment, error) - UpdateStatus(*v1beta2.Deployment) (*v1beta2.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.Deployment, error) - List(opts v1.ListOptions) (*v1beta2.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) + Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error) + Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DeploymentList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) DeploymentExpansion } @@ -64,20 +65,20 @@ func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { } // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { +func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). 
Into(result) return } // List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { +func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Post(). Namespace(c.ns). Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { +func (c *deployments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("deployments"). Name(name). Body(options). - Do(). 
+ Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *deployments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1 VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { +func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { result = &v1beta2.Deployment{} err = c.client.Patch(pt). Namespace(c.ns). Resource("deployments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index 197f190cbd..a6d041f0f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Ver var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ControllerRevision"} // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{}) @@ -50,7 +52,7 @@ func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (resul } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { +func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta2.ControllerRevisionList{}) @@ -72,14 +74,14 @@ func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *v1beta2.Con } // Watch returns a watch.Interface that watches the requested controllerRevisions. 
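Note: the fake-client hunks above and below accept the new ctx and options arguments but deliberately drop them; the recorded testing.Action values are unchanged, so existing tracker-based unit tests keep passing once their call sites are updated. A sketch of such a test, assuming the fake clientset from k8s.io/client-go/kubernetes/fake:

    package example

    import (
        "context"
        "testing"

        appsv1beta2 "k8s.io/api/apps/v1beta2"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestCreateControllerRevision(t *testing.T) {
        clientset := fake.NewSimpleClientset()
        rev := &appsv1beta2.ControllerRevision{ObjectMeta: metav1.ObjectMeta{Name: "rev-1"}}
        // ctx and CreateOptions are accepted but ignored by the fake.
        if _, err := clientset.AppsV1beta2().ControllerRevisions("default").
            Create(context.TODO(), rev, metav1.CreateOptions{}); err != nil {
            t.Fatal(err)
        }
    }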
-func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) @@ -90,7 +92,7 @@ func (c *FakeControllerRevisions) Create(controllerRevision *v1beta2.ControllerR } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) @@ -101,7 +103,7 @@ func (c *FakeControllerRevisions) Update(controllerRevision *v1beta2.ControllerR } // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{}) @@ -109,7 +111,7 @@ func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) } // DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta2.ControllerRevisionList{}) @@ -117,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li } // Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { +func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta2.ControllerRevision{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index b50747fdc9..80b09794f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var daemonsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1 var daemonsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "DaemonSet"} // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { +func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{}) @@ -50,7 +52,7 @@ func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { +func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta2.DaemonSetList{}) @@ -72,14 +74,14 @@ func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetLis } // Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) @@ -90,7 +92,7 @@ func (c *FakeDaemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.D } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { +func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) @@ -102,7 +104,7 @@ func (c *FakeDaemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.D // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta2.DaemonSet{}) @@ -113,7 +115,7 @@ func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (*v1beta2.Da } // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeDaemonSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{}) @@ -121,7 +123,7 @@ func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta2.DaemonSetList{}) @@ -129,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { +func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.DaemonSet{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index b74d24ed7c..26cc4a4b6a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"} // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
-func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { +func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{}) @@ -50,7 +52,7 @@ func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1bet } // List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { +func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta2.DeploymentList{}) @@ -72,14 +74,14 @@ func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentL } // Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) @@ -90,7 +92,7 @@ func (c *FakeDeployments) Create(deployment *v1beta2.Deployment) (result *v1beta } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) @@ -102,7 +104,7 @@ func (c *FakeDeployments) Update(deployment *v1beta2.Deployment) (result *v1beta // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(deployment *v1beta2.Deployment) (*v1beta2.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta2.Deployment{}) @@ -113,7 +115,7 @@ func (c *FakeDeployments) UpdateStatus(deployment *v1beta2.Deployment) (*v1beta2 } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeDeployments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. 
Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{}) @@ -121,7 +123,7 @@ func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeDeployments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta2.DeploymentList{}) @@ -129,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta2.Deployment{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index ba1de33ecf..618c4ef74c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var replicasetsResource = schema.GroupVersionResource{Group: "apps", Version: "v var replicasetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"} // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { +func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{}) @@ -50,7 +52,7 @@ func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1bet } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { +func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta2.ReplicaSetList{}) @@ -72,14 +74,14 @@ func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetL } // Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. 
InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) @@ -90,7 +92,7 @@ func (c *FakeReplicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) @@ -102,7 +104,7 @@ func (c *FakeReplicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta2.ReplicaSet{}) @@ -113,7 +115,7 @@ func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (*v1beta2 } // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeReplicaSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{}) @@ -121,7 +123,7 @@ func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta2.ReplicaSetList{}) @@ -129,7 +131,7 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { +func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta2.ReplicaSet{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index 652c7cbc5d..ad6b736d68 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: " var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "StatefulSet"} // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { +func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{}) @@ -50,7 +52,7 @@ func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1be } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { +func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta2.StatefulSetList{}) @@ -72,14 +74,14 @@ func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSe } // Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) @@ -90,7 +92,7 @@ func (c *FakeStatefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1b } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) @@ -102,7 +104,7 @@ func (c *FakeStatefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1b // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta2.StatefulSet{}) @@ -113,7 +115,7 @@ func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (*v1be } // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeStatefulSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{}) @@ -121,7 +123,7 @@ func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error } // DeleteCollection deletes a collection of objects. -func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta2.StatefulSetList{}) @@ -129,7 +131,7 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { +func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.StatefulSet{}) @@ -140,7 +142,7 @@ func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, s } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeStatefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { +func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &v1beta2.Scale{}) @@ -151,7 +153,7 @@ func (c *FakeStatefulSets) GetScale(statefulSetName string, options v1.GetOption } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
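Note: the scale subresource is swept up in the same migration, here in the fake (GetScale above, UpdateScale below) and later in this patch in the typed statefulset client: GetScale gains a context, UpdateScale gains a context plus v1.UpdateOptions. A fragment reusing clientset from the DaemonSet sketch earlier; the name and namespace are illustrative:

    scale, err := clientset.AppsV1beta2().StatefulSets("default").GetScale(
        context.TODO(), "web", metav1.GetOptions{})
    if err != nil {
        return err
    }
    scale.Spec.Replicas = 5 // v1beta2.ScaleSpec.Replicas is an int32
    _, err = clientset.AppsV1beta2().StatefulSets("default").UpdateScale(
        context.TODO(), "web", scale, metav1.UpdateOptions{})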
-func (c *FakeStatefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) { +func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &v1beta2.Scale{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 7b738774b7..84b7d16ae0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" "time" v1beta2 "k8s.io/api/apps/v1beta2" @@ -37,15 +38,15 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - Update(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - UpdateStatus(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.ReplicaSet, error) - List(opts v1.ListOptions) (*v1beta2.ReplicaSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) + Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error) + Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ReplicaSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ReplicaSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) ReplicaSetExpansion } @@ -64,20 +65,20 @@ func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { } // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. 
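Note: in the List hunk below, ListOptions.TimeoutSeconds keeps its server-side meaning (it is still copied into Timeout() on the request); the new context adds client-side cancellation on top, so the two compose rather than replace each other. For example, bounding a List both ways (fragment, reusing clientset; the selector is illustrative):

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    rsList, err := clientset.AppsV1beta2().ReplicaSets("default").List(
        ctx, metav1.ListOptions{LabelSelector: "app=web"})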
-func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { +func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Post(). Namespace(c.ns). Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *replicaSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
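Note: at this vendored revision Delete and DeleteCollection still take *v1.DeleteOptions as a pointer and send it as the request body; the context is the only new parameter in the delete hunks around this point. A fragment (reusing clientset; the name is illustrative):

    policy := metav1.DeletePropagationForeground
    err := clientset.AppsV1beta2().ReplicaSets("default").Delete(
        context.TODO(), "rs-1", &metav1.DeleteOptions{PropagationPolicy: &policy})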
-func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *replicaSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1 VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { +func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { result = &v1beta2.ReplicaSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicasets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index de7c3db8b5..ed6393043f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" "time" v1beta2 "k8s.io/api/apps/v1beta2" @@ -37,17 +38,17 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. 
type StatefulSetInterface interface { - Create(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - Update(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - UpdateStatus(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.StatefulSet, error) - List(opts v1.ListOptions) (*v1beta2.StatefulSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) - GetScale(statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) - UpdateScale(statefulSetName string, scale *v1beta2.Scale) (*v1beta2.Scale, error) + Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error) + Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.StatefulSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.StatefulSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) + GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) + UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error) StatefulSetExpansion } @@ -67,20 +68,20 @@ func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { } // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { +func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -91,13 +92,13 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetLis Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested statefulSets. 
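Note: with the Watch(ctx) form in the hunk below, cancelling the context should tear down the underlying stream and close the result channel (expected rest.Request.Watch behaviour at this revision; watch.Interface.Stop() also still works). A fragment, reusing clientset:

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    w, err := clientset.AppsV1beta2().StatefulSets("default").Watch(ctx, metav1.ListOptions{})
    if err != nil {
        return err
    }
    for ev := range w.ResultChan() { // channel closes when ctx is cancelled
        fmt.Println("event:", ev.Type)
    }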
-func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -108,63 +109,65 @@ func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Post(). Namespace(c.ns). Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(statefulSet). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *statefulSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *statefulSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -175,26 +178,27 @@ func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched statefulSet. 
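Note: besides the context and v1.PatchOptions, the Patch hunk below swaps the Name and SubResource builder calls. As far as I can tell this is cosmetic, since rest.Request assembles the URL from named fields (resource/name/subresource) rather than from call order, but it makes the builder read in path order. A typical call site (fragment; types is k8s.io/apimachinery/pkg/types):

    patch := []byte(`{"spec":{"replicas":3}}`)
    ss, err := clientset.AppsV1beta2().StatefulSets("default").Patch(
        context.TODO(), "web", types.StrategicMergePatchType, patch, metav1.PatchOptions{})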
-func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { +func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { result = &v1beta2.StatefulSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("statefulsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } // GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { +func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} err = c.client.Get(). Namespace(c.ns). @@ -202,21 +206,22 @@ func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) ( Name(statefulSetName). SubResource("scale"). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) { +func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go index 414d480062..68446e2176 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/auditregistration/v1alpha1" @@ -37,14 +38,14 @@ type AuditSinksGetter interface { // AuditSinkInterface has methods to work with AuditSink resources. 
type AuditSinkInterface interface { - Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) - Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error) - List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) + Create(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.CreateOptions) (*v1alpha1.AuditSink, error) + Update(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.UpdateOptions) (*v1alpha1.AuditSink, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.AuditSink, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AuditSink, err error) AuditSinkExpansion } @@ -61,19 +62,19 @@ func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks { } // Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. -func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { +func (c *auditSinks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { result = &v1alpha1.AuditSink{} err = c.client.Get(). Resource("auditsinks"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of AuditSinks that match those selectors. -func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { +func (c *auditSinks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, Resource("auditsinks"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested auditSinks. -func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *auditSinks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("auditsinks"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. 
-func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { +func (c *auditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.CreateOptions) (result *v1alpha1.AuditSink, err error) { result = &v1alpha1.AuditSink{} err = c.client.Post(). Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). Body(auditSink). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { +func (c *auditSinks) Update(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.UpdateOptions) (result *v1alpha1.AuditSink, err error) { result = &v1alpha1.AuditSink{} err = c.client.Put(). Resource("auditsinks"). Name(auditSink.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(auditSink). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the auditSink and deletes it. Returns an error if one occurs. -func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error { +func (c *auditSinks) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("auditsinks"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *auditSinks) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1. VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched auditSink. -func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { +func (c *auditSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AuditSink, err error) { result = &v1alpha1.AuditSink{} err = c.client.Patch(pt). Resource("auditsinks"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go index d0bb9fd000..bd0c95be21 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go @@ -19,6 +19,8 @@ limitations under the License. 
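Two details of the regenerated Patch are easy to miss: SubResource(...) now comes after Name(...), matching the request path order, and the new PatchOptions are encoded onto the URL by VersionedParams. A hedged caller sketch; the sink name and field manager are invented:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// labelSink merge-patches a label onto an AuditSink, passing a field manager
// through the new PatchOptions parameter.
func labelSink(ctx context.Context, cs kubernetes.Interface) error {
	_, err := cs.AuditregistrationV1alpha1().AuditSinks().Patch(
		ctx,
		"my-sink", // hypothetical AuditSink name
		types.MergePatchType,
		[]byte(`{"metadata":{"labels":{"audited":"true"}}}`),
		metav1.PatchOptions{FieldManager: "example-controller"},
	)
	return err
}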
package fake import ( + "context" + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var auditsinksResource = schema.GroupVersionResource{Group: "auditregistration.k var auditsinksKind = schema.GroupVersionKind{Group: "auditregistration.k8s.io", Version: "v1alpha1", Kind: "AuditSink"} // Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. -func (c *FakeAuditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { +func (c *FakeAuditSinks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(auditsinksResource, name), &v1alpha1.AuditSink{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeAuditSinks) Get(name string, options v1.GetOptions) (result *v1alph } // List takes label and field selectors, and returns the list of AuditSinks that match those selectors. -func (c *FakeAuditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { +func (c *FakeAuditSinks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(auditsinksResource, auditsinksKind, opts), &v1alpha1.AuditSinkList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeAuditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkLi } // Watch returns a watch.Interface that watches the requested auditSinks. -func (c *FakeAuditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeAuditSinks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(auditsinksResource, opts)) } // Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *FakeAuditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { +func (c *FakeAuditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.CreateOptions) (result *v1alpha1.AuditSink, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(auditsinksResource, auditSink), &v1alpha1.AuditSink{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeAuditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1 } // Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *FakeAuditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { +func (c *FakeAuditSinks) Update(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.UpdateOptions) (result *v1alpha1.AuditSink, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(auditsinksResource, auditSink), &v1alpha1.AuditSink{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeAuditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1 } // Delete takes name of the auditSink and deletes it. Returns an error if one occurs. -func (c *FakeAuditSinks) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeAuditSinks) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. 
Invokes(testing.NewRootDeleteAction(auditsinksResource, name), &v1alpha1.AuditSink{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeAuditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeAuditSinks) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(auditsinksResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.AuditSinkList{}) @@ -110,7 +112,7 @@ func (c *FakeAuditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched auditSink. -func (c *FakeAuditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { +func (c *FakeAuditSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AuditSink, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(auditsinksResource, name, pt, data, subresources...), &v1alpha1.AuditSink{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD index 7ddf71975c..6de5022f3a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD @@ -12,12 +12,12 @@ go_library( "doc.go", "generated_expansion.go", "tokenreview.go", - "tokenreview_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1", importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1", deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/BUILD index 49c7137c9a..ae3591ec04 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/BUILD @@ -11,12 +11,13 @@ go_library( "doc.go", "fake_authentication_client.go", "fake_tokenreview.go", - "fake_tokenreview_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake", importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go index e2a7f72b66..b85fcfbb87 100644 
--- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -18,7 +18,30 @@ limitations under the License. package fake +import ( + "context" + + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeTokenReviews implements TokenReviewInterface type FakeTokenReviews struct { Fake *FakeAuthenticationV1 } + +var tokenreviewsResource = schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1", Resource: "tokenreviews"} + +var tokenreviewsKind = schema.GroupVersionKind{Group: "authentication.k8s.io", Version: "v1", Kind: "TokenReview"} + +// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. +func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1.TokenReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1.TokenReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go deleted file mode 100644 index 610948ef10..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
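Because the fake now implements the same generated signature, test code migrates in lockstep with production code. A sketch of a test-style caller against the fake clientset; the reactor stub and token value are illustrative:

package main

import (
	"context"
	"fmt"

	authnv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	ktesting "k8s.io/client-go/testing"
)

func main() {
	cs := fake.NewSimpleClientset()

	// Review objects carry no name, so the default object tracker cannot
	// store them; stub the create verb the way tests usually do.
	cs.PrependReactor("create", "tokenreviews",
		func(action ktesting.Action) (bool, runtime.Object, error) {
			return true, &authnv1.TokenReview{
				Status: authnv1.TokenReviewStatus{Authenticated: true},
			}, nil
		})

	tr := &authnv1.TokenReview{Spec: authnv1.TokenReviewSpec{Token: "opaque-token"}}
	out, err := cs.AuthenticationV1().TokenReviews().Create(context.Background(), tr, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated:", out.Status.Authenticated)

	// The fake still records the invocation as a root-scoped create action.
	for _, a := range cs.Actions() {
		fmt.Println(a.GetVerb(), a.GetResource().Resource)
	}
}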
-*/ - -package fake - -import ( - "context" - - authenticationapi "k8s.io/api/authentication/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - return c.CreateContext(context.Background(), tokenReview) -} - -func (c *FakeTokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{}) - return obj.(*authenticationapi.TokenReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go index 177209ec61..0413fb2b66 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -17,3 +17,5 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. package v1 + +type TokenReviewExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index 25a8d6a17c..ca7cd47d26 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1 import ( + "context" + + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type TokenReviewsGetter interface { // TokenReviewInterface has methods to work with TokenReview resources. type TokenReviewInterface interface { + Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (*v1.TokenReview, error) TokenReviewExpansion } @@ -44,3 +50,15 @@ func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { client: c.RESTClient(), } } + +// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. +func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { + result = &v1.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(tokenReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go deleted file mode 100644 index 8a21b7c764..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
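For callers, the generated method subsumes the deleted expansion's Create/CreateContext pair: code that called CreateContext(ctx, tr) now calls Create(ctx, tr, opts), and plain Create callers must supply a context. A hedged usage sketch; the helper name is invented:

package main

import (
	"context"

	authnv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// authenticate submits a TokenReview and reports whether the token maps to an
// authenticated user, and if so which one.
func authenticate(ctx context.Context, cs kubernetes.Interface, token string) (string, bool, error) {
	review := &authnv1.TokenReview{Spec: authnv1.TokenReviewSpec{Token: token}}
	out, err := cs.AuthenticationV1().TokenReviews().Create(ctx, review, metav1.CreateOptions{})
	if err != nil {
		return "", false, err
	}
	return out.Status.User.Username, out.Status.Authenticated, nil
}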
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "context" - - authenticationapi "k8s.io/api/authentication/v1" -) - -type TokenReviewExpansion interface { - Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) - CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) -} - -func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - return c.CreateContext(context.Background(), tokenReview) -} - -func (c *tokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - result = &authenticationapi.TokenReview{} - err = c.client.Post(). - Context(ctx). - Resource("tokenreviews"). - Body(tokenReview). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD index 6e4ee1bdd4..d6255e5109 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD @@ -12,12 +12,12 @@ go_library( "doc.go", "generated_expansion.go", "tokenreview.go", - "tokenreview_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1", importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", deps = [ "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/BUILD index a0abef9d1d..3e6f85182b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/BUILD @@ -11,12 +11,13 @@ go_library( "doc.go", "fake_authentication_client.go", "fake_tokenreview.go", - "fake_tokenreview_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", deps = [ "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", diff --git 
a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go index 63b6b6a853..0da3ec6f43 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -18,7 +18,30 @@ limitations under the License. package fake +import ( + "context" + + v1beta1 "k8s.io/api/authentication/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeTokenReviews implements TokenReviewInterface type FakeTokenReviews struct { Fake *FakeAuthenticationV1beta1 } + +var tokenreviewsResource = schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1beta1", Resource: "tokenreviews"} + +var tokenreviewsKind = schema.GroupVersionKind{Group: "authentication.k8s.io", Version: "v1beta1", Kind: "TokenReview"} + +// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. +func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1beta1.TokenReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.TokenReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go deleted file mode 100644 index f9c487c3d8..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - "context" - - authenticationapi "k8s.io/api/authentication/v1beta1" - core "k8s.io/client-go/testing" -) - -func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - return c.CreateContext(context.Background(), tokenReview) -} - -func (c *FakeTokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{}) - return obj.(*authenticationapi.TokenReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go index f6df769632..60bf15ab99 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -17,3 +17,5 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. package v1beta1 + +type TokenReviewExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index 0ac3561e13..5da1224337 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1beta1 import ( + "context" + + v1beta1 "k8s.io/api/authentication/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type TokenReviewsGetter interface { // TokenReviewInterface has methods to work with TokenReview resources. type TokenReviewInterface interface { + Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (*v1beta1.TokenReview, error) TokenReviewExpansion } @@ -44,3 +50,15 @@ func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { client: c.RESTClient(), } } + +// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. +func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { + result = &v1beta1.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(tokenReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go deleted file mode 100644 index 0476b17359..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "context" - - authenticationapi "k8s.io/api/authentication/v1beta1" -) - -type TokenReviewExpansion interface { - Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) - CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) -} - -func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - return c.CreateContext(context.Background(), tokenReview) -} - -func (c *tokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - result = &authenticationapi.TokenReview{} - err = c.client.Post(). - Context(ctx). - Resource("tokenreviews"). - Body(tokenReview). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD index fcc864f224..ec4ee12021 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD @@ -12,18 +12,15 @@ go_library( "doc.go", "generated_expansion.go", "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", "selfsubjectrulesreview.go", - "selfsubjectrulesreview_expansion.go", "subjectaccessreview.go", - "subjectaccessreview_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1", importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1", deps = [ "//staging/src/k8s.io/api/authorization/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/BUILD index 91291d46d6..0a27a11098 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/BUILD @@ -11,18 +11,16 @@ go_library( "doc.go", "fake_authorization_client.go", "fake_localsubjectaccessreview.go", - "fake_localsubjectaccessreview_expansion.go", "fake_selfsubjectaccessreview.go", - "fake_selfsubjectaccessreview_expansion.go", "fake_selfsubjectrulesreview.go", - "fake_selfsubjectrulesreview_expansion.go", "fake_subjectaccessreview.go", - "fake_subjectaccessreview_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake", importpath = 
"k8s.io/client-go/kubernetes/typed/authorization/v1/fake", deps = [ "//staging/src/k8s.io/api/authorization/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go index 778ba9cea0..d74ae0a474 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -18,8 +18,32 @@ limitations under the License. package fake +import ( + "context" + + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface type FakeLocalSubjectAccessReviews struct { Fake *FakeAuthorizationV1 ns string } + +var localsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "localsubjectaccessreviews"} + +var localsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "LocalSubjectAccessReview"} + +// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. +func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1.LocalSubjectAccessReview{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LocalSubjectAccessReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go deleted file mode 100644 index 59c8773774..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *FakeLocalSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{}) - return obj.(*authorizationapi.LocalSubjectAccessReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go index a43a980baf..80ebbbd45f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -18,7 +18,30 @@ limitations under the License. package fake +import ( + "context" + + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type FakeSelfSubjectAccessReviews struct { Fake *FakeAuthorizationV1 } + +var selfsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "selfsubjectaccessreviews"} + +var selfsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SelfSubjectAccessReview"} + +// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. +func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1.SelfSubjectAccessReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1.SelfSubjectAccessReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go deleted file mode 100644 index d3ee5a6a8b..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *FakeSelfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{}) - return obj.(*authorizationapi.SelfSubjectAccessReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go index 243f2e89ee..dd70908ad3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -18,7 +18,30 @@ limitations under the License. package fake +import ( + "context" + + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type FakeSelfSubjectRulesReviews struct { Fake *FakeAuthorizationV1 } + +var selfsubjectrulesreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "selfsubjectrulesreviews"} + +var selfsubjectrulesreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SelfSubjectRulesReview"} + +// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. +func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1.SelfSubjectRulesReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1.SelfSubjectRulesReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go deleted file mode 100644 index 06f1cd6917..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeSelfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - return c.CreateContext(context.Background(), srr) -} - -func (c *FakeSelfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), srr), &authorizationapi.SelfSubjectRulesReview{}) - return obj.(*authorizationapi.SelfSubjectRulesReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go index d07e562546..b480b2b418 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -18,7 +18,30 @@ limitations under the License. package fake +import ( + "context" + + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeSubjectAccessReviews implements SubjectAccessReviewInterface type FakeSubjectAccessReviews struct { Fake *FakeAuthorizationV1 } + +var subjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1", Resource: "subjectaccessreviews"} + +var subjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1", Kind: "SubjectAccessReview"} + +// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. +func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1.SubjectAccessReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1.SubjectAccessReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go deleted file mode 100644 index 6e3f3b45b3..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *FakeSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{}) - if obj == nil { - return nil, err - } - return obj.(*authorizationapi.SubjectAccessReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go index 177209ec61..fe8c72cd4d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go @@ -17,3 +17,11 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. package v1 + +type LocalSubjectAccessReviewExpansion interface{} + +type SelfSubjectAccessReviewExpansion interface{} + +type SelfSubjectRulesReviewExpansion interface{} + +type SubjectAccessReviewExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index 0292c78618..84b2efe166 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -19,6 +19,11 @@ limitations under the License. 
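With all four hand-written expansion files gone, generated_expansion.go keeps empty placeholder interfaces so the interface embedding still compiles. Cluster-scoped SubjectAccessReview callers migrate identically; a hedged sketch with invented attributes:

package main

import (
	"context"

	authzv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkAccess performs a cluster-scoped SubjectAccessReview on behalf of an
// arbitrary user, the kind of call an authorizing proxy makes.
func checkAccess(ctx context.Context, cs kubernetes.Interface, user string) (bool, string, error) {
	sar := &authzv1.SubjectAccessReview{
		Spec: authzv1.SubjectAccessReviewSpec{
			User: user,
			ResourceAttributes: &authzv1.ResourceAttributes{
				Verb:     "delete",
				Resource: "nodes",
			},
		},
	}
	out, err := cs.AuthorizationV1().SubjectAccessReviews().Create(ctx, sar, metav1.CreateOptions{})
	if err != nil {
		return false, "", err
	}
	return out.Status.Allowed, out.Status.Reason, nil
}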
package v1 import ( + "context" + + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type LocalSubjectAccessReviewsGetter interface { // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. type LocalSubjectAccessReviewInterface interface { + Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*v1.LocalSubjectAccessReview, error) LocalSubjectAccessReviewExpansion } @@ -46,3 +52,16 @@ func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *l ns: namespace, } } + +// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. +func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { + result = &v1.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(localSubjectAccessReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go deleted file mode 100644 index 9836308bd6..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1" -) - -type LocalSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) - CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) -} - -func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *localSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - result = &authorizationapi.LocalSubjectAccessReview{} - err = c.client.Post(). - Context(ctx). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - Body(sar). - Do(). 
- Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index 1e3a458178..2006196c11 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1 import ( + "context" + + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type SelfSubjectAccessReviewsGetter interface { // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. type SelfSubjectAccessReviewInterface interface { + Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*v1.SelfSubjectAccessReview, error) SelfSubjectAccessReviewExpansion } @@ -44,3 +50,15 @@ func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessRev client: c.RESTClient(), } } + +// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. +func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { + result = &v1.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectAccessReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go deleted file mode 100644 index 916e5b43f0..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
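SelfSubjectAccessReview, the "can I do X?" variant, follows suit: the context now arrives through Do(ctx) rather than the deleted builder call .Context(ctx). A hedged sketch; the verb, resource, and helper name are illustrative:

package main

import (
	"context"

	authzv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// canICreatePods asks whether the current credentials may create pods in ns.
func canICreatePods(ctx context.Context, cs kubernetes.Interface, ns string) (bool, error) {
	ssar := &authzv1.SelfSubjectAccessReview{
		Spec: authzv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authzv1.ResourceAttributes{
				Namespace: ns,
				Verb:      "create",
				Resource:  "pods",
			},
		},
	}
	out, err := cs.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, ssar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return out.Status.Allowed, nil
}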
-*/ - -package v1 - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1" -) - -type SelfSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) - CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) -} - -func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *selfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - result = &authorizationapi.SelfSubjectAccessReview{} - err = c.client.Post(). - Context(ctx). - Resource("selfsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go index 50a0233eb9..25d99f7b52 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1 import ( + "context" + + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type SelfSubjectRulesReviewsGetter interface { // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. type SelfSubjectRulesReviewInterface interface { + Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*v1.SelfSubjectRulesReview, error) SelfSubjectRulesReviewExpansion } @@ -44,3 +50,15 @@ func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesRevie client: c.RESTClient(), } } + +// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. +func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { + result = &v1.SelfSubjectRulesReview{} + err = c.client.Post(). + Resource("selfsubjectrulesreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectRulesReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go deleted file mode 100644 index 365282ed86..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1" -) - -type SelfSubjectRulesReviewExpansion interface { - Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) - CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) -} - -func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - return c.CreateContext(context.Background(), srr) -} - -func (c *selfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - result = &authorizationapi.SelfSubjectRulesReview{} - err = c.client.Post(). - Context(ctx). - Resource("selfsubjectrulesreviews"). - Body(srr). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index 9c09008c3d..8ac0566a2e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1 import ( + "context" + + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type SubjectAccessReviewsGetter interface { // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. type SubjectAccessReviewInterface interface { + Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (*v1.SubjectAccessReview, error) SubjectAccessReviewExpansion } @@ -44,3 +50,15 @@ func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { client: c.RESTClient(), } } + +// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. +func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { + result = &v1.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(subjectAccessReview). + Do(ctx). 
+ Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go deleted file mode 100644 index 927544f127..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1" -) - -// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. -type SubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) - CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) -} - -func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - result = &authorizationapi.SubjectAccessReview{} - err = c.client.Post(). - Context(ctx). - Resource("subjectaccessreviews"). - Body(sar). - Do(). 
- Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD index c830646718..e2904c06bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD @@ -12,18 +12,15 @@ go_library( "doc.go", "generated_expansion.go", "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", "selfsubjectrulesreview.go", - "selfsubjectrulesreview_expansion.go", "subjectaccessreview.go", - "subjectaccessreview_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1", importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", deps = [ "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/BUILD index 31085a76ca..4f552fe52d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/BUILD @@ -10,20 +10,17 @@ go_library( srcs = [ "doc.go", "fake_authorization_client.go", - "fake_generated_expansion.go", "fake_localsubjectaccessreview.go", - "fake_localsubjectaccessreview_expansion.go", "fake_selfsubjectaccessreview.go", - "fake_selfsubjectaccessreview_expansion.go", "fake_selfsubjectrulesreview.go", - "fake_selfsubjectrulesreview_expansion.go", "fake_subjectaccessreview.go", - "fake_subjectaccessreview_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", deps = [ "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go deleted file mode 100644 index 8754e39d87..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go index d02d05e5d1..2d3ba44628 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -18,8 +18,32 @@ limitations under the License. package fake +import ( + "context" + + v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface type FakeLocalSubjectAccessReviews struct { Fake *FakeAuthorizationV1beta1 ns string } + +var localsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "localsubjectaccessreviews"} + +var localsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "LocalSubjectAccessReview"} + +// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. +func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1beta1.LocalSubjectAccessReview{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.LocalSubjectAccessReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go deleted file mode 100644 index f8580d28a8..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1beta1" - core "k8s.io/client-go/testing" -) - -func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *FakeLocalSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{}) - return obj.(*authorizationapi.LocalSubjectAccessReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go index 8f98ce7a3c..febe90c77a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -18,7 +18,30 @@ limitations under the License. package fake +import ( + "context" + + v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type FakeSelfSubjectAccessReviews struct { Fake *FakeAuthorizationV1beta1 } + +var selfsubjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "selfsubjectaccessreviews"} + +var selfsubjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SelfSubjectAccessReview"} + +// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. +func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1beta1.SelfSubjectAccessReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.SelfSubjectAccessReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go deleted file mode 100644 index cf1fe78700..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1beta1" - core "k8s.io/client-go/testing" -) - -func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *FakeSelfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{}) - return obj.(*authorizationapi.SelfSubjectAccessReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go index d8466b4c8d..02df06012a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go @@ -18,7 +18,30 @@ limitations under the License. package fake +import ( + "context" + + v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type FakeSelfSubjectRulesReviews struct { Fake *FakeAuthorizationV1beta1 } + +var selfsubjectrulesreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "selfsubjectrulesreviews"} + +var selfsubjectrulesreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SelfSubjectRulesReview"} + +// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. +func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1beta1.SelfSubjectRulesReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.SelfSubjectRulesReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go deleted file mode 100644 index 27410b81cd..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1beta1" - core "k8s.io/client-go/testing" -) - -func (c *FakeSelfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - return c.CreateContext(context.Background(), srr) -} - -func (c *FakeSelfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), srr), &authorizationapi.SelfSubjectRulesReview{}) - return obj.(*authorizationapi.SelfSubjectRulesReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go index 0d0abdb72a..b5be913c4b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -18,7 +18,30 @@ limitations under the License. package fake +import ( + "context" + + v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + // FakeSubjectAccessReviews implements SubjectAccessReviewInterface type FakeSubjectAccessReviews struct { Fake *FakeAuthorizationV1beta1 } + +var subjectaccessreviewsResource = schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "subjectaccessreviews"} + +var subjectaccessreviewsKind = schema.GroupVersionKind{Group: "authorization.k8s.io", Version: "v1beta1", Kind: "SubjectAccessReview"} + +// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. 
+func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1beta1.SubjectAccessReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.SubjectAccessReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go deleted file mode 100644 index 721c5963c6..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1beta1" - core "k8s.io/client-go/testing" -) - -func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *FakeSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{}) - return obj.(*authorizationapi.SubjectAccessReview), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go index f6df769632..ae2388301f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go @@ -17,3 +17,11 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
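The generated_expansion.go hunk below reduces the hand-written expansion interfaces to empty placeholders: Create is now emitted by client-gen on the primary typed interface, carrying an explicit context.Context and explicit CreateOptions. A minimal caller-side sketch of the resulting API, assuming a configured kubernetes.Interface named clientset (the variable and the checkAccess helper are illustrative, not part of this patch):

package main

import (
	"context"

	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkAccess issues a SelfSubjectAccessReview through the regenerated
// client. Before this patch the call was
// clientset.AuthorizationV1beta1().SelfSubjectAccessReviews().Create(sar),
// with no context or options; afterwards both are explicit.
func checkAccess(ctx context.Context, clientset kubernetes.Interface) (bool, error) {
	sar := &authorizationv1beta1.SelfSubjectAccessReview{
		Spec: authorizationv1beta1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
				Namespace: "default",
				Verb:      "get",
				Resource:  "pods",
			},
		},
	}
	result, err := clientset.AuthorizationV1beta1().SelfSubjectAccessReviews().Create(ctx, sar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return result.Status.Allowed, nil
}

Callers that previously used the CreateContext expansion method can pass their existing context straight through; callers of the old one-argument Create now supply context.Background() (or a request-scoped context) plus metav1.CreateOptions{}.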
package v1beta1 + +type LocalSubjectAccessReviewExpansion interface{} + +type SelfSubjectAccessReviewExpansion interface{} + +type SelfSubjectRulesReviewExpansion interface{} + +type SubjectAccessReviewExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index f5e86a76a1..78584ba945 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1beta1 import ( + "context" + + v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type LocalSubjectAccessReviewsGetter interface { // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. type LocalSubjectAccessReviewInterface interface { + Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.LocalSubjectAccessReview, error) LocalSubjectAccessReviewExpansion } @@ -46,3 +52,16 @@ func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace strin ns: namespace, } } + +// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. +func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { + result = &v1beta1.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(localSubjectAccessReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go deleted file mode 100644 index 148cf62823..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1beta1" -) - -type LocalSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) - CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) -} - -func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *localSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - result = &authorizationapi.LocalSubjectAccessReview{} - err = c.client.Post(). - Context(ctx). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index 906712cc31..0286c93fe6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1beta1 import ( + "context" + + v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type SelfSubjectAccessReviewsGetter interface { // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. type SelfSubjectAccessReviewInterface interface { + Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectAccessReview, error) SelfSubjectAccessReviewExpansion } @@ -44,3 +50,15 @@ func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAcce client: c.RESTClient(), } } + +// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. +func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { + result = &v1beta1.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectAccessReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go deleted file mode 100644 index 6edead0e77..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1beta1" -) - -type SelfSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) - CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) -} - -func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *selfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - result = &authorizationapi.SelfSubjectAccessReview{} - err = c.client.Post(). - Context(ctx). - Resource("selfsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go index 56c0f99d4f..d772973ec6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1beta1 import ( + "context" + + v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type SelfSubjectRulesReviewsGetter interface { // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. type SelfSubjectRulesReviewInterface interface { + Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectRulesReview, error) SelfSubjectRulesReviewExpansion } @@ -44,3 +50,15 @@ func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRules client: c.RESTClient(), } } + +// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. +func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { + result = &v1beta1.SelfSubjectRulesReview{} + err = c.client.Post(). + Resource("selfsubjectrulesreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectRulesReview). + Do(ctx). 
+ Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go deleted file mode 100644 index a459d5c3ea..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1beta1" -) - -type SelfSubjectRulesReviewExpansion interface { - Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) - CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) -} - -func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - return c.CreateContext(context.Background(), srr) -} - -func (c *selfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - result = &authorizationapi.SelfSubjectRulesReview{} - err = c.client.Post(). - Context(ctx). - Resource("selfsubjectrulesreviews"). - Body(srr). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index 79f1ec5355..aebe8398c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -19,6 +19,11 @@ limitations under the License. package v1beta1 import ( + "context" + + v1beta1 "k8s.io/api/authorization/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +35,7 @@ type SubjectAccessReviewsGetter interface { // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. type SubjectAccessReviewInterface interface { + Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SubjectAccessReview, error) SubjectAccessReviewExpansion } @@ -44,3 +50,15 @@ func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReview client: c.RESTClient(), } } + +// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. 
+func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { + result = &v1beta1.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(subjectAccessReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go deleted file mode 100644 index 7072e29ca4..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "context" - - authorizationapi "k8s.io/api/authorization/v1beta1" -) - -// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. -type SubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) - CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) -} - -func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - return c.CreateContext(context.Background(), sar) -} - -func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - result = &authorizationapi.SubjectAccessReview{} - err = c.client.Post(). - Context(ctx). - Resource("subjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index 6a4bf98810..22fe6d4573 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autos var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"} // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &autoscalingv1.HorizontalPodAutoscaler{}) @@ -50,7 +52,7 @@ func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) ( } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *autoscalingv1.HorizontalPodAutoscalerList, err error) { +func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *autoscalingv1.HorizontalPodAutoscalerList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &autoscalingv1.HorizontalPodAutoscalerList{}) @@ -72,14 +74,14 @@ func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *autosc } // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) @@ -90,7 +92,7 @@ func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscali } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. 
-func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) @@ -102,7 +104,7 @@ func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscali // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (*autoscalingv1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{}) @@ -113,7 +115,7 @@ func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *aut } // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &autoscalingv1.HorizontalPodAutoscaler{}) @@ -121,7 +123,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOpt } // DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &autoscalingv1.HorizontalPodAutoscalerList{}) @@ -129,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &autoscalingv1.HorizontalPodAutoscaler{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 0e0839fb50..1228ce7313 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/autoscaling/v1" @@ -37,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error) - List(opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) + Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } @@ -64,20 +65,20 @@ func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *hori } // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). 
Into(result) return } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { +func (c *horizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.Hor Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interfa Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Post(). Namespace(c.ns). Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). 
Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOptions) error { +func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOption VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} err = c.client.Patch(pt). Namespace(c.ns). Resource("horizontalpodautoscalers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 514a787cb1..de28395fa6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autos var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta1", Kind: "HorizontalPodAutoscaler"} // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. 
Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{}) @@ -50,7 +52,7 @@ func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) ( } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { +func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta1.HorizontalPodAutoscalerList{}) @@ -72,14 +74,14 @@ func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta } // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) @@ -90,7 +92,7 @@ func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.H } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) @@ -102,7 +104,7 @@ func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.H // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) @@ -113,7 +115,7 @@ func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2b } // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{}) @@ -121,7 +123,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOpt } // DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v2beta1.HorizontalPodAutoscalerList{}) @@ -129,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 02d5cfb9b6..a5b564d042 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. package v2beta1 import ( + "context" "time" v2beta1 "k8s.io/api/autoscaling/v2beta1" @@ -37,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
type HorizontalPodAutoscalerInterface interface { - Create(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - Update(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - UpdateStatus(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) - List(opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) + Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } @@ -64,20 +65,20 @@ func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) } // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { +func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.Ho Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Post(). Namespace(c.ns). Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { +func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
-func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, l VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Patch(pt). Namespace(c.ns). Resource("horizontalpodautoscalers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index c0569f00ad..18ea026c16 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autos var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscaler"} // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{}) @@ -50,7 +52,7 @@ func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) ( } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { +func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { obj, err := c.Fake. 
Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta2.HorizontalPodAutoscalerList{}) @@ -72,14 +74,14 @@ func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta } // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) @@ -90,7 +92,7 @@ func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta2.H } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) @@ -102,7 +104,7 @@ func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta2.H // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) @@ -113,7 +115,7 @@ func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2b } // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. 
Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{}) @@ -121,7 +123,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOpt } // DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v2beta2.HorizontalPodAutoscalerList{}) @@ -129,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 91a0fa64f9..168137d72b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. package v2beta2 import ( + "context" "time" v2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -37,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
type HorizontalPodAutoscalerInterface interface { - Create(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - Update(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - UpdateStatus(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) - List(opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) + Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } @@ -64,20 +65,20 @@ func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) } // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { +func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.Ho Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Post(). Namespace(c.ns). Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(horizontalPodAutoscaler.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(horizontalPodAutoscaler). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { +func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
-func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, l VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { +func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { result = &v2beta2.HorizontalPodAutoscaler{} err = c.client.Patch(pt). Namespace(c.ns). Resource("horizontalpodautoscalers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index 06dc25c6b4..b09872952c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var jobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1", Re var jobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"} // Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *FakeJobs) Get(name string, options v1.GetOptions) (result *batchv1.Job, err error) { +func (c *FakeJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *batchv1.Job, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(jobsResource, c.ns, name), &batchv1.Job{}) @@ -50,7 +52,7 @@ func (c *FakeJobs) Get(name string, options v1.GetOptions) (result *batchv1.Job, } // List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *FakeJobs) List(opts v1.ListOptions) (result *batchv1.JobList, err error) { +func (c *FakeJobs) List(ctx context.Context, opts v1.ListOptions) (result *batchv1.JobList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &batchv1.JobList{}) @@ -72,14 +74,14 @@ func (c *FakeJobs) List(opts v1.ListOptions) (result *batchv1.JobList, err error } // Watch returns a watch.Interface that watches the requested jobs. -func (c *FakeJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts)) } // Create takes the representation of a job and creates it. 
Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Create(job *batchv1.Job) (result *batchv1.Job, err error) { +func (c *FakeJobs) Create(ctx context.Context, job *batchv1.Job, opts v1.CreateOptions) (result *batchv1.Job, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &batchv1.Job{}) @@ -90,7 +92,7 @@ func (c *FakeJobs) Create(job *batchv1.Job) (result *batchv1.Job, err error) { } // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Update(job *batchv1.Job) (result *batchv1.Job, err error) { +func (c *FakeJobs) Update(ctx context.Context, job *batchv1.Job, opts v1.UpdateOptions) (result *batchv1.Job, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &batchv1.Job{}) @@ -102,7 +104,7 @@ func (c *FakeJobs) Update(job *batchv1.Job) (result *batchv1.Job, err error) { // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeJobs) UpdateStatus(job *batchv1.Job) (*batchv1.Job, error) { +func (c *FakeJobs) UpdateStatus(ctx context.Context, job *batchv1.Job, opts v1.UpdateOptions) (*batchv1.Job, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &batchv1.Job{}) @@ -113,7 +115,7 @@ func (c *FakeJobs) UpdateStatus(job *batchv1.Job) (*batchv1.Job, error) { } // Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *FakeJobs) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeJobs) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(jobsResource, c.ns, name), &batchv1.Job{}) @@ -121,7 +123,7 @@ func (c *FakeJobs) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeJobs) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &batchv1.JobList{}) @@ -129,7 +131,7 @@ func (c *FakeJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li } // Patch applies the patch and returns the patched job. -func (c *FakeJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batchv1.Job, err error) { +func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *batchv1.Job, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &batchv1.Job{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index b55c602b34..1ad12320cb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" "time" v1 "k8s.io/api/batch/v1" @@ -37,15 +38,15 @@ type JobsGetter interface { // JobInterface has methods to work with Job resources. type JobInterface interface { - Create(*v1.Job) (*v1.Job, error) - Update(*v1.Job) (*v1.Job, error) - UpdateStatus(*v1.Job) (*v1.Job, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Job, error) - List(opts metav1.ListOptions) (*v1.JobList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) + Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error) + Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Job, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.JobList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) JobExpansion } @@ -64,20 +65,20 @@ func newJobs(c *BatchV1Client, namespace string) *jobs { } // Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err error) { +func (c *jobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { +func (c *jobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *jobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. 
-func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { +func (c *jobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Post(). Namespace(c.ns). Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). Body(job). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { +func (c *jobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Put(). Namespace(c.ns). Resource("jobs"). Name(job.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(job). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { +func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Put(). Namespace(c.ns). Resource("jobs"). Name(job.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(job). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error { +func (c *jobs) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("jobs"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *jobs) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched job. -func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { +func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { result = &v1.Job{} err = c.client.Patch(pt). Namespace(c.ns). Resource("jobs"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index d89d2fa21d..743360e993 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/batch/v1beta1" @@ -37,15 +38,15 @@ type CronJobsGetter interface { // CronJobInterface has methods to work with CronJob resources. type CronJobInterface interface { - Create(*v1beta1.CronJob) (*v1beta1.CronJob, error) - Update(*v1beta1.CronJob) (*v1beta1.CronJob, error) - UpdateStatus(*v1beta1.CronJob) (*v1beta1.CronJob, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CronJob, error) - List(opts v1.ListOptions) (*v1beta1.CronJobList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) + Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error) + Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CronJob, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CronJobList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) CronJobExpansion } @@ -64,20 +65,20 @@ func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { } // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { +func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err e Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). 
- Watch() + Watch(ctx) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Post(). Namespace(c.ns). Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Put(). Namespace(c.ns). Resource("cronjobs"). Name(cronJob.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *cronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Put(). Namespace(c.ns). Resource("cronjobs"). Name(cronJob.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { +func (c *cronJobs) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *cronJobs) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { +func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { result = &v1beta1.CronJob{} err = c.client.Patch(pt). Namespace(c.ns). Resource("cronjobs"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index 3985c40374..3cc605dff5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1b var cronjobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1beta1", Kind: "CronJob"} // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { +func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{}) @@ -50,7 +52,7 @@ func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v1beta1. } // List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { +func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1beta1.CronJobList{}) @@ -72,14 +74,14 @@ func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, e } // Watch returns a watch.Interface that watches the requested cronJobs. -func (c *FakeCronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) @@ -90,7 +92,7 @@ func (c *FakeCronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { +func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) @@ -102,7 +104,7 @@ func (c *FakeCronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (*v1beta1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1beta1.CronJob{}) @@ -113,7 +115,7 @@ func (c *FakeCronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (*v1beta1.CronJob, } // Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeCronJobs) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{}) @@ -121,7 +123,7 @@ func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeCronJobs) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.CronJobList{}) @@ -129,7 +131,7 @@ func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v } // Patch applies the patch and returns the patched cronJob. -func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { +func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1beta1.CronJob{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index 19123b6041..ea3cfefc93 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -19,6 +19,7 @@ limitations under the License. package v2alpha1 import ( + "context" "time" v2alpha1 "k8s.io/api/batch/v2alpha1" @@ -37,15 +38,15 @@ type CronJobsGetter interface { // CronJobInterface has methods to work with CronJob resources. 
type CronJobInterface interface { - Create(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) - Update(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) - UpdateStatus(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v2alpha1.CronJob, error) - List(opts v1.ListOptions) (*v2alpha1.CronJobList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) + Create(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.CreateOptions) (*v2alpha1.CronJob, error) + Update(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (*v2alpha1.CronJob, error) + UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (*v2alpha1.CronJob, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CronJob, error) + List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CronJobList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CronJob, err error) CronJobExpansion } @@ -64,20 +65,20 @@ func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs { } // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { +func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. 
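Note (illustrative, not part of the patch): the Create hunk below adds a leading context and a trailing CreateOptions argument. A minimal caller-side sketch, assuming a configured clientset cs; the namespace and object name are placeholders:

package examples

import (
	"context"
	"time"

	v2alpha1 "k8s.io/api/batch/v2alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createCronJob shows the post-migration call shape: the context bounds the
// request, and CreateOptions is now an explicit argument.
func createCronJob(cs kubernetes.Interface) (*v2alpha1.CronJob, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	cj := &v2alpha1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	// Old: cs.BatchV2alpha1().CronJobs("default").Create(cj)
	return cs.BatchV2alpha1().CronJobs("default").Create(ctx, cj, metav1.CreateOptions{})
}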
-func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.CreateOptions) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Post(). Namespace(c.ns). Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Put(). Namespace(c.ns). Resource("cronjobs"). Name(cronJob.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Put(). Namespace(c.ns). Resource("cronjobs"). Name(cronJob.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cronJob). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { +func (c *cronJobs) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *cronJobs) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { +func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} err = c.client.Patch(pt). Namespace(c.ns). Resource("cronjobs"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go index 2195027d27..f004d6793a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v2a var cronjobsKind = schema.GroupVersionKind{Group: "batch", Version: "v2alpha1", Kind: "CronJob"} // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { +func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{}) @@ -50,7 +52,7 @@ func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1 } // List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { +func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v2alpha1.CronJobList{}) @@ -72,14 +74,14 @@ func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, } // Watch returns a watch.Interface that watches the requested cronJobs. -func (c *FakeCronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.CreateOptions) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) @@ -90,7 +92,7 @@ func (c *FakeCronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJ } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { +func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) @@ -102,7 +104,7 @@ func (c *FakeCronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJ // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (*v2alpha1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (*v2alpha1.CronJob, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v2alpha1.CronJob{}) @@ -113,7 +115,7 @@ func (c *FakeCronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (*v2alpha1.CronJo } // Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeCronJobs) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{}) @@ -121,7 +123,7 @@ func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeCronJobs) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v2alpha1.CronJobList{}) @@ -129,7 +131,7 @@ func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v } // Patch applies the patch and returns the patched cronJob. -func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { +func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v2alpha1.CronJob{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index 712d3a01af..a53b7e7477 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/certificates/v1beta1" @@ -37,15 +38,15 @@ type CertificateSigningRequestsGetter interface { // CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. 
type CertificateSigningRequestInterface interface { - Create(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - Update(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - UpdateStatus(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) - List(opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) + Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error) + Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) CertificateSigningRequestExpansion } @@ -62,19 +63,19 @@ func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSig } // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Get(). Resource("certificatesigningrequests"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { +func (c *certificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -84,13 +85,13 @@ func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1. Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested certificateSigningRequests. 
-func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *certificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -100,59 +101,61 @@ func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Post(). Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). Body(certificateSigningRequest). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(certificateSigningRequest). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequest.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(certificateSigningRequest). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { +func (c *certificateSigningRequests) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("certificatesigningrequests"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
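Note: the DeleteCollection hunk follows. At this point in the migration DeleteOptions still travels by pointer; the value-type cleanup came later upstream. A hedged sketch against the CSR client, with cs and the label selector as assumed placeholders:

package examples

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteCSRs removes one named CertificateSigningRequest, then any others
// matching a selector. CSRs are cluster-scoped, so no namespace is given.
func deleteCSRs(cs kubernetes.Interface, name string) error {
	ctx := context.Background()
	if err := cs.CertificatesV1beta1().CertificateSigningRequests().
		Delete(ctx, name, &metav1.DeleteOptions{}); err != nil {
		return err
	}
	return cs.CertificatesV1beta1().CertificateSigningRequests().
		DeleteCollection(ctx, &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "env=test"})
}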
-func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -162,19 +165,20 @@ func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} err = c.client.Patch(pt). Resource("certificatesigningrequests"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go index c63b80638f..dd954bb168 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "context" + certificates "k8s.io/api/certificates/v1beta1" ) @@ -31,7 +33,7 @@ func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *c Name(certificateSigningRequest.Name). Body(certificateSigningRequest). SubResource("approval"). - Do(). + Do(context.TODO()). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index aa45c88033..53b7331809 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var certificatesigningrequestsResource = schema.GroupVersionResource{Group: "cer var certificatesigningrequestsKind = schema.GroupVersionKind{Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequest"} // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. 
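Note on the certificatesigningrequest_expansion.go hunk above: hand-written expansion methods were not regenerated, so UpdateApproval keeps its old signature and supplies context.TODO() to Do() internally. Existing callers therefore compile unchanged, at the cost of an uncancellable request; a sketch:

package examples

import (
	certificates "k8s.io/api/certificates/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// approveCSR calls the hand-written expansion method, whose signature did not
// change in this patch; the vendored code plugs in context.TODO() itself.
func approveCSR(cs kubernetes.Interface, csr *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
	return cs.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(csr)
}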
-func (c *FakeCertificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeCertificateSigningRequests) Get(name string, options v1.GetOptions) } // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *FakeCertificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { +func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1beta1.CertificateSigningRequestList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeCertificateSigningRequests) List(opts v1.ListOptions) (result *v1be } // Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *FakeCertificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeCertificateSigningRequests) Create(certificateSigningRequest *v1bet } // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakeCertificateSigningRequests) Update(certificateSigningRequest *v1bet // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeCertificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) { +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakeCertificateSigningRequests) UpdateStatus(certificateSigningRequest } // Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *FakeCertificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeCertificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{}) @@ -121,7 +123,7 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(options *v1.DeleteOpti } // Patch applies the patch and returns the patched certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { +func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1beta1.CertificateSigningRequest{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go index 940c738c1b..ba65f6205e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + coordinationv1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var leasesResource = schema.GroupVersionResource{Group: "coordination.k8s.io", V var leasesKind = schema.GroupVersionKind{Group: "coordination.k8s.io", Version: "v1", Kind: "Lease"} // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. 
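Note: the fakes are regenerated with the same context-aware signatures, so unit tests migrate exactly like production callers; as the hunks above show, the fakes accept ctx and the new options but do not forward them to Invokes. A test sketch, with names and namespace illustrative:

package examples

import (
	"context"
	"testing"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestLeaseLifecycle exercises the regenerated fake clientset through the
// new signatures: context first, options last.
func TestLeaseLifecycle(t *testing.T) {
	ctx := context.Background()
	cs := fake.NewSimpleClientset()

	lease := &coordinationv1.Lease{ObjectMeta: metav1.ObjectMeta{Name: "lock", Namespace: "kube-system"}}
	if _, err := cs.CoordinationV1().Leases("kube-system").Create(ctx, lease, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
	if _, err := cs.CoordinationV1().Leases("kube-system").Get(ctx, "lock", metav1.GetOptions{}); err != nil {
		t.Fatal(err)
	}
}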
-func (c *FakeLeases) Get(name string, options v1.GetOptions) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *coordinationv1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(leasesResource, c.ns, name), &coordinationv1.Lease{}) @@ -50,7 +52,7 @@ func (c *FakeLeases) Get(name string, options v1.GetOptions) (result *coordinati } // List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *FakeLeases) List(opts v1.ListOptions) (result *coordinationv1.LeaseList, err error) { +func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *coordinationv1.LeaseList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &coordinationv1.LeaseList{}) @@ -72,14 +74,14 @@ func (c *FakeLeases) List(opts v1.ListOptions) (result *coordinationv1.LeaseList } // Watch returns a watch.Interface that watches the requested leases. -func (c *FakeLeases) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(lease *coordinationv1.Lease) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Create(ctx context.Context, lease *coordinationv1.Lease, opts v1.CreateOptions) (result *coordinationv1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &coordinationv1.Lease{}) @@ -90,7 +92,7 @@ func (c *FakeLeases) Create(lease *coordinationv1.Lease) (result *coordinationv1 } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(lease *coordinationv1.Lease) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Update(ctx context.Context, lease *coordinationv1.Lease, opts v1.UpdateOptions) (result *coordinationv1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &coordinationv1.Lease{}) @@ -101,7 +103,7 @@ func (c *FakeLeases) Update(lease *coordinationv1.Lease) (result *coordinationv1 } // Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *FakeLeases) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeLeases) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(leasesResource, c.ns, name), &coordinationv1.Lease{}) @@ -109,7 +111,7 @@ func (c *FakeLeases) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeLeases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeLeases) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &coordinationv1.LeaseList{}) @@ -117,7 +119,7 @@ func (c *FakeLeases) DeleteCollection(options *v1.DeleteOptions, listOptions v1. } // Patch applies the patch and returns the patched lease. 
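Note: Patch now takes PatchOptions ahead of the subresources variadic, and the typed hunks also reorder the request builder so Name precedes SubResource. A caller-side sketch using a JSON merge patch; the lease name and holder identity are illustrative:

package examples

import (
	"context"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// patchLeaseHolder sets spec.holderIdentity via a JSON merge patch using the
// post-migration Patch signature.
func patchLeaseHolder(cs kubernetes.Interface) (*coordinationv1.Lease, error) {
	ctx := context.Background()
	patch := []byte(`{"spec":{"holderIdentity":"node-a"}}`)
	return cs.CoordinationV1().Leases("kube-system").
		Patch(ctx, "lock", types.MergePatchType, patch, metav1.PatchOptions{})
}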
-func (c *FakeLeases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *coordinationv1.Lease, err error) { +func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &coordinationv1.Lease{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go index b6cf1b64f6..5077779b0b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/coordination/v1" @@ -37,14 +38,14 @@ type LeasesGetter interface { // LeaseInterface has methods to work with Lease resources. type LeaseInterface interface { - Create(*v1.Lease) (*v1.Lease, error) - Update(*v1.Lease) (*v1.Lease, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Lease, error) - List(opts metav1.ListOptions) (*v1.LeaseList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) + Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (*v1.Lease, error) + Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (*v1.Lease, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Lease, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.LeaseList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) LeaseExpansion } @@ -63,20 +64,20 @@ func newLeases(c *CoordinationV1Client, namespace string) *leases { } // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(name string, options metav1.GetOptions) (result *v1.Lease, err error) { +func (c *leases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { result = &v1.Lease{} err = c.client.Get(). Namespace(c.ns). Resource("leases"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error) { +func (c *leases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error) Resource("leases"). 
VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *leases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(lease *v1.Lease) (result *v1.Lease, err error) { +func (c *leases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { result = &v1.Lease{} err = c.client.Post(). Namespace(c.ns). Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). Body(lease). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(lease *v1.Lease) (result *v1.Lease, err error) { +func (c *leases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { result = &v1.Lease{} err = c.client.Put(). Namespace(c.ns). Resource("leases"). Name(lease.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(lease). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(name string, options *metav1.DeleteOptions) error { +func (c *leases) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("leases"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *leases) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *leases) DeleteCollection(options *metav1.DeleteOptions, listOptions met VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched lease. -func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) { +func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { result = &v1.Lease{} err = c.client.Patch(pt). Namespace(c.ns). Resource("leases"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index 0ebf3bffc2..53fc60baff 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var leasesResource = schema.GroupVersionResource{Group: "coordination.k8s.io", V var leasesKind = schema.GroupVersionKind{Group: "coordination.k8s.io", Version: "v1beta1", Kind: "Lease"} // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *FakeLeases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { +func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1beta1.Lease{}) @@ -50,7 +52,7 @@ func (c *FakeLeases) Get(name string, options v1.GetOptions) (result *v1beta1.Le } // List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *FakeLeases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { +func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1beta1.LeaseList{}) @@ -72,14 +74,14 @@ func (c *FakeLeases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err e } // Watch returns a watch.Interface that watches the requested leases. -func (c *FakeLeases) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { +func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) @@ -90,7 +92,7 @@ func (c *FakeLeases) Create(lease *v1beta1.Lease) (result *v1beta1.Lease, err er } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { +func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) @@ -101,7 +103,7 @@ func (c *FakeLeases) Update(lease *v1beta1.Lease) (result *v1beta1.Lease, err er } // Delete takes name of the lease and deletes it. Returns an error if one occurs. 
-func (c *FakeLeases) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeLeases) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(leasesResource, c.ns, name), &v1beta1.Lease{}) @@ -109,7 +111,7 @@ func (c *FakeLeases) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeLeases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeLeases) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.LeaseList{}) @@ -117,7 +119,7 @@ func (c *FakeLeases) DeleteCollection(options *v1.DeleteOptions, listOptions v1. } // Patch applies the patch and returns the patched lease. -func (c *FakeLeases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) { +func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1beta1.Lease{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 490d815aa6..ca65b57fa7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/coordination/v1beta1" @@ -37,14 +38,14 @@ type LeasesGetter interface { // LeaseInterface has methods to work with Lease resources. 
type LeaseInterface interface { - Create(*v1beta1.Lease) (*v1beta1.Lease, error) - Update(*v1beta1.Lease) (*v1beta1.Lease, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Lease, error) - List(opts v1.ListOptions) (*v1beta1.LeaseList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) + Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (*v1beta1.Lease, error) + Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (*v1beta1.Lease, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Lease, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.LeaseList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) LeaseExpansion } @@ -63,20 +64,20 @@ func newLeases(c *CoordinationV1beta1Client, namespace string) *leases { } // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { +func (c *leases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { result = &v1beta1.Lease{} err = c.client.Get(). Namespace(c.ns). Resource("leases"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { +func (c *leases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *leases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { +func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { result = &v1beta1.Lease{} err = c.client.Post(). Namespace(c.ns). 
Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). Body(lease). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { +func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { result = &v1beta1.Lease{} err = c.client.Put(). Namespace(c.ns). Resource("leases"). Name(lease.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(lease). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(name string, options *v1.DeleteOptions) error { +func (c *leases) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("leases"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *leases) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.List VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched lease. -func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) { +func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { result = &v1beta1.Lease{} err = c.client.Patch(pt). Namespace(c.ns). Resource("leases"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD index d980818913..da2a237b1f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD @@ -32,7 +32,6 @@ go_library( "service.go", "service_expansion.go", "serviceaccount.go", - "serviceaccount_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/core/v1", importpath = "k8s.io/client-go/kubernetes/typed/core/v1", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 302b2fdc34..230ea29c5f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type ComponentStatusesGetter interface { // ComponentStatusInterface has methods to work with ComponentStatus resources. type ComponentStatusInterface interface { - Create(*v1.ComponentStatus) (*v1.ComponentStatus, error) - Update(*v1.ComponentStatus) (*v1.ComponentStatus, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ComponentStatus, error) - List(opts metav1.ListOptions) (*v1.ComponentStatusList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) + Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (*v1.ComponentStatus, error) + Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (*v1.ComponentStatus, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ComponentStatus, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ComponentStatusList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) ComponentStatusExpansion } @@ -61,19 +62,19 @@ func newComponentStatuses(c *CoreV1Client) *componentStatuses { } // Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { +func (c *componentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} err = c.client.Get(). Resource("componentstatuses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { +func (c *componentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentS Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested componentStatuses. 
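Note: the Watch hunk below threads the context into the stream itself, so cancelling the context can end the watch in addition to the existing Stop(). A sketch, assuming a configured clientset cs:

package examples

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchComponentStatuses drains events until the context is cancelled or the
// server closes the stream; Stop() remains available as before.
func watchComponentStatuses(ctx context.Context, cs kubernetes.Interface) error {
	w, err := cs.CoreV1().ComponentStatuses().Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for range w.ResultChan() {
		// handle each watch.Event here; the channel closes on cancellation.
	}
	return nil
}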
-func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *componentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, err Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { +func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} err = c.client.Post(). Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(componentStatus). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { +func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} err = c.client.Put(). Resource("componentstatuses"). Name(componentStatus.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(componentStatus). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) error { +func (c *componentStatuses) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("componentstatuses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *componentStatuses) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, list VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { +func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} err = c.client.Patch(pt). Resource("componentstatuses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index 18ce954ae2..c4f7090813 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type ConfigMapsGetter interface { // ConfigMapInterface has methods to work with ConfigMap resources. type ConfigMapInterface interface { - Create(*v1.ConfigMap) (*v1.ConfigMap, error) - Update(*v1.ConfigMap) (*v1.ConfigMap, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error) - List(opts metav1.ListOptions) (*v1.ConfigMapList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) + Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) + Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) ConfigMapExpansion } @@ -63,20 +64,20 @@ func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { } // Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { +func (c *configMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { +func (c *configMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, er Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested configMaps. 
-func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *configMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { +func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} err = c.client.Post(). Namespace(c.ns). Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). Body(configMap). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { +func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} err = c.client.Put(). Namespace(c.ns). Resource("configmaps"). Name(configMap.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(configMap). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error { +func (c *configMaps) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("configmaps"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *configMaps) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched configMap. -func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { +func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} err = c.client.Patch(pt). Namespace(c.ns). Resource("configmaps"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
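For namespaced resources such as ConfigMaps the migration is identical. A short caller-side sketch of the new signatures; note that at this snapshot Delete still takes a *metav1.DeleteOptions pointer, while Create and Update take their options by value (the roundTrip helper and the "demo" namespace are illustrative only):

package configmapexample

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// roundTrip creates and then deletes a ConfigMap using the
// context-first signatures vendored above.
func roundTrip(ctx context.Context, cs kubernetes.Interface) error {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "app-config", Namespace: "demo"},
		Data:       map[string]string{"key": "value"},
	}
	if _, err := cs.CoreV1().ConfigMaps("demo").Create(ctx, cm, metav1.CreateOptions{}); err != nil {
		return err
	}
	// Delete still takes a pointer to DeleteOptions at this snapshot.
	return cs.CoreV1().ConfigMaps("demo").Delete(ctx, "app-config", &metav1.DeleteOptions{})
}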
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index 978a2a196c..925b9e94cb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type EndpointsGetter interface { // EndpointsInterface has methods to work with Endpoints resources. type EndpointsInterface interface { - Create(*v1.Endpoints) (*v1.Endpoints, error) - Update(*v1.Endpoints) (*v1.Endpoints, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Endpoints, error) - List(opts metav1.ListOptions) (*v1.EndpointsList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) + Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (*v1.Endpoints, error) + Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (*v1.Endpoints, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Endpoints, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointsList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) EndpointsExpansion } @@ -63,20 +64,20 @@ func newEndpoints(c *CoreV1Client, namespace string) *endpoints { } // Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { +func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) { +func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested endpoints. 
-func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { +func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} err = c.client.Post(). Namespace(c.ns). Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpoints). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { +func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} err = c.client.Put(). Namespace(c.ns). Resource("endpoints"). Name(endpoints.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpoints). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error { +func (c *endpoints) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("endpoints"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *endpoints) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { +func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} err = c.client.Patch(pt). Namespace(c.ns). Resource("endpoints"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 55cfa0901b..1073effcc6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. type EventInterface interface { - Create(*v1.Event) (*v1.Event, error) - Update(*v1.Event) (*v1.Event, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Event, error) - List(opts metav1.ListOptions) (*v1.EventList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) + Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error) + Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) EventExpansion } @@ -63,20 +64,20 @@ func newEvents(c *CoreV1Client, namespace string) *events { } // Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, err error) { +func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { result = &v1.Event{} err = c.client.Get(). Namespace(c.ns). Resource("events"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) { +func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested events. 
-func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { +func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { result = &v1.Event{} err = c.client.Post(). Namespace(c.ns). Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). Body(event). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { +func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { result = &v1.Event{} err = c.client.Put(). Namespace(c.ns). Resource("events"). Name(event.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(event). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *metav1.DeleteOptions) error { +func (c *events) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("events"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *events) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions met VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { +func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { result = &v1.Event{} err = c.client.Patch(pt). Namespace(c.ns). Resource("events"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
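Watch now also accepts the context, as the Watch hunk above shows, so cancelling the context is the natural way to tear down a watch: the underlying request is aborted and the result channel is closed. An illustrative sketch (tailEvents and the "demo" namespace are not part of the patch):

package watchexample

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// tailEvents streams core/v1 Events with the new Watch(ctx, opts)
// signature until ctx is cancelled or the server ends the watch.
func tailEvents(ctx context.Context, cs kubernetes.Interface) error {
	w, err := cs.CoreV1().Events("demo").Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s %T\n", ev.Type, ev.Object)
	}
	return nil
}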
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index 5a82afa42f..211cf0603c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -17,9 +17,10 @@ limitations under the License. package v1 import ( + "context" "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" @@ -54,7 +55,7 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). - Do(). + Do(context.TODO()). Into(result) return result, err } @@ -71,7 +72,7 @@ func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { Resource("events"). Name(event.Name). Body(event). - Do(). + Do(context.TODO()). Into(result) return result, err } @@ -91,7 +92,7 @@ func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) Resource("events"). Name(incompleteEvent.Name). Body(data). - Do(). + Do(context.TODO()). Into(result) return result, err } @@ -118,7 +119,7 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev refUID = &stringRefUID } fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) - return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) + return e.List(context.TODO(), metav1.ListOptions{FieldSelector: fieldSelector.String()}) } // Returns the appropriate field selector based on the API version being used to communicate with the server. diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD index 022c147acd..c1fdac3d3e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD @@ -31,7 +31,6 @@ go_library( "fake_service.go", "fake_service_expansion.go", "fake_serviceaccount.go", - "fake_serviceaccount_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake", importpath = "k8s.io/client-go/kubernetes/typed/core/v1/fake", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index 18beedc2d3..967b9f7c6a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var componentstatusesResource = schema.GroupVersionResource{Group: "", Version: var componentstatusesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ComponentStatus"} // Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. 
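The event expansion methods above keep their pre-context signatures and instead pass context.TODO() to Do() (or to List) internally, so existing callers compile unchanged. For example, assuming a constructed clientset (searchPodEvents is illustrative):

package expansionexample

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
)

// searchPodEvents calls the Search expansion helper, whose signature is
// unchanged by this patch; it now supplies context.TODO() itself when
// delegating to List.
func searchPodEvents(cs kubernetes.Interface, pod *corev1.Pod) (*corev1.EventList, error) {
	return cs.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
}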
-func (c *FakeComponentStatuses) Get(name string, options v1.GetOptions) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(componentstatusesResource, name), &corev1.ComponentStatus{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeComponentStatuses) Get(name string, options v1.GetOptions) (result } // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *FakeComponentStatuses) List(opts v1.ListOptions) (result *corev1.ComponentStatusList, err error) { +func (c *FakeComponentStatuses) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ComponentStatusList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &corev1.ComponentStatusList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeComponentStatuses) List(opts v1.ListOptions) (result *corev1.Compon } // Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *FakeComponentStatuses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeComponentStatuses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts)) } // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Create(componentStatus *corev1.ComponentStatus) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *corev1.ComponentStatus, opts v1.CreateOptions) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &corev1.ComponentStatus{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeComponentStatuses) Create(componentStatus *corev1.ComponentStatus) } // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Update(componentStatus *corev1.ComponentStatus) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *corev1.ComponentStatus, opts v1.UpdateOptions) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &corev1.ComponentStatus{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeComponentStatuses) Update(componentStatus *corev1.ComponentStatus) } // Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *FakeComponentStatuses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(componentstatusesResource, name), &corev1.ComponentStatus{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeComponentStatuses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOptions) _, err := c.Fake.Invokes(action, &corev1.ComponentStatusList{}) @@ -110,7 +112,7 @@ func (c *FakeComponentStatuses) DeleteCollection(options *v1.DeleteOptions, list } // Patch applies the patch and returns the patched componentStatus. -func (c *FakeComponentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ComponentStatus, err error) { +func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &corev1.ComponentStatus{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index 2361ac3fe9..299306064d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var configmapsResource = schema.GroupVersionResource{Group: "", Version: "v1", R var configmapsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} // Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *FakeConfigMaps) Get(name string, options v1.GetOptions) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &corev1.ConfigMap{}) @@ -50,7 +52,7 @@ func (c *FakeConfigMaps) Get(name string, options v1.GetOptions) (result *corev1 } // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *FakeConfigMaps) List(opts v1.ListOptions) (result *corev1.ConfigMapList, err error) { +func (c *FakeConfigMaps) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ConfigMapList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &corev1.ConfigMapList{}) @@ -72,14 +74,14 @@ func (c *FakeConfigMaps) List(opts v1.ListOptions) (result *corev1.ConfigMapList } // Watch returns a watch.Interface that watches the requested configMaps. -func (c *FakeConfigMaps) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeConfigMaps) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts)) } // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. 
-func (c *FakeConfigMaps) Create(configMap *corev1.ConfigMap) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Create(ctx context.Context, configMap *corev1.ConfigMap, opts v1.CreateOptions) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &corev1.ConfigMap{}) @@ -90,7 +92,7 @@ func (c *FakeConfigMaps) Create(configMap *corev1.ConfigMap) (result *corev1.Con } // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Update(configMap *corev1.ConfigMap) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Update(ctx context.Context, configMap *corev1.ConfigMap, opts v1.UpdateOptions) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &corev1.ConfigMap{}) @@ -101,7 +103,7 @@ func (c *FakeConfigMaps) Update(configMap *corev1.ConfigMap) (result *corev1.Con } // Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *FakeConfigMaps) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeConfigMaps) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(configmapsResource, c.ns, name), &corev1.ConfigMap{}) @@ -109,7 +111,7 @@ func (c *FakeConfigMaps) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeConfigMaps) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.ConfigMapList{}) @@ -117,7 +119,7 @@ func (c *FakeConfigMaps) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched configMap. -func (c *FakeConfigMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ConfigMap, err error) { +func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &corev1.ConfigMap{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index d521af4083..4c898728b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var endpointsResource = schema.GroupVersionResource{Group: "", Version: "v1", Re var endpointsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Endpoints"} // Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. 
-func (c *FakeEndpoints) Get(name string, options v1.GetOptions) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Endpoints, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &corev1.Endpoints{}) @@ -50,7 +52,7 @@ func (c *FakeEndpoints) Get(name string, options v1.GetOptions) (result *corev1. } // List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *FakeEndpoints) List(opts v1.ListOptions) (result *corev1.EndpointsList, err error) { +func (c *FakeEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *corev1.EndpointsList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &corev1.EndpointsList{}) @@ -72,14 +74,14 @@ func (c *FakeEndpoints) List(opts v1.ListOptions) (result *corev1.EndpointsList, } // Watch returns a watch.Interface that watches the requested endpoints. -func (c *FakeEndpoints) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts)) } // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Create(endpoints *corev1.Endpoints) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Create(ctx context.Context, endpoints *corev1.Endpoints, opts v1.CreateOptions) (result *corev1.Endpoints, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &corev1.Endpoints{}) @@ -90,7 +92,7 @@ func (c *FakeEndpoints) Create(endpoints *corev1.Endpoints) (result *corev1.Endp } // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Update(endpoints *corev1.Endpoints) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Update(ctx context.Context, endpoints *corev1.Endpoints, opts v1.UpdateOptions) (result *corev1.Endpoints, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &corev1.Endpoints{}) @@ -101,7 +103,7 @@ func (c *FakeEndpoints) Update(endpoints *corev1.Endpoints) (result *corev1.Endp } // Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *FakeEndpoints) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeEndpoints) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(endpointsResource, c.ns, name), &corev1.Endpoints{}) @@ -109,7 +111,7 @@ func (c *FakeEndpoints) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. 
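The fake clients mirror the new signatures but, as the bodies above show, the recorded actions ignore the context and options arguments. A sketch of a test against the fake clientset using the migrated calls (names are illustrative):

package fakeexample

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestEndpointsCreate exercises the fake clientset with the
// context-first signatures.
func TestEndpointsCreate(t *testing.T) {
	cs := fake.NewSimpleClientset()
	ep := &corev1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "svc", Namespace: "demo"}}
	if _, err := cs.CoreV1().Endpoints("demo").Create(context.TODO(), ep, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
	got, err := cs.CoreV1().Endpoints("demo").Get(context.TODO(), "svc", metav1.GetOptions{})
	if err != nil || got.Name != "svc" {
		t.Fatalf("unexpected result: %v %v", got, err)
	}
}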
-func (c *FakeEndpoints) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeEndpoints) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.EndpointsList{}) @@ -117,7 +119,7 @@ func (c *FakeEndpoints) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched endpoints. -func (c *FakeEndpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Endpoints, err error) { +func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Endpoints, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &corev1.Endpoints{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 3444f4be96..ca531f5c88 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var eventsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resou var eventsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Event"} // Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *corev1.Event, err error) { +func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(eventsResource, c.ns, name), &corev1.Event{}) @@ -50,7 +52,7 @@ func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *corev1.Eve } // List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(opts v1.ListOptions) (result *corev1.EventList, err error) { +func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *corev1.EventList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &corev1.EventList{}) @@ -72,14 +74,14 @@ func (c *FakeEvents) List(opts v1.ListOptions) (result *corev1.EventList, err er } // Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(event *corev1.Event) (result *corev1.Event, err error) { +func (c *FakeEvents) Create(ctx context.Context, event *corev1.Event, opts v1.CreateOptions) (result *corev1.Event, err error) { obj, err := c.Fake. 
Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &corev1.Event{}) @@ -90,7 +92,7 @@ func (c *FakeEvents) Create(event *corev1.Event) (result *corev1.Event, err erro } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(event *corev1.Event) (result *corev1.Event, err error) { +func (c *FakeEvents) Update(ctx context.Context, event *corev1.Event, opts v1.UpdateOptions) (result *corev1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &corev1.Event{}) @@ -101,7 +103,7 @@ func (c *FakeEvents) Update(event *corev1.Event) (result *corev1.Event, err erro } // Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeEvents) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &corev1.Event{}) @@ -109,7 +111,7 @@ func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeEvents) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.EventList{}) @@ -117,7 +119,7 @@ func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1. } // Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Event, err error) { +func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &corev1.Event{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index d110031f83..52e473b1eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var limitrangesResource = schema.GroupVersionResource{Group: "", Version: "v1", var limitrangesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "LimitRange"} // Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *FakeLimitRanges) Get(name string, options v1.GetOptions) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.LimitRange, err error) { obj, err := c.Fake. 
Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &corev1.LimitRange{}) @@ -50,7 +52,7 @@ func (c *FakeLimitRanges) Get(name string, options v1.GetOptions) (result *corev } // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *FakeLimitRanges) List(opts v1.ListOptions) (result *corev1.LimitRangeList, err error) { +func (c *FakeLimitRanges) List(ctx context.Context, opts v1.ListOptions) (result *corev1.LimitRangeList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &corev1.LimitRangeList{}) @@ -72,14 +74,14 @@ func (c *FakeLimitRanges) List(opts v1.ListOptions) (result *corev1.LimitRangeLi } // Watch returns a watch.Interface that watches the requested limitRanges. -func (c *FakeLimitRanges) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeLimitRanges) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts)) } // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Create(limitRange *corev1.LimitRange) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *corev1.LimitRange, opts v1.CreateOptions) (result *corev1.LimitRange, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &corev1.LimitRange{}) @@ -90,7 +92,7 @@ func (c *FakeLimitRanges) Create(limitRange *corev1.LimitRange) (result *corev1. } // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Update(limitRange *corev1.LimitRange) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *corev1.LimitRange, opts v1.UpdateOptions) (result *corev1.LimitRange, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &corev1.LimitRange{}) @@ -101,7 +103,7 @@ func (c *FakeLimitRanges) Update(limitRange *corev1.LimitRange) (result *corev1. } // Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *FakeLimitRanges) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeLimitRanges) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(limitrangesResource, c.ns, name), &corev1.LimitRange{}) @@ -109,7 +111,7 @@ func (c *FakeLimitRanges) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeLimitRanges) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.LimitRangeList{}) @@ -117,7 +119,7 @@ func (c *FakeLimitRanges) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched limitRange. 
-func (c *FakeLimitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.LimitRange, err error) { +func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.LimitRange, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &corev1.LimitRange{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 21387b5e25..12b6851687 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var namespacesResource = schema.GroupVersionResource{Group: "", Version: "v1", R var namespacesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *FakeNamespaces) Get(name string, options v1.GetOptions) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Namespace, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(namespacesResource, name), &corev1.Namespace{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeNamespaces) Get(name string, options v1.GetOptions) (result *corev1 } // List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *FakeNamespaces) List(opts v1.ListOptions) (result *corev1.NamespaceList, err error) { +func (c *FakeNamespaces) List(ctx context.Context, opts v1.ListOptions) (result *corev1.NamespaceList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &corev1.NamespaceList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeNamespaces) List(opts v1.ListOptions) (result *corev1.NamespaceList } // Watch returns a watch.Interface that watches the requested namespaces. -func (c *FakeNamespaces) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeNamespaces) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts)) } // Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Create(namespace *corev1.Namespace) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Create(ctx context.Context, namespace *corev1.Namespace, opts v1.CreateOptions) (result *corev1.Namespace, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &corev1.Namespace{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeNamespaces) Create(namespace *corev1.Namespace) (result *corev1.Nam } // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. 
-func (c *FakeNamespaces) Update(namespace *corev1.Namespace) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Update(ctx context.Context, namespace *corev1.Namespace, opts v1.UpdateOptions) (result *corev1.Namespace, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &corev1.Namespace{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakeNamespaces) Update(namespace *corev1.Namespace) (result *corev1.Nam // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNamespaces) UpdateStatus(namespace *corev1.Namespace) (*corev1.Namespace, error) { +func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *corev1.Namespace, opts v1.UpdateOptions) (*corev1.Namespace, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &corev1.Namespace{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakeNamespaces) UpdateStatus(namespace *corev1.Namespace) (*corev1.Name } // Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *FakeNamespaces) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeNamespaces) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(namespacesResource, name), &corev1.Namespace{}) return err } // Patch applies the patch and returns the patched namespace. -func (c *FakeNamespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Namespace, err error) { +func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Namespace, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &corev1.Namespace{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index bcde116a4e..0d9f566b12 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var nodesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resour var nodesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"} // Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *FakeNodes) Get(name string, options v1.GetOptions) (result *corev1.Node, err error) { +func (c *FakeNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Node, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(nodesResource, name), &corev1.Node{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeNodes) Get(name string, options v1.GetOptions) (result *corev1.Node } // List takes label and field selectors, and returns the list of Nodes that match those selectors. 
-func (c *FakeNodes) List(opts v1.ListOptions) (result *corev1.NodeList, err error) { +func (c *FakeNodes) List(ctx context.Context, opts v1.ListOptions) (result *corev1.NodeList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &corev1.NodeList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeNodes) List(opts v1.ListOptions) (result *corev1.NodeList, err erro } // Watch returns a watch.Interface that watches the requested nodes. -func (c *FakeNodes) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) } // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Create(node *corev1.Node) (result *corev1.Node, err error) { +func (c *FakeNodes) Create(ctx context.Context, node *corev1.Node, opts v1.CreateOptions) (result *corev1.Node, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(nodesResource, node), &corev1.Node{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeNodes) Create(node *corev1.Node) (result *corev1.Node, err error) { } // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Update(node *corev1.Node) (result *corev1.Node, err error) { +func (c *FakeNodes) Update(ctx context.Context, node *corev1.Node, opts v1.UpdateOptions) (result *corev1.Node, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(nodesResource, node), &corev1.Node{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakeNodes) Update(node *corev1.Node) (result *corev1.Node, err error) { // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNodes) UpdateStatus(node *corev1.Node) (*corev1.Node, error) { +func (c *FakeNodes) UpdateStatus(ctx context.Context, node *corev1.Node, opts v1.UpdateOptions) (*corev1.Node, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &corev1.Node{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakeNodes) UpdateStatus(node *corev1.Node) (*corev1.Node, error) { } // Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *FakeNodes) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeNodes) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(nodesResource, name), &corev1.Node{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeNodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeNodes) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(nodesResource, listOptions) _, err := c.Fake.Invokes(action, &corev1.NodeList{}) @@ -121,7 +123,7 @@ func (c *FakeNodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L } // Patch applies the patch and returns the patched node. 
-func (c *FakeNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Node, err error) { +func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Node, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &corev1.Node{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go index a39022c83f..eccf9fec63 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go @@ -17,13 +17,15 @@ limitations under the License. package fake import ( - "k8s.io/api/core/v1" + "context" + + v1 "k8s.io/api/core/v1" types "k8s.io/apimachinery/pkg/types" core "k8s.io/client-go/testing" ) // TODO: Should take a PatchType as an argument probably. -func (c *FakeNodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { +func (c *FakeNodes) PatchStatus(_ context.Context, nodeName string, data []byte) (*v1.Node, error) { // TODO: Should be configurable to support additional patch strategies. pt := types.StrategicMergePatchType obj, err := c.Fake.Invokes( diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index 843f323075..f5c9f15834 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var persistentvolumesResource = schema.GroupVersionResource{Group: "", Version: var persistentvolumesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PersistentVolume"} // Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *FakePersistentVolumes) Get(name string, options v1.GetOptions) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &corev1.PersistentVolume{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakePersistentVolumes) Get(name string, options v1.GetOptions) (result } // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *FakePersistentVolumes) List(opts v1.ListOptions) (result *corev1.PersistentVolumeList, err error) { +func (c *FakePersistentVolumes) List(ctx context.Context, opts v1.ListOptions) (result *corev1.PersistentVolumeList, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &corev1.PersistentVolumeList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakePersistentVolumes) List(opts v1.ListOptions) (result *corev1.Persis } // Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *FakePersistentVolumes) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePersistentVolumes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts)) } // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Create(persistentVolume *corev1.PersistentVolume) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.CreateOptions) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &corev1.PersistentVolume{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakePersistentVolumes) Create(persistentVolume *corev1.PersistentVolume } // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Update(persistentVolume *corev1.PersistentVolume) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.UpdateOptions) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &corev1.PersistentVolume{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakePersistentVolumes) Update(persistentVolume *corev1.PersistentVolume // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *corev1.PersistentVolume) (*corev1.PersistentVolume, error) { +func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts v1.UpdateOptions) (*corev1.PersistentVolume, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &corev1.PersistentVolume{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *corev1.Persistent } // Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *FakePersistentVolumes) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(persistentvolumesResource, name), &corev1.PersistentVolume{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakePersistentVolumes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions) _, err := c.Fake.Invokes(action, &corev1.PersistentVolumeList{}) @@ -121,7 +123,7 @@ func (c *FakePersistentVolumes) DeleteCollection(options *v1.DeleteOptions, list } // Patch applies the patch and returns the patched persistentVolume. -func (c *FakePersistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolume, err error) { +func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &corev1.PersistentVolume{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index d2557c4c83..de0dd1c1a5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var persistentvolumeclaimsResource = schema.GroupVersionResource{Group: "", Vers var persistentvolumeclaimsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PersistentVolumeClaim"} // Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *FakePersistentVolumeClaims) Get(name string, options v1.GetOptions) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &corev1.PersistentVolumeClaim{}) @@ -50,7 +52,7 @@ func (c *FakePersistentVolumeClaims) Get(name string, options v1.GetOptions) (re } // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *FakePersistentVolumeClaims) List(opts v1.ListOptions) (result *corev1.PersistentVolumeClaimList, err error) { +func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts v1.ListOptions) (result *corev1.PersistentVolumeClaimList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &corev1.PersistentVolumeClaimList{}) @@ -72,14 +74,14 @@ func (c *FakePersistentVolumeClaims) List(opts v1.ListOptions) (result *corev1.P } // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. 
-func (c *FakePersistentVolumeClaims) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) } // Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *corev1.PersistentVolumeClaim) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.CreateOptions) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) @@ -90,7 +92,7 @@ func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *corev1.Persis } // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *corev1.PersistentVolumeClaim) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.UpdateOptions) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) @@ -102,7 +104,7 @@ func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *corev1.Persis // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *corev1.PersistentVolumeClaim) (*corev1.PersistentVolumeClaim, error) { +func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts v1.UpdateOptions) (*corev1.PersistentVolumeClaim, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{}) @@ -113,7 +115,7 @@ func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *corev1. } // Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *FakePersistentVolumeClaims) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(persistentvolumeclaimsResource, c.ns, name), &corev1.PersistentVolumeClaim{}) @@ -121,7 +123,7 @@ func (c *FakePersistentVolumeClaims) Delete(name string, options *v1.DeleteOptio } // DeleteCollection deletes a collection of objects. 
-func (c *FakePersistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.PersistentVolumeClaimList{}) @@ -129,7 +131,7 @@ func (c *FakePersistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, } // Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *FakePersistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) { +func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &corev1.PersistentVolumeClaim{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index ebd473ac75..e408fd0af5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var podsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resourc var podsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} // Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *FakePods) Get(name string, options v1.GetOptions) (result *corev1.Pod, err error) { +func (c *FakePods) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Pod, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(podsResource, c.ns, name), &corev1.Pod{}) @@ -50,7 +52,7 @@ func (c *FakePods) Get(name string, options v1.GetOptions) (result *corev1.Pod, } // List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *FakePods) List(opts v1.ListOptions) (result *corev1.PodList, err error) { +func (c *FakePods) List(ctx context.Context, opts v1.ListOptions) (result *corev1.PodList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &corev1.PodList{}) @@ -72,14 +74,14 @@ func (c *FakePods) List(opts v1.ListOptions) (result *corev1.PodList, err error) } // Watch returns a watch.Interface that watches the requested pods. -func (c *FakePods) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePods) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts)) } // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. 
-func (c *FakePods) Create(pod *corev1.Pod) (result *corev1.Pod, err error) { +func (c *FakePods) Create(ctx context.Context, pod *corev1.Pod, opts v1.CreateOptions) (result *corev1.Pod, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &corev1.Pod{}) @@ -90,7 +92,7 @@ func (c *FakePods) Create(pod *corev1.Pod) (result *corev1.Pod, err error) { } // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Update(pod *corev1.Pod) (result *corev1.Pod, err error) { +func (c *FakePods) Update(ctx context.Context, pod *corev1.Pod, opts v1.UpdateOptions) (result *corev1.Pod, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &corev1.Pod{}) @@ -102,7 +104,7 @@ func (c *FakePods) Update(pod *corev1.Pod) (result *corev1.Pod, err error) { // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePods) UpdateStatus(pod *corev1.Pod) (*corev1.Pod, error) { +func (c *FakePods) UpdateStatus(ctx context.Context, pod *corev1.Pod, opts v1.UpdateOptions) (*corev1.Pod, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &corev1.Pod{}) @@ -113,7 +115,7 @@ func (c *FakePods) UpdateStatus(pod *corev1.Pod) (*corev1.Pod, error) { } // Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *FakePods) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePods) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(podsResource, c.ns, name), &corev1.Pod{}) @@ -121,7 +123,7 @@ func (c *FakePods) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakePods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePods) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.PodList{}) @@ -129,7 +131,7 @@ func (c *FakePods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li } // Patch applies the patch and returns the patched pod. -func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Pod, err error) { +func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Pod, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &corev1.Pod{}) @@ -140,7 +142,7 @@ func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresour } // GetEphemeralContainers takes name of the pod, and returns the corresponding ephemeralContainers object, and an error if there is any. -func (c *FakePods) GetEphemeralContainers(podName string, options v1.GetOptions) (result *corev1.EphemeralContainers, err error) { +func (c *FakePods) GetEphemeralContainers(ctx context.Context, podName string, options v1.GetOptions) (result *corev1.EphemeralContainers, err error) { obj, err := c.Fake. 
Invokes(testing.NewGetSubresourceAction(podsResource, c.ns, "ephemeralcontainers", podName), &corev1.EphemeralContainers{}) @@ -151,7 +153,7 @@ func (c *FakePods) GetEphemeralContainers(podName string, options v1.GetOptions) } // UpdateEphemeralContainers takes the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. -func (c *FakePods) UpdateEphemeralContainers(podName string, ephemeralContainers *corev1.EphemeralContainers) (result *corev1.EphemeralContainers, err error) { +func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *corev1.EphemeralContainers, opts v1.UpdateOptions) (result *corev1.EphemeralContainers, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, ephemeralContainers), &corev1.EphemeralContainers{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index 307f30594e..905cafeae4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var podtemplatesResource = schema.GroupVersionResource{Group: "", Version: "v1", var podtemplatesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodTemplate"} // Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *FakePodTemplates) Get(name string, options v1.GetOptions) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &corev1.PodTemplate{}) @@ -50,7 +52,7 @@ func (c *FakePodTemplates) Get(name string, options v1.GetOptions) (result *core } // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *FakePodTemplates) List(opts v1.ListOptions) (result *corev1.PodTemplateList, err error) { +func (c *FakePodTemplates) List(ctx context.Context, opts v1.ListOptions) (result *corev1.PodTemplateList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &corev1.PodTemplateList{}) @@ -72,14 +74,14 @@ func (c *FakePodTemplates) List(opts v1.ListOptions) (result *corev1.PodTemplate } // Watch returns a watch.Interface that watches the requested podTemplates. -func (c *FakePodTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePodTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts)) } // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. 
-func (c *FakePodTemplates) Create(podTemplate *corev1.PodTemplate) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *corev1.PodTemplate, opts v1.CreateOptions) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &corev1.PodTemplate{}) @@ -90,7 +92,7 @@ func (c *FakePodTemplates) Create(podTemplate *corev1.PodTemplate) (result *core } // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Update(podTemplate *corev1.PodTemplate) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *corev1.PodTemplate, opts v1.UpdateOptions) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &corev1.PodTemplate{}) @@ -101,7 +103,7 @@ func (c *FakePodTemplates) Update(podTemplate *corev1.PodTemplate) (result *core } // Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *FakePodTemplates) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePodTemplates) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(podtemplatesResource, c.ns, name), &corev1.PodTemplate{}) @@ -109,7 +111,7 @@ func (c *FakePodTemplates) Delete(name string, options *v1.DeleteOptions) error } // DeleteCollection deletes a collection of objects. -func (c *FakePodTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePodTemplates) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.PodTemplateList{}) @@ -117,7 +119,7 @@ func (c *FakePodTemplates) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched podTemplate. -func (c *FakePodTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PodTemplate, err error) { +func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &corev1.PodTemplate{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 6de94c1482..12d5d9423a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,7 +42,7 @@ var replicationcontrollersResource = schema.GroupVersionResource{Group: "", Vers var replicationcontrollersKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"} // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *FakeReplicationControllers) Get(name string, options v1.GetOptions) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &corev1.ReplicationController{}) @@ -51,7 +53,7 @@ func (c *FakeReplicationControllers) Get(name string, options v1.GetOptions) (re } // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *FakeReplicationControllers) List(opts v1.ListOptions) (result *corev1.ReplicationControllerList, err error) { +func (c *FakeReplicationControllers) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ReplicationControllerList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &corev1.ReplicationControllerList{}) @@ -73,14 +75,14 @@ func (c *FakeReplicationControllers) List(opts v1.ListOptions) (result *corev1.R } // Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *FakeReplicationControllers) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeReplicationControllers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts)) } // Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Create(replicationController *corev1.ReplicationController) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.CreateOptions) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &corev1.ReplicationController{}) @@ -91,7 +93,7 @@ func (c *FakeReplicationControllers) Create(replicationController *corev1.Replic } // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Update(replicationController *corev1.ReplicationController) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.UpdateOptions) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &corev1.ReplicationController{}) @@ -103,7 +105,7 @@ func (c *FakeReplicationControllers) Update(replicationController *corev1.Replic // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicationControllers) UpdateStatus(replicationController *corev1.ReplicationController) (*corev1.ReplicationController, error) { +func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *corev1.ReplicationController, opts v1.UpdateOptions) (*corev1.ReplicationController, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &corev1.ReplicationController{}) @@ -114,7 +116,7 @@ func (c *FakeReplicationControllers) UpdateStatus(replicationController *corev1. } // Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *FakeReplicationControllers) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(replicationcontrollersResource, c.ns, name), &corev1.ReplicationController{}) @@ -122,7 +124,7 @@ func (c *FakeReplicationControllers) Delete(name string, options *v1.DeleteOptio } // DeleteCollection deletes a collection of objects. -func (c *FakeReplicationControllers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.ReplicationControllerList{}) @@ -130,7 +132,7 @@ func (c *FakeReplicationControllers) DeleteCollection(options *v1.DeleteOptions, } // Patch applies the patch and returns the patched replicationController. -func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ReplicationController, err error) { +func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &corev1.ReplicationController{}) @@ -141,7 +143,7 @@ func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data } // GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. 
Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &autoscalingv1.Scale{}) @@ -152,7 +154,7 @@ func (c *FakeReplicationControllers) GetScale(replicationControllerName string, } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts v1.UpdateOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index b521f7120b..c867270c0b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var resourcequotasResource = schema.GroupVersionResource{Group: "", Version: "v1 var resourcequotasKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ResourceQuota"} // Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *FakeResourceQuotas) Get(name string, options v1.GetOptions) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &corev1.ResourceQuota{}) @@ -50,7 +52,7 @@ func (c *FakeResourceQuotas) Get(name string, options v1.GetOptions) (result *co } // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *FakeResourceQuotas) List(opts v1.ListOptions) (result *corev1.ResourceQuotaList, err error) { +func (c *FakeResourceQuotas) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ResourceQuotaList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &corev1.ResourceQuotaList{}) @@ -72,14 +74,14 @@ func (c *FakeResourceQuotas) List(opts v1.ListOptions) (result *corev1.ResourceQ } // Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *FakeResourceQuotas) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeResourceQuotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts)) } // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. 
-func (c *FakeResourceQuotas) Create(resourceQuota *corev1.ResourceQuota) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.CreateOptions) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &corev1.ResourceQuota{}) @@ -90,7 +92,7 @@ func (c *FakeResourceQuotas) Create(resourceQuota *corev1.ResourceQuota) (result } // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Update(resourceQuota *corev1.ResourceQuota) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.UpdateOptions) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &corev1.ResourceQuota{}) @@ -102,7 +104,7 @@ func (c *FakeResourceQuotas) Update(resourceQuota *corev1.ResourceQuota) (result // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *corev1.ResourceQuota) (*corev1.ResourceQuota, error) { +func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts v1.UpdateOptions) (*corev1.ResourceQuota, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &corev1.ResourceQuota{}) @@ -113,7 +115,7 @@ func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *corev1.ResourceQuota) ( } // Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *FakeResourceQuotas) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(resourcequotasResource, c.ns, name), &corev1.ResourceQuota{}) @@ -121,7 +123,7 @@ func (c *FakeResourceQuotas) Delete(name string, options *v1.DeleteOptions) erro } // DeleteCollection deletes a collection of objects. -func (c *FakeResourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.ResourceQuotaList{}) @@ -129,7 +131,7 @@ func (c *FakeResourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOpt } // Patch applies the patch and returns the patched resourceQuota. -func (c *FakeResourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ResourceQuota, err error) { +func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &corev1.ResourceQuota{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index 47dba9eff4..38703f1468 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var secretsResource = schema.GroupVersionResource{Group: "", Version: "v1", Reso var secretsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} // Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *FakeSecrets) Get(name string, options v1.GetOptions) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Secret, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(secretsResource, c.ns, name), &corev1.Secret{}) @@ -50,7 +52,7 @@ func (c *FakeSecrets) Get(name string, options v1.GetOptions) (result *corev1.Se } // List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *FakeSecrets) List(opts v1.ListOptions) (result *corev1.SecretList, err error) { +func (c *FakeSecrets) List(ctx context.Context, opts v1.ListOptions) (result *corev1.SecretList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &corev1.SecretList{}) @@ -72,14 +74,14 @@ func (c *FakeSecrets) List(opts v1.ListOptions) (result *corev1.SecretList, err } // Watch returns a watch.Interface that watches the requested secrets. -func (c *FakeSecrets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeSecrets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts)) } // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Create(secret *corev1.Secret) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Create(ctx context.Context, secret *corev1.Secret, opts v1.CreateOptions) (result *corev1.Secret, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &corev1.Secret{}) @@ -90,7 +92,7 @@ func (c *FakeSecrets) Create(secret *corev1.Secret) (result *corev1.Secret, err } // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Update(secret *corev1.Secret) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Update(ctx context.Context, secret *corev1.Secret, opts v1.UpdateOptions) (result *corev1.Secret, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &corev1.Secret{}) @@ -101,7 +103,7 @@ func (c *FakeSecrets) Update(secret *corev1.Secret) (result *corev1.Secret, err } // Delete takes name of the secret and deletes it. Returns an error if one occurs. 
-func (c *FakeSecrets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeSecrets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(secretsResource, c.ns, name), &corev1.Secret{}) @@ -109,7 +111,7 @@ func (c *FakeSecrets) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeSecrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeSecrets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.SecretList{}) @@ -117,7 +119,7 @@ func (c *FakeSecrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1 } // Patch applies the patch and returns the patched secret. -func (c *FakeSecrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Secret, err error) { +func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Secret, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &corev1.Secret{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index a65de49911..dd47bee64e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var servicesResource = schema.GroupVersionResource{Group: "", Version: "v1", Res var servicesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"} // Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *FakeServices) Get(name string, options v1.GetOptions) (result *corev1.Service, err error) { +func (c *FakeServices) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.Service, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(servicesResource, c.ns, name), &corev1.Service{}) @@ -50,7 +52,7 @@ func (c *FakeServices) Get(name string, options v1.GetOptions) (result *corev1.S } // List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *FakeServices) List(opts v1.ListOptions) (result *corev1.ServiceList, err error) { +func (c *FakeServices) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ServiceList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &corev1.ServiceList{}) @@ -72,14 +74,14 @@ func (c *FakeServices) List(opts v1.ListOptions) (result *corev1.ServiceList, er } // Watch returns a watch.Interface that watches the requested services. -func (c *FakeServices) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. 
InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) } // Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Create(service *corev1.Service) (result *corev1.Service, err error) { +func (c *FakeServices) Create(ctx context.Context, service *corev1.Service, opts v1.CreateOptions) (result *corev1.Service, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &corev1.Service{}) @@ -90,7 +92,7 @@ func (c *FakeServices) Create(service *corev1.Service) (result *corev1.Service, } // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Update(service *corev1.Service) (result *corev1.Service, err error) { +func (c *FakeServices) Update(ctx context.Context, service *corev1.Service, opts v1.UpdateOptions) (result *corev1.Service, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &corev1.Service{}) @@ -102,7 +104,7 @@ func (c *FakeServices) Update(service *corev1.Service) (result *corev1.Service, // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServices) UpdateStatus(service *corev1.Service) (*corev1.Service, error) { +func (c *FakeServices) UpdateStatus(ctx context.Context, service *corev1.Service, opts v1.UpdateOptions) (*corev1.Service, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &corev1.Service{}) @@ -113,7 +115,7 @@ func (c *FakeServices) UpdateStatus(service *corev1.Service) (*corev1.Service, e } // Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeServices) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &corev1.Service{}) @@ -121,7 +123,7 @@ func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error { } // Patch applies the patch and returns the patched service. -func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Service, err error) { +func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.Service, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &corev1.Service{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index 5b6d8f8be5..78556d147d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -19,6 +19,9 @@ limitations under the License. 
package fake import ( + "context" + + authenticationv1 "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +42,7 @@ var serviceaccountsResource = schema.GroupVersionResource{Group: "", Version: "v var serviceaccountsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"} // Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *FakeServiceAccounts) Get(name string, options v1.GetOptions) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Get(ctx context.Context, name string, options v1.GetOptions) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &corev1.ServiceAccount{}) @@ -50,7 +53,7 @@ func (c *FakeServiceAccounts) Get(name string, options v1.GetOptions) (result *c } // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *FakeServiceAccounts) List(opts v1.ListOptions) (result *corev1.ServiceAccountList, err error) { +func (c *FakeServiceAccounts) List(ctx context.Context, opts v1.ListOptions) (result *corev1.ServiceAccountList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &corev1.ServiceAccountList{}) @@ -72,14 +75,14 @@ func (c *FakeServiceAccounts) List(opts v1.ListOptions) (result *corev1.ServiceA } // Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *FakeServiceAccounts) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeServiceAccounts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts)) } // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *FakeServiceAccounts) Create(serviceAccount *corev1.ServiceAccount) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts v1.CreateOptions) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &corev1.ServiceAccount{}) @@ -90,7 +93,7 @@ func (c *FakeServiceAccounts) Create(serviceAccount *corev1.ServiceAccount) (res } // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *FakeServiceAccounts) Update(serviceAccount *corev1.ServiceAccount) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts v1.UpdateOptions) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &corev1.ServiceAccount{}) @@ -101,7 +104,7 @@ func (c *FakeServiceAccounts) Update(serviceAccount *corev1.ServiceAccount) (res } // Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. 
-func (c *FakeServiceAccounts) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(serviceaccountsResource, c.ns, name), &corev1.ServiceAccount{}) @@ -109,7 +112,7 @@ func (c *FakeServiceAccounts) Delete(name string, options *v1.DeleteOptions) err } // DeleteCollection deletes a collection of objects. -func (c *FakeServiceAccounts) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &corev1.ServiceAccountList{}) @@ -117,7 +120,7 @@ func (c *FakeServiceAccounts) DeleteCollection(options *v1.DeleteOptions, listOp } // Patch applies the patch and returns the patched serviceAccount. -func (c *FakeServiceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ServiceAccount, err error) { +func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &corev1.ServiceAccount{}) @@ -126,3 +129,14 @@ func (c *FakeServiceAccounts) Patch(name string, pt types.PatchType, data []byte } return obj.(*corev1.ServiceAccount), err } + +// CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. +func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts v1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateSubresourceAction(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest), &authenticationv1.TokenRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*authenticationv1.TokenRequest), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go deleted file mode 100644 index a0efbcc2fe..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - authenticationv1 "k8s.io/api/authentication/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeServiceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { - obj, err := c.Fake.Invokes(core.NewCreateSubresourceAction(serviceaccountsResource, name, "token", c.ns, tr), &authenticationv1.TokenRequest{}) - - if obj == nil { - return nil, err - } - return obj.(*authenticationv1.TokenRequest), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go index 6e8591b12b..2cb81aa40b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go @@ -37,3 +37,5 @@ type ReplicationControllerExpansion interface{} type ResourceQuotaExpansion interface{} type SecretExpansion interface{} + +type ServiceAccountExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index 2eeae11a83..c6ecda1f0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type LimitRangesGetter interface { // LimitRangeInterface has methods to work with LimitRange resources. type LimitRangeInterface interface { - Create(*v1.LimitRange) (*v1.LimitRange, error) - Update(*v1.LimitRange) (*v1.LimitRange, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.LimitRange, error) - List(opts metav1.ListOptions) (*v1.LimitRangeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) + Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (*v1.LimitRange, error) + Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (*v1.LimitRange, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.LimitRange, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.LimitRangeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) LimitRangeExpansion } @@ -63,20 +64,20 @@ func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { } // Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. 
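Note: with the hand-written expansion file deleted, CreateToken is now part of the generated clients and follows the same context-aware shape. A sketch of a caller (service account "builder", namespace "ci", and the one-hour expiry are illustrative; assumes the imports from the earlier sketch plus authenticationv1 "k8s.io/api/authentication/v1" and kubernetes "k8s.io/client-go/kubernetes"):

    func requestToken(ctx context.Context, client kubernetes.Interface) (string, error) {
        expiry := int64(3600)
        tr := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{ExpirationSeconds: &expiry},
        }
        // CreateToken POSTs to the "token" subresource of the service account.
        tok, err := client.CoreV1().ServiceAccounts("ci").CreateToken(ctx, "builder", tr, metav1.CreateOptions{})
        if err != nil {
            return "", err
        }
        // On success the server fills in Status.Token with the signed token.
        return tok.Status.Token, nil
    }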
-func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { +func (c *limitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { +func (c *limitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *limitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { +func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} err = c.client.Post(). Namespace(c.ns). Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). Body(limitRange). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { +func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} err = c.client.Put(). Namespace(c.ns). Resource("limitranges"). Name(limitRange.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(limitRange). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error { +func (c *limitRanges) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("limitranges"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
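Note: because every verb now accepts a context, callers can bound the whole round trip client-side; opts.TimeoutSeconds continues to control only the server-side timeout that the generated code turns into Timeout(...). A sketch (the 30-second deadline and namespace "team-a" are illustrative; assumes "time" plus the imports from the earlier sketches):

    func listLimitRanges(client kubernetes.Interface) (*corev1.LimitRangeList, error) {
        // The context cancels the HTTP request itself if the deadline passes.
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()
        return client.CoreV1().LimitRanges("team-a").List(ctx, metav1.ListOptions{})
    }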
-func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *limitRanges) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOption VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { +func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} err = c.client.Patch(pt). Namespace(c.ns). Resource("limitranges"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 8a81fe8507..afd4237bfc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type NamespacesGetter interface { // NamespaceInterface has methods to work with Namespace resources. type NamespaceInterface interface { - Create(*v1.Namespace) (*v1.Namespace, error) - Update(*v1.Namespace) (*v1.Namespace, error) - UpdateStatus(*v1.Namespace) (*v1.Namespace, error) - Delete(name string, options *metav1.DeleteOptions) error - Get(name string, options metav1.GetOptions) (*v1.Namespace, error) - List(opts metav1.ListOptions) (*v1.NamespaceList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) + Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error) + Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) NamespaceExpansion } @@ -61,19 +62,19 @@ func newNamespaces(c *CoreV1Client) *namespaces { } // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. 
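Note: DeleteCollection keeps *metav1.DeleteOptions as a pointer at this revision; only the leading context is new. An illustrative sketch (label selector and namespace assumed, same imports as above):

    // Delete every LimitRange in "team-a" matching app=demo.
    err := client.CoreV1().LimitRanges("team-a").DeleteCollection(ctx,
        &metav1.DeleteOptions{}, // still a pointer in this snapshot of client-go
        metav1.ListOptions{LabelSelector: "app=demo"})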
-func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Namespace, err error) { +func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Get(). Resource("namespaces"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) { +func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, er Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,66 +100,69 @@ func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { +func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Post(). Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). Body(namespace). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { +func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Put(). Resource("namespaces"). Name(namespace.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(namespace). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { +func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Put(). Resource("namespaces"). Name(namespace.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(namespace). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the namespace and deletes it. Returns an error if one occurs. 
-func (c *namespaces) Delete(name string, options *metav1.DeleteOptions) error { +func (c *namespaces) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("namespaces"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { +func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { result = &v1.Namespace{} err = c.client.Patch(pt). Resource("namespaces"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go index 17effe29c6..63482ace7b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -16,7 +16,11 @@ limitations under the License. package v1 -import "k8s.io/api/core/v1" +import ( + "context" + + v1 "k8s.io/api/core/v1" +) // The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. type NamespaceExpansion interface { @@ -26,6 +30,6 @@ type NamespaceExpansion interface { // Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { result = &v1.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) + err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do(context.TODO()).Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index d19fab8952..f90657fd18 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,15 +38,15 @@ type NodesGetter interface { // NodeInterface has methods to work with Node resources. 
type NodeInterface interface { - Create(*v1.Node) (*v1.Node, error) - Update(*v1.Node) (*v1.Node, error) - UpdateStatus(*v1.Node) (*v1.Node, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Node, error) - List(opts metav1.ListOptions) (*v1.NodeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) + Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) + Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) NodeExpansion } @@ -62,19 +63,19 @@ func newNodes(c *CoreV1Client) *nodes { } // Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, err error) { +func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Get(). Resource("nodes"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { +func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -84,13 +85,13 @@ func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -100,59 +101,61 @@ func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { +func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Post(). 
Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(node). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { +func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Put(). Resource("nodes"). Name(node.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(node). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { +func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Put(). Resource("nodes"). Name(node.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(node). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error { +func (c *nodes) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("nodes"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *nodes) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -162,19 +165,20 @@ func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions meta VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched node. -func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { +func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { result = &v1.Node{} err = c.client.Patch(pt). Resource("nodes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go index 5db29c3f74..bdf7bfed8d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -17,7 +17,9 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + "context" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" ) @@ -25,19 +27,19 @@ import ( type NodeExpansion interface { // PatchStatus modifies the status of an existing node. It returns the copy // of the node that the server returns, or an error. 
- PatchStatus(nodeName string, data []byte) (*v1.Node, error) + PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) } // PatchStatus modifies the status of an existing node. It returns the copy of // the node that the server returns, or an error. -func (c *nodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { +func (c *nodes) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) { result := &v1.Node{} err := c.client.Patch(types.StrategicMergePatchType). Resource("nodes"). Name(nodeName). SubResource("status"). Body(data). - Do(). + Do(ctx). Into(result) return result, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index 74514825e9..9392aa0e74 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,15 +38,15 @@ type PersistentVolumesGetter interface { // PersistentVolumeInterface has methods to work with PersistentVolume resources. type PersistentVolumeInterface interface { - Create(*v1.PersistentVolume) (*v1.PersistentVolume, error) - Update(*v1.PersistentVolume) (*v1.PersistentVolume, error) - UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PersistentVolume, error) - List(opts metav1.ListOptions) (*v1.PersistentVolumeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) + Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error) + Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolume, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) PersistentVolumeExpansion } @@ -62,19 +63,19 @@ func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { } // Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. 
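Note: the hand-written expansion methods migrate unevenly: nodes.PatchStatus gains a leading context, while namespaces.Finalize keeps its old signature and supplies context.TODO() internally. A sketch of both call sites (the node name and patch payload are illustrative; same imports as above):

    func expansionCalls(ctx context.Context, client kubernetes.Interface, nsObj *corev1.Namespace) error {
        // PatchStatus now threads the caller's context (still a strategic merge patch).
        if _, err := client.CoreV1().Nodes().PatchStatus(ctx, "node-1",
            []byte(`{"status":{"allocatable":{"cpu":"4"}}}`)); err != nil {
            return err
        }
        // Finalize is unchanged for callers; the generated body now calls Do(context.TODO()).
        _, err := client.CoreV1().Namespaces().Finalize(nsObj)
        return err
    }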
-func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Get(). Resource("persistentvolumes"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { +func (c *persistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -84,13 +85,13 @@ func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.Persistent Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *persistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -100,59 +101,61 @@ func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, err Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Post(). Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolume). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Put(). Resource("persistentvolumes"). Name(persistentVolume.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolume). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Put(). Resource("persistentvolumes"). Name(persistentVolume.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolume). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) error { +func (c *persistentVolumes) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("persistentvolumes"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *persistentVolumes) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -162,19 +165,20 @@ func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, list VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { +func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} err = c.client.Patch(pt). Resource("persistentvolumes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 410ab37dcb..f91db21e82 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,15 +38,15 @@ type PersistentVolumeClaimsGetter interface { // PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. 
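Note: UpdateStatus follows the same pattern as Update, targeting the status subresource and now carrying UpdateOptions. A sketch (the PV name and status message are illustrative; same imports as above):

    func markReclaimed(ctx context.Context, client kubernetes.Interface) error {
        pv, err := client.CoreV1().PersistentVolumes().Get(ctx, "pv-demo", metav1.GetOptions{})
        if err != nil {
            return err
        }
        pv.Status.Message = "reclaimed by maintenance job"
        // PUT .../persistentvolumes/pv-demo/status
        _, err = client.CoreV1().PersistentVolumes().UpdateStatus(ctx, pv, metav1.UpdateOptions{})
        return err
    }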
type PersistentVolumeClaimInterface interface { - Create(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - Update(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - UpdateStatus(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PersistentVolumeClaim, error) - List(opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) + Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error) + Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolumeClaim, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) PersistentVolumeClaimExpansion } @@ -64,20 +65,20 @@ func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVol } // Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { +func (c *persistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.Persi Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. 
-func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *persistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Post(). Namespace(c.ns). Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolumeClaim). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Put(). Namespace(c.ns). Resource("persistentvolumeclaims"). Name(persistentVolumeClaim.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolumeClaim). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Put(). Namespace(c.ns). Resource("persistentvolumeclaims"). Name(persistentVolumeClaim.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(persistentVolumeClaim). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptions) error { +func (c *persistentVolumeClaims) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("persistentvolumeclaims"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
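Note: Watch(ctx) means cancelling the context now tears down the underlying stream, in addition to the pre-existing w.Stop(). A sketch of a bounded watch loop (namespace illustrative; assumes watch "k8s.io/apimachinery/pkg/watch" plus the imports above):

    func watchClaims(client kubernetes.Interface) error {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel() // closes the watch stream when we return

        w, err := client.CoreV1().PersistentVolumeClaims("team-a").Watch(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        for ev := range w.ResultChan() {
            if ev.Type == watch.Deleted {
                return nil // stop after the first deletion, for illustration
            }
        }
        return nil
    }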
-func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *persistentVolumeClaims) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { +func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} err = c.client.Patch(pt). Namespace(c.ns). Resource("persistentvolumeclaims"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index feacd307f5..cc143a38a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,17 +38,17 @@ type PodsGetter interface { // PodInterface has methods to work with Pod resources. 
type PodInterface interface { - Create(*v1.Pod) (*v1.Pod, error) - Update(*v1.Pod) (*v1.Pod, error) - UpdateStatus(*v1.Pod) (*v1.Pod, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Pod, error) - List(opts metav1.ListOptions) (*v1.PodList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) - GetEphemeralContainers(podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error) - UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (*v1.EphemeralContainers, error) + Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error) + Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Pod, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) + GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error) + UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (*v1.EphemeralContainers, error) PodExpansion } @@ -67,20 +68,20 @@ func newPods(c *CoreV1Client, namespace string) *pods { } // Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err error) { +func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { +func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -91,13 +92,13 @@ func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested pods. 
-func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -108,63 +109,65 @@ func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { +func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Post(). Namespace(c.ns). Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). Body(pod). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { +func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Put(). Namespace(c.ns). Resource("pods"). Name(pod.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(pod). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { +func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Put(). Namespace(c.ns). Resource("pods"). Name(pod.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(pod). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(name string, options *metav1.DeleteOptions) error { +func (c *pods) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("pods"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *pods) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -175,26 +178,27 @@ func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched pod. -func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { +func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { result = &v1.Pod{} err = c.client.Patch(pt). Namespace(c.ns). Resource("pods"). - SubResource(subresources...). Name(name). 
+ SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } // GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any. -func (c *pods) GetEphemeralContainers(podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) { +func (c *pods) GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) { result = &v1.EphemeralContainers{} err = c.client.Get(). Namespace(c.ns). @@ -202,21 +206,22 @@ func (c *pods) GetEphemeralContainers(podName string, options metav1.GetOptions) Name(podName). SubResource("ephemeralcontainers"). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. -func (c *pods) UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (result *v1.EphemeralContainers, err error) { +func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (result *v1.EphemeralContainers, err error) { result = &v1.EphemeralContainers{} err = c.client.Put(). Namespace(c.ns). Resource("pods"). Name(podName). SubResource("ephemeralcontainers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ephemeralContainers). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go index ed876be8b5..e385adf55b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -17,7 +17,9 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + "context" + + v1 "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" "k8s.io/client-go/kubernetes/scheme" restclient "k8s.io/client-go/rest" @@ -32,11 +34,11 @@ type PodExpansion interface { // Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). 
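Note: the alpha ephemeral-containers pair moves with everything else: GetEphemeralContainers reads the subresource and UpdateEphemeralContainers PUTs it back with UpdateOptions. A sketch (pod and container names are illustrative, and the feature gate must be enabled server-side; same imports as above):

    func addDebugger(ctx context.Context, client kubernetes.Interface) error {
        ecs, err := client.CoreV1().Pods("team-a").GetEphemeralContainers(ctx, "debug-target", metav1.GetOptions{})
        if err != nil {
            return err
        }
        ecs.EphemeralContainers = append(ecs.EphemeralContainers, corev1.EphemeralContainer{
            EphemeralContainerCommon: corev1.EphemeralContainerCommon{Name: "debugger", Image: "busybox"},
        })
        _, err = client.CoreV1().Pods("team-a").UpdateEphemeralContainers(ctx, "debug-target", ecs, metav1.UpdateOptions{})
        return err
    }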
func (c *pods) Bind(binding *v1.Binding) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() + return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do(context.TODO()).Error() } func (c *pods) Evict(eviction *policy.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error() + return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(context.TODO()).Error() } // Get constructs a request for getting the logs for a pod diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index 84d7c98059..22b427232e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type PodTemplatesGetter interface { // PodTemplateInterface has methods to work with PodTemplate resources. type PodTemplateInterface interface { - Create(*v1.PodTemplate) (*v1.PodTemplate, error) - Update(*v1.PodTemplate) (*v1.PodTemplate, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PodTemplate, error) - List(opts metav1.ListOptions) (*v1.PodTemplateList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) + Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (*v1.PodTemplate, error) + Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (*v1.PodTemplate, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodTemplate, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PodTemplateList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) PodTemplateExpansion } @@ -63,20 +64,20 @@ func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { } // Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { +func (c *podTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. 
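Note: Bind and Evict are the other pod expansion methods that keep their old shape; both now hard-code context.TODO() when issuing the POST, so call sites are unchanged. A sketch (pod name and namespace illustrative; assumes policy "k8s.io/api/policy/v1beta1" plus the imports above):

    eviction := &policy.Eviction{
        ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "team-a"},
    }
    // POST .../pods/web-0/eviction; no context parameter yet at this revision.
    err := client.CoreV1().Pods("team-a").Evict(eviction)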
-func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { +func (c *podTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *podTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { +func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} err = c.client.Post(). Namespace(c.ns). Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podTemplate). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { +func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} err = c.client.Put(). Namespace(c.ns). Resource("podtemplates"). Name(podTemplate.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podTemplate). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error { +func (c *podTemplates) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("podtemplates"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *podTemplates) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptio VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched podTemplate. 
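// Illustrative use of the new Patch signature, which now takes a context
// and explicit metav1.PatchOptions (a sketch; names are assumptions):
//
//	data := []byte(`{"metadata":{"labels":{"tier":"backend"}}}`)
//	pt, err := clientset.CoreV1().PodTemplates("default").
//	    Patch(ctx, "my-template", types.StrategicMergePatchType, data, metav1.PatchOptions{})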
-func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { +func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} err = c.client.Patch(pt). Namespace(c.ns). Resource("podtemplates"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index dd3182db65..1ecbdaf775 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -38,17 +39,17 @@ type ReplicationControllersGetter interface { // ReplicationControllerInterface has methods to work with ReplicationController resources. type ReplicationControllerInterface interface { - Create(*v1.ReplicationController) (*v1.ReplicationController, error) - Update(*v1.ReplicationController) (*v1.ReplicationController, error) - UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ReplicationController, error) - List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) - GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error) + Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicationController, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicationControllerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) + GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts 
metav1.UpdateOptions) (*autoscalingv1.Scale, error) ReplicationControllerExpansion } @@ -68,20 +69,20 @@ func newReplicationControllers(c *CoreV1Client, namespace string) *replicationCo } // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *replicationControllers) Get(name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { +func (c *replicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -92,13 +93,13 @@ func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.Repli Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *replicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -109,63 +110,65 @@ func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Post(). Namespace(c.ns). Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicationController). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). 
Name(replicationController.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicationController). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). Name(replicationController.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicationController). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptions) error { +func (c *replicationControllers) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("replicationcontrollers"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *replicationControllers) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -176,26 +179,27 @@ func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { +func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicationcontrollers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } // GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. -func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { +func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Get(). Namespace(c.ns). @@ -203,21 +207,22 @@ func (c *replicationControllers) GetScale(replicationControllerName string, opti Name(replicationControllerName). SubResource("scale"). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). 
Into(result) return } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { +func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 5a178990ec..12dc525eeb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,15 +38,15 @@ type ResourceQuotasGetter interface { // ResourceQuotaInterface has methods to work with ResourceQuota resources. type ResourceQuotaInterface interface { - Create(*v1.ResourceQuota) (*v1.ResourceQuota, error) - Update(*v1.ResourceQuota) (*v1.ResourceQuota, error) - UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ResourceQuota, error) - List(opts metav1.ListOptions) (*v1.ResourceQuotaList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) + Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error) + Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ResourceQuota, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ResourceQuotaList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) ResourceQuotaExpansion } @@ -64,20 +65,20 @@ func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { } // Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. 
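// Illustrative Get with the added context parameter (a sketch; "clientset"
// and "ctx" are assumed):
//
//	quota, err := clientset.CoreV1().ResourceQuotas("team-a").
//	    Get(ctx, "compute-quota", metav1.GetOptions{})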
-func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { +func (c *resourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuota Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *resourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Post(). Namespace(c.ns). Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). Body(resourceQuota). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Put(). Namespace(c.ns). Resource("resourcequotas"). Name(resourceQuota.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(resourceQuota). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Put(). Namespace(c.ns). Resource("resourcequotas"). 
Name(resourceQuota.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(resourceQuota). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) error { +func (c *resourceQuotas) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("resourcequotas"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *resourceQuotas) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOpt VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched resourceQuota. -func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { +func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourcequotas"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 85c143b173..fb9ff7fb32 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type SecretsGetter interface { // SecretInterface has methods to work with Secret resources. 
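// Sketch of creating a Secret through the context-aware interface below
// (names are illustrative, not part of the generated code):
//
//	secret := &v1.Secret{
//	    ObjectMeta: metav1.ObjectMeta{Name: "db-creds"},
//	    StringData: map[string]string{"password": "s3cr3t"},
//	}
//	created, err := clientset.CoreV1().Secrets("default").
//	    Create(ctx, secret, metav1.CreateOptions{})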
type SecretInterface interface { - Create(*v1.Secret) (*v1.Secret, error) - Update(*v1.Secret) (*v1.Secret, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Secret, error) - List(opts metav1.ListOptions) (*v1.SecretList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) + Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (*v1.Secret, error) + Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) SecretExpansion } @@ -63,20 +64,20 @@ func newSecrets(c *CoreV1Client, namespace string) *secrets { } // Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret, err error) { +func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { result = &v1.Secret{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) { +func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err erro Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { +func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { result = &v1.Secret{} err = c.client.Post(). Namespace(c.ns). 
Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(secret). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { +func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { result = &v1.Secret{} err = c.client.Put(). Namespace(c.ns). Resource("secrets"). Name(secret.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(secret). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error { +func (c *secrets) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("secrets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *secrets) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions me VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { +func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { result = &v1.Secret{} err = c.client.Patch(pt). Namespace(c.ns). Resource("secrets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index b0e09413ef..56630c7cdf 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -37,14 +38,14 @@ type ServicesGetter interface { // ServiceInterface has methods to work with Service resources. 
type ServiceInterface interface { - Create(*v1.Service) (*v1.Service, error) - Update(*v1.Service) (*v1.Service, error) - UpdateStatus(*v1.Service) (*v1.Service, error) - Delete(name string, options *metav1.DeleteOptions) error - Get(name string, options metav1.GetOptions) (*v1.Service, error) - List(opts metav1.ListOptions) (*v1.ServiceList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) + Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error) + Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) ServiceExpansion } @@ -63,20 +64,20 @@ func newServices(c *CoreV1Client, namespace string) *services { } // Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Service, err error) { +func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Get(). Namespace(c.ns). Resource("services"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { +func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err er Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,71 +105,74 @@ func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { +func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Post(). 
Namespace(c.ns). Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). Body(service). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { +func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Put(). Namespace(c.ns). Resource("services"). Name(service.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(service). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { +func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Put(). Namespace(c.ns). Resource("services"). Name(service.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(service). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *metav1.DeleteOptions) error { +func (c *services) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("services"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched service. -func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { +func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { result = &v1.Service{} err = c.client.Patch(pt). Namespace(c.ns). Resource("services"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index 50af6a21c9..671c3726f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -19,8 +19,10 @@ limitations under the License. package v1 import ( + "context" "time" + authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -37,14 +39,16 @@ type ServiceAccountsGetter interface { // ServiceAccountInterface has methods to work with ServiceAccount resources. 
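// Note that at this point in the migration Delete still takes
// *metav1.DeleteOptions; the pointer is replaced with a value type in a
// later upstream refactor. Caller sketch (names are assumptions):
//
//	err := clientset.CoreV1().ServiceAccounts("ci").
//	    Delete(ctx, "builder", &metav1.DeleteOptions{})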
type ServiceAccountInterface interface { - Create(*v1.ServiceAccount) (*v1.ServiceAccount, error) - Update(*v1.ServiceAccount) (*v1.ServiceAccount, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ServiceAccount, error) - List(opts metav1.ListOptions) (*v1.ServiceAccountList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) + Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (*v1.ServiceAccount, error) + Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (*v1.ServiceAccount, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceAccount, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceAccountList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) + CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (*authenticationv1.TokenRequest, error) + ServiceAccountExpansion } @@ -63,20 +67,20 @@ func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { } // Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { +func (c *serviceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { +func (c *serviceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +91,13 @@ func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccou Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *serviceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +108,49 @@ func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error Resource("serviceaccounts"). 
VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { +func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} err = c.client.Post(). Namespace(c.ns). Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). Body(serviceAccount). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { +func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} err = c.client.Put(). Namespace(c.ns). Resource("serviceaccounts"). Name(serviceAccount.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(serviceAccount). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) error { +func (c *serviceAccounts) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("serviceaccounts"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *serviceAccounts) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +161,36 @@ func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { +func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} err = c.client.Patch(pt). Namespace(c.ns). Resource("serviceaccounts"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). + Into(result) + return +} + +// CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. 
+func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { + result = &authenticationv1.TokenRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(serviceAccountName). + SubResource("token"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(tokenRequest). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go deleted file mode 100644 index eaf643f154..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - authenticationv1 "k8s.io/api/authentication/v1" -) - -// The ServiceAccountExpansion interface allows manually adding extra methods -// to the ServiceAccountInterface. -type ServiceAccountExpansion interface { - CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) -} - -// CreateToken creates a new token for a serviceaccount. -func (c *serviceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { - result := &authenticationv1.TokenRequest{} - err := c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - SubResource("token"). - Name(name). - Body(tr). - Do(). - Into(result) - return result, err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go index 417514761f..6f39aa7141 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/discovery/v1alpha1" @@ -37,14 +38,14 @@ type EndpointSlicesGetter interface { // EndpointSliceInterface has methods to work with EndpointSlice resources. 
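// Sketch of the context-aware Watch below: cancelling the context now
// terminates the underlying watch request (names are assumptions):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	w, err := clientset.DiscoveryV1alpha1().EndpointSlices("default").
//	    Watch(ctx, metav1.ListOptions{})
//	if err == nil {
//	    for ev := range w.ResultChan() {
//	        _ = ev // handle watch events until ctx is cancelled
//	    }
//	}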
type EndpointSliceInterface interface { - Create(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) - Update(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.EndpointSlice, error) - List(opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) + Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.CreateOptions) (*v1alpha1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.UpdateOptions) (*v1alpha1.EndpointSlice, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.EndpointSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EndpointSlice, err error) EndpointSliceExpansion } @@ -63,20 +64,20 @@ func newEndpointSlices(c *DiscoveryV1alpha1Client, namespace string) *endpointSl } // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { +func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { result = &v1alpha1.EndpointSlice{} err = c.client.Get(). Namespace(c.ns). Resource("endpointslices"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *endpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { +func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *endpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSli Resource("endpointslices"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("endpointslices"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a endpointSlice and creates it. 
Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { +func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.CreateOptions) (result *v1alpha1.EndpointSlice, err error) { result = &v1alpha1.EndpointSlice{} err = c.client.Post(). Namespace(c.ns). Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpointSlice). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { +func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.UpdateOptions) (result *v1alpha1.EndpointSlice, err error) { result = &v1alpha1.EndpointSlice{} err = c.client.Put(). Namespace(c.ns). Resource("endpointslices"). Name(endpointSlice.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpointSlice). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error { +func (c *endpointSlices) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("endpointslices"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *endpointSlices) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { +func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { result = &v1alpha1.EndpointSlice{} err = c.client.Patch(pt). Namespace(c.ns). Resource("endpointslices"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go index 9d17306a46..61d6a5873f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1alpha1 "k8s.io/api/discovery/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var endpointslicesResource = schema.GroupVersionResource{Group: "discovery.k8s.i var endpointslicesKind = schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1alpha1", Kind: "EndpointSlice"} // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *FakeEndpointSlices) Get(name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1alpha1.EndpointSlice{}) @@ -50,7 +52,7 @@ func (c *FakeEndpointSlices) Get(name string, options v1.GetOptions) (result *v1 } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *FakeEndpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { +func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1alpha1.EndpointSliceList{}) @@ -72,14 +74,14 @@ func (c *FakeEndpointSlices) List(opts v1.ListOptions) (result *v1alpha1.Endpoin } // Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *FakeEndpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.CreateOptions) (result *v1alpha1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1alpha1.EndpointSlice{}) @@ -90,7 +92,7 @@ func (c *FakeEndpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (resu } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.UpdateOptions) (result *v1alpha1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1alpha1.EndpointSlice{}) @@ -101,7 +103,7 @@ func (c *FakeEndpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (resu } // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *FakeEndpointSlices) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. 
Invokes(testing.NewDeleteAction(endpointslicesResource, c.ns, name), &v1alpha1.EndpointSlice{}) @@ -109,7 +111,7 @@ func (c *FakeEndpointSlices) Delete(name string, options *v1.DeleteOptions) erro } // DeleteCollection deletes a collection of objects. -func (c *FakeEndpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.EndpointSliceList{}) @@ -117,7 +119,7 @@ func (c *FakeEndpointSlices) DeleteCollection(options *v1.DeleteOptions, listOpt } // Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1alpha1.EndpointSlice{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go index bba656b8fd..2989fc953b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/discovery/v1beta1" @@ -37,14 +38,14 @@ type EndpointSlicesGetter interface { // EndpointSliceInterface has methods to work with EndpointSlice resources. 
type EndpointSliceInterface interface { - Create(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error) - Update(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.EndpointSlice, error) - List(opts v1.ListOptions) (*v1beta1.EndpointSliceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) + Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (*v1beta1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (*v1beta1.EndpointSlice, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EndpointSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EndpointSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) EndpointSliceExpansion } @@ -63,20 +64,20 @@ func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSli } // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { +func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { result = &v1beta1.EndpointSlice{} err = c.client.Get(). Namespace(c.ns). Resource("endpointslices"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *endpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { +func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *endpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSlic Resource("endpointslices"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("endpointslices"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a endpointSlice and creates it. 
Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { +func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { result = &v1beta1.EndpointSlice{} err = c.client.Post(). Namespace(c.ns). Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpointSlice). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { +func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { result = &v1beta1.EndpointSlice{} err = c.client.Put(). Namespace(c.ns). Resource("endpointslices"). Name(endpointSlice.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(endpointSlice). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error { +func (c *endpointSlices) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("endpointslices"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *endpointSlices) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) { +func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { result = &v1beta1.EndpointSlice{} err = c.client.Patch(pt). Namespace(c.ns). Resource("endpointslices"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go index ba13afea41..c67678331e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -19,6 +19,8 @@ limitations under the License. 
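Besides threading the context, the mutating verbs above now call VersionedParams(&opts, scheme.ParameterCodec), which encodes the options struct into request query parameters (for example fieldManager=... and dryRun=All). A sketch of what that enables for callers; the slice name, namespace, and field manager are illustrative:

```go
package main

import (
	"context"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createSliceDryRun shows the options plumbing added above: CreateOptions
// fields travel as query parameters on the POST issued by the generated
// client, so server-side dry-run and field-manager attribution work through
// the typed API.
func createSliceDryRun(ctx context.Context, cs kubernetes.Interface) error {
	slice := &discoveryv1beta1.EndpointSlice{
		ObjectMeta:  metav1.ObjectMeta{Name: "example-abc12"},
		AddressType: discoveryv1beta1.AddressTypeIPv4,
	}
	_, err := cs.DiscoveryV1beta1().EndpointSlices("default").Create(ctx, slice, metav1.CreateOptions{
		FieldManager: "my-controller",           // recorded in managedFields
		DryRun:       []string{metav1.DryRunAll}, // validated server-side, not persisted
	})
	return err
}
```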
package fake import ( + "context" + v1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var endpointslicesResource = schema.GroupVersionResource{Group: "discovery.k8s.i var endpointslicesKind = schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1beta1", Kind: "EndpointSlice"} // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *FakeEndpointSlices) Get(name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) @@ -50,7 +52,7 @@ func (c *FakeEndpointSlices) Get(name string, options v1.GetOptions) (result *v1 } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *FakeEndpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { +func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{}) @@ -72,14 +74,14 @@ func (c *FakeEndpointSlices) List(opts v1.ListOptions) (result *v1beta1.Endpoint } // Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *FakeEndpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) @@ -90,7 +92,7 @@ func (c *FakeEndpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (resul } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) @@ -101,7 +103,7 @@ func (c *FakeEndpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (resul } // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *FakeEndpointSlices) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. 
Invokes(testing.NewDeleteAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) @@ -109,7 +111,7 @@ func (c *FakeEndpointSlices) Delete(name string, options *v1.DeleteOptions) erro } // DeleteCollection deletes a collection of objects. -func (c *FakeEndpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{}) @@ -117,7 +119,7 @@ func (c *FakeEndpointSlices) DeleteCollection(options *v1.DeleteOptions, listOpt } // Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) { +func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index 143281b25c..22eed88007 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/events/v1beta1" @@ -37,14 +38,14 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. type EventInterface interface { - Create(*v1beta1.Event) (*v1beta1.Event, error) - Update(*v1beta1.Event) (*v1beta1.Event, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Event, error) - List(opts v1.ListOptions) (*v1beta1.EventList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) + Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (*v1beta1.Event, error) + Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (*v1beta1.Event, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Event, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EventList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) EventExpansion } @@ -63,20 +64,20 @@ func newEvents(c *EventsV1beta1Client, namespace string) *events { } // Get takes name of the event, and returns the corresponding event object, and an error if there is any. 
-func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { +func (c *events) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { result = &v1beta1.Event{} err = c.client.Get(). Namespace(c.ns). Resource("events"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { +func (c *events) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *events) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { +func (c *events) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { result = &v1beta1.Event{} err = c.client.Post(). Namespace(c.ns). Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). Body(event). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { +func (c *events) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { result = &v1beta1.Event{} err = c.client.Put(). Namespace(c.ns). Resource("events"). Name(event.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(event). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *v1.DeleteOptions) error { +func (c *events) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("events"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
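Watch() becoming Watch(ctx) means a watch stream can now be torn down by cancelling the caller's context, in addition to watch.Interface.Stop(). A small sketch under the same vendored-client assumption; the namespace is illustrative:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchEventsBriefly starts a watch and cancels it after the first delivery;
// cancelling the context ends the underlying streaming request, after which
// the result channel closes and the loop exits.
func watchEventsBriefly(cs kubernetes.Interface) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	w, err := cs.EventsV1beta1().Events("default").Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		fmt.Println("event:", ev.Type)
		cancel() // stop after the first event; the channel then closes
	}
	return nil
}
```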
-func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *events) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.List VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { +func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { result = &v1beta1.Event{} err = c.client.Patch(pt). Namespace(c.ns). Resource("events"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 312ee4283d..e0ae41dfe7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + "context" "fmt" "k8s.io/api/events/v1beta1" @@ -51,7 +52,7 @@ func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). - Do(). + Do(context.TODO()). Into(result) return result, err } @@ -72,7 +73,7 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, Resource("events"). Name(event.Name). Body(event). - Do(). + Do(context.TODO()). Into(result) return result, err } @@ -92,7 +93,7 @@ func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1 Resource("events"). Name(event.Name). Body(data). - Do(). + Do(context.TODO()). Into(result) return result, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index ef76ec1318..41c6865abe 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var eventsResource = schema.GroupVersionResource{Group: "events.k8s.io", Version var eventsKind = schema.GroupVersionKind{Group: "events.k8s.io", Version: "v1beta1", Kind: "Event"} // Get takes name of the event, and returns the corresponding event object, and an error if there is any. 
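The hand-written expansion methods above keep their context-free public signatures, so the patch plugs context.TODO() into the now context-aware Do(). The same stopgap applies to any wrapper that cannot thread a context yet; the helper below is hypothetical, not part of the patch:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// GetPodLegacy illustrates the stopgap used by the expansion methods: keep
// the old context-free signature and pass context.TODO() through, so the
// migration can proceed one call chain at a time.
func GetPodLegacy(cs kubernetes.Interface, ns, name string) (*corev1.Pod, error) {
	// context.TODO() is an explicit marker that the caller has no context
	// yet; replace it with a real context once the call chain is migrated.
	return cs.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
}
```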
-func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { +func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1beta1.Event{}) @@ -50,7 +52,7 @@ func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *v1beta1.Ev } // List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { +func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1beta1.EventList{}) @@ -72,14 +74,14 @@ func (c *FakeEvents) List(opts v1.ListOptions) (result *v1beta1.EventList, err e } // Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { +func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1beta1.Event{}) @@ -90,7 +92,7 @@ func (c *FakeEvents) Create(event *v1beta1.Event) (result *v1beta1.Event, err er } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { +func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1beta1.Event{}) @@ -101,7 +103,7 @@ func (c *FakeEvents) Update(event *v1beta1.Event) (result *v1beta1.Event, err er } // Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeEvents) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &v1beta1.Event{}) @@ -109,7 +111,7 @@ func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeEvents) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.EventList{}) @@ -117,7 +119,7 @@ func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1. } // Patch applies the patch and returns the patched event. 
-func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { +func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1beta1.Event{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index 93b1ae9b6d..d1aa54c989 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/extensions/v1beta1" @@ -37,15 +38,15 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. type DaemonSetInterface interface { - Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error) - List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) + Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error) + Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.DaemonSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DaemonSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) DaemonSetExpansion } @@ -64,20 +65,20 @@ func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { } // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
-func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { +func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, e Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Post(). Namespace(c.ns). Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Put(). Namespace(c.ns). Resource("daemonsets"). Name(daemonSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(daemonSet). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *daemonSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
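UpdateStatus above follows the same pattern as Update: context first, explicit UpdateOptions, and the PUT still targets the status subresource. A sketch of a migrated status write, with an illustrative mutation:

```go
package main

import (
	"context"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bumpObservedGeneration writes to daemonsets/<name>/status through the
// migrated client: the request is bounded by ctx, and UpdateOptions is now
// an explicit argument rather than implied.
func bumpObservedGeneration(ctx context.Context, cs kubernetes.Interface, ds *extensionsv1beta1.DaemonSet) (*extensionsv1beta1.DaemonSet, error) {
	ds.Status.ObservedGeneration = ds.Generation
	return cs.ExtensionsV1beta1().DaemonSets(ds.Namespace).UpdateStatus(ctx, ds, metav1.UpdateOptions{})
}
```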
-func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *daemonSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1. VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { +func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("daemonsets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 5557b9f2b1..70c07fa964 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/extensions/v1beta1" @@ -37,17 +38,17 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. 
type DeploymentInterface interface { - Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) - UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) - List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) - GetScale(deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(deploymentName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) + Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) + GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) DeploymentExpansion } @@ -67,20 +68,20 @@ func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { } // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { +func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { +func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -91,13 +92,13 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested deployments. 
-func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -108,63 +109,65 @@ func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Post(). Namespace(c.ns). Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deployment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(deployment). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { +func (c *deployments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("deployments"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *deployments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -175,26 +178,27 @@ func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1 VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched deployment. 
-func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { +func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} err = c.client.Patch(pt). Namespace(c.ns). Resource("deployments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } // GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { +func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} err = c.client.Get(). Namespace(c.ns). @@ -202,21 +206,22 @@ func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (re Name(deploymentName). SubResource("scale"). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). Name(deploymentName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go index 24734be6a6..1b3df8c034 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -16,7 +16,11 @@ limitations under the License. package v1beta1 -import "k8s.io/api/extensions/v1beta1" +import ( + "context" + + "k8s.io/api/extensions/v1beta1" +) // The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. type DeploymentExpansion interface { @@ -25,5 +29,5 @@ type DeploymentExpansion interface { // Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. 
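GetScale and UpdateScale above complete the pattern for subresources: the scale read-modify-write loop now carries a context end to end. A sketch, with illustrative namespace and deployment name:

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeployment performs the scale round trip shown in the hunks above:
// GET .../deployments/<name>/scale, mutate replicas, then PUT it back.
func scaleDeployment(ctx context.Context, cs kubernetes.Interface, replicas int32) error {
	client := cs.ExtensionsV1beta1().Deployments("default")

	scale, err := client.GetScale(ctx, "web", metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas

	_, err = client.UpdateScale(ctx, "web", scale, metav1.UpdateOptions{})
	return err
}
```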
func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() + return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do(context.TODO()).Error() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index 4c98660607..dfce4a041d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var daemonsetsResource = schema.GroupVersionResource{Group: "extensions", Versio var daemonsetsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "DaemonSet"} // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { +func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) @@ -50,7 +52,7 @@ func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { +func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta1.DaemonSetList{}) @@ -72,14 +74,14 @@ func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetLis } // Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) @@ -90,7 +92,7 @@ func (c *FakeDaemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.D } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. 
-func (c *FakeDaemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { +func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) @@ -102,7 +104,7 @@ func (c *FakeDaemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.D // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) @@ -113,7 +115,7 @@ func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (*v1beta1.Da } // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeDaemonSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) @@ -121,7 +123,7 @@ func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) @@ -129,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { +func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.DaemonSet{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index 7b7df45cc3..737f690e7d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var deploymentsResource = schema.GroupVersionResource{Group: "extensions", Versi var deploymentsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Deployment"} // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) @@ -50,7 +52,7 @@ func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1bet } // List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { +func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) @@ -72,14 +74,14 @@ func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentL } // Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) @@ -90,7 +92,7 @@ func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) @@ -102,7 +104,7 @@ func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) @@ -113,7 +115,7 @@ func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1 } // Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeDeployments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) @@ -121,7 +123,7 @@ func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeDeployments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) @@ -129,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { +func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) @@ -140,7 +142,7 @@ func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, su } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. -func (c *FakeDeployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { +func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &v1beta1.Scale{}) @@ -151,7 +153,7 @@ func (c *FakeDeployments) GetScale(deploymentName string, options v1.GetOptions) } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeDeployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &v1beta1.Scale{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 01c2521401..4dc0f1694e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var ingressesResource = schema.GroupVersionResource{Group: "extensions", Version var ingressesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"} // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) @@ -50,7 +52,7 @@ func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1beta1 } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { +func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) @@ -72,14 +74,14 @@ func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, } // Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) @@ -90,7 +92,7 @@ func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingres } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) @@ -102,7 +104,7 @@ func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingres // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) @@ -113,7 +115,7 @@ func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress } // Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeIngresses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) @@ -121,7 +123,7 @@ func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeIngresses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) @@ -129,7 +131,7 @@ func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go index 7f4d4a5554..5997132d62 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var networkpoliciesResource = schema.GroupVersionResource{Group: "extensions", V var networkpoliciesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "NetworkPolicy"} // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. 
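As an illustration of the migration these signatures force: because the fake clients now share the context-and-options signatures of the real ones, existing tests stop compiling until every call site is updated. A minimal sketch of a migrated test, assuming the aggregate fake clientset at k8s.io/client-go/kubernetes/fake:

    package fake_test

    import (
        "context"
        "testing"

        v1beta1 "k8s.io/api/extensions/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestIngressRoundTrip(t *testing.T) {
        cs := fake.NewSimpleClientset()
        ing := &v1beta1.Ingress{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}}
        // Create and Get now take a context plus typed options structs.
        if _, err := cs.ExtensionsV1beta1().Ingresses("default").Create(context.TODO(), ing, metav1.CreateOptions{}); err != nil {
            t.Fatal(err)
        }
        if _, err := cs.ExtensionsV1beta1().Ingresses("default").Get(context.TODO(), "web", metav1.GetOptions{}); err != nil {
            t.Fatal(err)
        }
    }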
-func (c *FakeNetworkPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1beta1.NetworkPolicy{}) @@ -50,7 +52,7 @@ func (c *FakeNetworkPolicies) Get(name string, options v1.GetOptions) (result *v } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *FakeNetworkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { +func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1beta1.NetworkPolicyList{}) @@ -72,14 +74,14 @@ func (c *FakeNetworkPolicies) List(opts v1.ListOptions) (result *v1beta1.Network } // Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *FakeNetworkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) @@ -90,7 +92,7 @@ func (c *FakeNetworkPolicies) Create(networkPolicy *v1beta1.NetworkPolicy) (resu } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Update(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) @@ -101,7 +103,7 @@ func (c *FakeNetworkPolicies) Update(networkPolicy *v1beta1.NetworkPolicy) (resu } // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *FakeNetworkPolicies) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(networkpoliciesResource, c.ns, name), &v1beta1.NetworkPolicy{}) @@ -109,7 +111,7 @@ func (c *FakeNetworkPolicies) Delete(name string, options *v1.DeleteOptions) err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeNetworkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.NetworkPolicyList{}) @@ -117,7 +119,7 @@ func (c *FakeNetworkPolicies) DeleteCollection(options *v1.DeleteOptions, listOp } // Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1beta1.NetworkPolicy{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go index b97a34416e..5ade0028ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "extensions var podsecuritypoliciesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "PodSecurityPolicy"} // Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (resul } // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { +func (c *FakePodSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.Pod } // Watch returns a watch.Interface that watches the requested podSecurityPolicies. 
-func (c *FakePodSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePodSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts)) } // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityP } // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityP } // Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *FakePodSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePodSecurityPolicies) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePodSecurityPolicies) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) @@ -110,7 +112,7 @@ func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, li } // Patch applies the patch and returns the patched podSecurityPolicy. -func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index 7ed16af904..48e1f62907 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var replicasetsResource = schema.GroupVersionResource{Group: "extensions", Versi var replicasetsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "ReplicaSet"} // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) @@ -50,7 +52,7 @@ func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1bet } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { +func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta1.ReplicaSetList{}) @@ -72,14 +74,14 @@ func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetL } // Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) @@ -90,7 +92,7 @@ func (c *FakeReplicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
-func (c *FakeReplicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) @@ -102,7 +104,7 @@ func (c *FakeReplicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) @@ -113,7 +115,7 @@ func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (*v1beta1 } // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeReplicaSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) @@ -121,7 +123,7 @@ func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) @@ -129,7 +131,7 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { +func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta1.ReplicaSet{}) @@ -140,7 +142,7 @@ func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, su } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { +func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &v1beta1.Scale{}) @@ -151,7 +153,7 @@ func (c *FakeReplicaSets) GetScale(replicaSetName string, options v1.GetOptions) } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
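The scale subresource follows the same pattern: GetScale and UpdateScale now thread a context through, and UpdateScale additionally accepts v1.UpdateOptions. A sketch of a resize helper built on these signatures, assuming a kubernetes.Interface from k8s.io/client-go/kubernetes (the helper name is illustrative):

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // resizeReplicaSet reads the scale subresource, mutates the replica
    // count, and writes it back; ctx bounds both requests.
    func resizeReplicaSet(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
        scale, err := cs.ExtensionsV1beta1().ReplicaSets(ns).GetScale(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        scale.Spec.Replicas = replicas
        _, err = cs.ExtensionsV1beta1().ReplicaSets(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
        return err
    }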
-func (c *FakeReplicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &v1beta1.Scale{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index 4da51c3685..7c162c67c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/extensions/v1beta1" @@ -37,15 +38,15 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) - UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) - List(opts v1.ListOptions) (*v1beta1.IngressList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) + Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) IngressExpansion } @@ -64,20 +65,20 @@ func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { } // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
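With Do(ctx) in the request pipeline, the caller's context now bounds the HTTP request itself, alongside the pre-existing ListOptions.TimeoutSeconds plumbing. A sketch of a deadline-bounded read, assuming a configured kubernetes.Interface (the function name is illustrative):

    package example

    import (
        "context"
        "time"

        v1beta1 "k8s.io/api/extensions/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func getIngressWithDeadline(client kubernetes.Interface) (*v1beta1.Ingress, error) {
        // The GET is cancelled if it has not completed within five seconds.
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        return client.ExtensionsV1beta1().Ingresses("default").Get(ctx, "web", metav1.GetOptions{})
    }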
-func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { +func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Post(). Namespace(c.ns). Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). Resource("ingresses"). Name(ingress.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). Resource("ingresses"). Name(ingress.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { +func (c *ingresses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
-func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *ingresses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Patch(pt). Namespace(c.ns). Resource("ingresses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index 0607e2dd48..92ff833105 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/extensions/v1beta1" @@ -37,14 +38,14 @@ type NetworkPoliciesGetter interface { // NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
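The VersionedParams(&opts, scheme.ParameterCodec) calls added above are what make the new options effective: fields such as PatchOptions.FieldManager are encoded as query parameters on the request rather than silently dropped. A sketch of a patch that records a field manager, assuming a configured kubernetes.Interface (the manager name is illustrative):

    package example

    import (
        "context"

        v1beta1 "k8s.io/api/extensions/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
    )

    func labelIngress(ctx context.Context, client kubernetes.Interface) (*v1beta1.Ingress, error) {
        patch := []byte(`{"metadata":{"labels":{"team":"infra"}}}`)
        // FieldManager travels as the fieldManager query parameter via VersionedParams.
        return client.ExtensionsV1beta1().Ingresses("default").Patch(
            ctx, "web", types.MergePatchType, patch,
            metav1.PatchOptions{FieldManager: "example-controller"})
    }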
type NetworkPolicyInterface interface { - Create(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) - Update(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.NetworkPolicy, error) - List(opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) + Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) NetworkPolicyExpansion } @@ -63,20 +64,20 @@ func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPo } // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { +func (c *networkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { result = &v1beta1.NetworkPolicy{} err = c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { +func (c *networkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPoli Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *networkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a networkPolicy and creates it. 
Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { +func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { result = &v1beta1.NetworkPolicy{} err = c.client.Post(). Namespace(c.ns). Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). Body(networkPolicy). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { +func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { result = &v1beta1.NetworkPolicy{} err = c.client.Put(). Namespace(c.ns). Resource("networkpolicies"). Name(networkPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(networkPolicy). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(name string, options *v1.DeleteOptions) error { +func (c *networkPolicies) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("networkpolicies"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *networkPolicies) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOption VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { +func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { result = &v1beta1.NetworkPolicy{} err = c.client.Patch(pt). Namespace(c.ns). Resource("networkpolicies"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index a947a54a6f..e57b8de71a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/extensions/v1beta1" @@ -37,14 +38,14 @@ type PodSecurityPoliciesGetter interface { // PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. type PodSecurityPolicyInterface interface { - Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) + Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) PodSecurityPolicyExpansion } @@ -61,19 +62,19 @@ func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { } // Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Get(). Resource("podsecuritypolicies"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { +func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested podSecurityPolicies. 
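Watch(ctx) ties the streaming request to the caller's context, so cancelling the context becomes the natural way to terminate a watch, in addition to calling Stop(). A sketch, assuming a configured kubernetes.Interface (the function name is illustrative):

    package example

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func watchPodSecurityPolicies(ctx context.Context, client kubernetes.Interface) error {
        w, err := client.ExtensionsV1beta1().PodSecurityPolicies().Watch(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        defer w.Stop()
        // Cancelling ctx closes the underlying stream, which ends this loop.
        for ev := range w.ResultChan() {
            fmt.Println("event:", ev.Type)
        }
        return ctx.Err()
    }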
-func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Post(). Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podSecurityPolicy). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Put(). Resource("podsecuritypolicies"). Name(podSecurityPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podSecurityPolicy). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { +func (c *podSecurityPolicies) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("podsecuritypolicies"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Patch(pt). Resource("podsecuritypolicies"). - SubResource(subresources...). Name(name). 
+ SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 444029058b..9db0cbfdba 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/extensions/v1beta1" @@ -37,17 +38,17 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error) - List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) - GetScale(replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(replicaSetName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error) + Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ReplicaSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) + GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) ReplicaSetExpansion } @@ -67,20 +68,20 @@ func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { } // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). 
Into(result) return } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { +func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -91,13 +92,13 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -108,63 +109,65 @@ func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Post(). Namespace(c.ns). Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSet.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(replicaSet). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { +func (c *replicaSets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). Name(name). Body(options). - Do(). 
+ Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *replicaSets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -175,26 +178,27 @@ func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1 VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { +func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicasets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } // GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { +func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} err = c.client.Get(). Namespace(c.ns). @@ -202,21 +206,22 @@ func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (re Name(replicaSetName). SubResource("scale"). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). Body(scale). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go index d27f802e1b..f3dd97f195 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiser var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Kind: "FlowSchema"} // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1alpha1.FlowSchema{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeFlowSchemas) Get(name string, options v1.GetOptions) (result *v1alp } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *FakeFlowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { +func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1alpha1.FlowSchemaList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeFlowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchema } // Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeFlowSchemas) Create(flowSchema *v1alpha1.FlowSchema) (result *v1alp } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakeFlowSchemas) Update(flowSchema *v1alpha1.FlowSchema) (result *v1alp // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
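Note that the fake implementations keep building the same verb/resource actions as before; as the hunks above show, Invokes still receives NewRootUpdateAction(flowschemasResource, flowSchema) with the context and the new options discarded, so existing reactor-based tests keep matching. A sketch of simulating an update failure, assuming the aggregate fake clientset and the k8s.io/client-go/testing package:

    package example

    import (
        "context"
        "errors"
        "testing"

        v1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/client-go/kubernetes/fake"
        k8stesting "k8s.io/client-go/testing"
    )

    func TestFlowSchemaUpdateConflict(t *testing.T) {
        cs := fake.NewSimpleClientset()
        // Reactors are still keyed on verb and resource only.
        cs.PrependReactor("update", "flowschemas", func(k8stesting.Action) (bool, runtime.Object, error) {
            return true, nil, errors.New("simulated conflict")
        })
        fs := &v1alpha1.FlowSchema{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
        if _, err := cs.FlowcontrolV1alpha1().FlowSchemas().Update(context.TODO(), fs, metav1.UpdateOptions{}); err == nil {
            t.Fatal("expected the reactor's error")
        }
    }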
-func (c *FakeFlowSchemas) UpdateStatus(flowSchema *v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1alpha1.FlowSchema{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakeFlowSchemas) UpdateStatus(flowSchema *v1alpha1.FlowSchema) (*v1alph } // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(flowschemasResource, name), &v1alpha1.FlowSchema{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.FlowSchemaList{}) @@ -121,7 +123,7 @@ func (c *FakeFlowSchemas) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1alpha1.FlowSchema{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go index 960481c286..d5b4e7998b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "fl var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Kind: "PriorityLevelConfiguration"} // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *FakePriorityLevelConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakePriorityLevelConfigurations) Get(name string, options v1.GetOptions } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { +func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1alpha1.PriorityLevelConfigurationList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakePriorityLevelConfigurations) List(opts v1.ListOptions) (result *v1a } // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakePriorityLevelConfigurations) Create(priorityLevelConfiguration *v1a } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Update(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakePriorityLevelConfigurations) Update(priorityLevelConfiguration *v1a // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakePriorityLevelConfigurations) UpdateStatus(priorityLevelConfiguratio } // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(prioritylevelconfigurationsResource, name), &v1alpha1.PriorityLevelConfiguration{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.PriorityLevelConfigurationList{}) @@ -121,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(options *v1.DeleteOpt } // Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1alpha1.PriorityLevelConfiguration{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go index db71d29a57..ca209f0979 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" @@ -37,15 +38,15 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. 
type FlowSchemaInterface interface { - Create(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) - Update(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) - UpdateStatus(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.FlowSchema, error) - List(opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) + Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (*v1alpha1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) FlowSchemaExpansion } @@ -62,19 +63,19 @@ func newFlowSchemas(c *FlowcontrolV1alpha1Client) *flowSchemas { } // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Get(). Resource("flowschemas"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { +func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -84,13 +85,13 @@ func (c *flowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchemaList Resource("flowschemas"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -100,59 +101,61 @@ func (c *flowSchemas) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("flowschemas"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). 
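The List and Watch bodies still honor ListOptions.TimeoutSeconds as a server-side timeout, while the new ctx argument adds client-side cancellation on top. A sketch combining both against the flowcontrol v1alpha1 client, assuming an already-constructed clientset:

    package main

    import (
        "context"
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // listFlowSchemas shows the two timeout mechanisms side by side:
    // TimeoutSeconds becomes the Timeout(...) on the request builder above,
    // while the context deadline aborts the HTTP request client-side.
    func listFlowSchemas(cs kubernetes.Interface) error {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        serverTimeout := int64(5) // seconds, enforced by the apiserver
        list, err := cs.FlowcontrolV1alpha1().FlowSchemas().List(ctx, metav1.ListOptions{
            TimeoutSeconds: &serverTimeout,
        })
        if err != nil {
            return err
        }
        fmt.Println("flow schemas:", len(list.Items))
        return nil
    }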
- Watch() + Watch(ctx) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Post(). Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). Body(flowSchema). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(flowSchema). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *flowSchemas) UpdateStatus(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(flowSchema). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(name string, options *v1.DeleteOptions) error { +func (c *flowSchemas) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("flowschemas"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *flowSchemas) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -162,19 +165,20 @@ func (c *flowSchemas) DeleteCollection(options *v1.DeleteOptions, listOptions v1 VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { result = &v1alpha1.FlowSchema{} err = c.client.Patch(pt). Resource("flowschemas"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go index eb99cca602..413fea4bc0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" @@ -37,15 +38,15 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. type PriorityLevelConfigurationInterface interface { - Create(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) - Update(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) - UpdateStatus(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) - List(opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) + Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } @@ -62,19 +63,19 @@ func newPriorityLevelConfigurations(c *FlowcontrolV1alpha1Client) *priorityLevel } // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Get(). Resource("prioritylevelconfigurations"). Name(name). VersionedParams(&options, scheme.ParameterCodec). 
- Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { +func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -84,13 +85,13 @@ func (c *priorityLevelConfigurations) List(opts v1.ListOptions) (result *v1alpha Resource("prioritylevelconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -100,59 +101,61 @@ func (c *priorityLevelConfigurations) Watch(opts v1.ListOptions) (watch.Interfac Resource("prioritylevelconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Post(). Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityLevelConfiguration). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityLevelConfiguration). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *priorityLevelConfigurations) UpdateStatus(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityLevelConfiguration). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(name string, options *v1.DeleteOptions) error { +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("prioritylevelconfigurations"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -162,19 +165,20 @@ func (c *priorityLevelConfigurations) DeleteCollection(options *v1.DeleteOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Patch(pt). Resource("prioritylevelconfigurations"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index 58667c481a..c251b66751 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -19,6 +19,8 @@ limitations under the License. 
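The UpdateStatus stanzas above exist because client-gen emits one whenever the API type carries a Status member, as the generated comment notes. A sketch of the opt-out marker on a hypothetical API type (Widget and its sub-types are illustrative, not part of this patch):

    // +genclient
    // +genclient:noStatus
    // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

    // Widget is a hypothetical API type; with the noStatus marker set,
    // client-gen emits no UpdateStatus method on its typed client.
    type Widget struct {
        metav1.TypeMeta   `json:",inline"`
        metav1.ObjectMeta `json:"metadata,omitempty"`

        Spec   WidgetSpec   `json:"spec,omitempty"`
        Status WidgetStatus `json:"status,omitempty"`
    }

    type WidgetSpec struct{}   // illustrative
    type WidgetStatus struct{} // illustrative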
package fake import ( + "context" + networkingv1 "k8s.io/api/networking/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var networkpoliciesResource = schema.GroupVersionResource{Group: "networking.k8s var networkpoliciesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"} // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *FakeNetworkPolicies) Get(name string, options v1.GetOptions) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &networkingv1.NetworkPolicy{}) @@ -50,7 +52,7 @@ func (c *FakeNetworkPolicies) Get(name string, options v1.GetOptions) (result *n } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *FakeNetworkPolicies) List(opts v1.ListOptions) (result *networkingv1.NetworkPolicyList, err error) { +func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *networkingv1.NetworkPolicyList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &networkingv1.NetworkPolicyList{}) @@ -72,14 +74,14 @@ func (c *FakeNetworkPolicies) List(opts v1.ListOptions) (result *networkingv1.Ne } // Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *FakeNetworkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(networkPolicy *networkingv1.NetworkPolicy) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts v1.CreateOptions) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &networkingv1.NetworkPolicy{}) @@ -90,7 +92,7 @@ func (c *FakeNetworkPolicies) Create(networkPolicy *networkingv1.NetworkPolicy) } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Update(networkPolicy *networkingv1.NetworkPolicy) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts v1.UpdateOptions) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &networkingv1.NetworkPolicy{}) @@ -101,7 +103,7 @@ func (c *FakeNetworkPolicies) Update(networkPolicy *networkingv1.NetworkPolicy) } // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. 
-func (c *FakeNetworkPolicies) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(networkpoliciesResource, c.ns, name), &networkingv1.NetworkPolicy{}) @@ -109,7 +111,7 @@ func (c *FakeNetworkPolicies) Delete(name string, options *v1.DeleteOptions) err } // DeleteCollection deletes a collection of objects. -func (c *FakeNetworkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &networkingv1.NetworkPolicyList{}) @@ -117,7 +119,7 @@ func (c *FakeNetworkPolicies) DeleteCollection(options *v1.DeleteOptions, listOp } // Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *networkingv1.NetworkPolicy, err error) { +func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &networkingv1.NetworkPolicy{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index 3f39be957d..4fa1986038 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/networking/v1" @@ -37,14 +38,14 @@ type NetworkPoliciesGetter interface { // NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
type NetworkPolicyInterface interface { - Create(*v1.NetworkPolicy) (*v1.NetworkPolicy, error) - Update(*v1.NetworkPolicy) (*v1.NetworkPolicy, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.NetworkPolicy, error) - List(opts metav1.ListOptions) (*v1.NetworkPolicyList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) + Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkPolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) NetworkPolicyExpansion } @@ -63,20 +64,20 @@ func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicie } // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { +func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { result = &v1.NetworkPolicy{} err = c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { +func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolic Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a networkPolicy and creates it. 
Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { +func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { result = &v1.NetworkPolicy{} err = c.client.Post(). Namespace(c.ns). Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). Body(networkPolicy). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { +func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { result = &v1.NetworkPolicy{} err = c.client.Put(). Namespace(c.ns). Resource("networkpolicies"). Name(networkPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(networkPolicy). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) error { +func (c *networkPolicies) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("networkpolicies"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *networkPolicies) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) { +func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { result = &v1.NetworkPolicy{} err = c.client.Patch(pt). Namespace(c.ns). Resource("networkpolicies"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go index ee7821778e..130d037d56 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go @@ -19,6 +19,8 @@ limitations under the License. 
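Patch now carries PatchOptions through VersionedParams in addition to the context. A minimal caller sketch, not part of the diff, issuing a strategic merge patch against the networking v1 client with illustrative names:

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
    )

    // labelNetworkPolicy adds a label via the new Patch signature.
    func labelNetworkPolicy(ctx context.Context, cs kubernetes.Interface) error {
        patch := []byte(`{"metadata":{"labels":{"team":"netsec"}}}`)
        _, err := cs.NetworkingV1().NetworkPolicies("default").Patch(
            ctx,
            "deny-all", // assumed policy name
            types.StrategicMergePatchType,
            patch,
            metav1.PatchOptions{FieldManager: "example-editor"}, // options now reach the server
        )
        return err
    }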
package fake import ( + "context" + v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var ingressesResource = schema.GroupVersionResource{Group: "networking.k8s.io", var ingressesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1beta1", Kind: "Ingress"} // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) @@ -50,7 +52,7 @@ func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1beta1 } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { +func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) @@ -72,14 +74,14 @@ func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, } // Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) @@ -90,7 +92,7 @@ func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingres } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) @@ -102,7 +104,7 @@ func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingres // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { obj, err := c.Fake. 
Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) @@ -113,7 +115,7 @@ func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress } // Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeIngresses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) @@ -121,7 +123,7 @@ func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeIngresses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) @@ -129,7 +131,7 @@ func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { +func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go index 8d76678f16..b8420c0dc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/networking/v1beta1" @@ -37,15 +38,15 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. 
type IngressInterface interface { - Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) - UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) - List(opts v1.ListOptions) (*v1beta1.IngressList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) + Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) IngressExpansion } @@ -64,20 +65,20 @@ func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { } // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { +func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. 
-func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Post(). Namespace(c.ns). Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). Resource("ingresses"). Name(ingress.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { +func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). Resource("ingresses"). Name(ingress.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(ingress). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { +func (c *ingresses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *ingresses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { +func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} err = c.client.Patch(pt). Namespace(c.ns). Resource("ingresses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go index 3c8b00986f..1495ba72a9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", V var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1alpha1", Kind: "RuntimeClass"} // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeRuntimeClasses) Get(name string, options v1.GetOptions) (result *v1 } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { +func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1alpha1.RuntimeClassList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeRuntimeClasses) List(opts v1.ListOptions) (result *v1alpha1.Runtime } // Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeRuntimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
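The fake clients in this patch gain the same context-and-options signatures, but as their bodies show, ctx and opts are accepted and then dropped before the action is recorded, so existing reactor-based tests keep working. A test sketch against the fake clientset, with illustrative names:

    package example

    import (
        "context"
        "testing"

        v1alpha1 "k8s.io/api/node/v1alpha1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestRuntimeClassRoundTrip(t *testing.T) {
        cs := fake.NewSimpleClientset()
        ctx := context.Background()

        // The fake tracker performs no validation, so a bare object suffices.
        rc := &v1alpha1.RuntimeClass{ObjectMeta: metav1.ObjectMeta{Name: "gvisor"}}
        if _, err := cs.NodeV1alpha1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{}); err != nil {
            t.Fatal(err)
        }
        got, err := cs.NodeV1alpha1().RuntimeClasses().Get(ctx, "gvisor", metav1.GetOptions{})
        if err != nil || got.Name != "gvisor" {
            t.Fatalf("unexpected result: %+v, %v", got, err)
        }
    }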
-func (c *FakeRuntimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeRuntimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result } // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeRuntimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.RuntimeClassList{}) @@ -110,7 +112,7 @@ func (c *FakeRuntimeClasses) DeleteCollection(options *v1.DeleteOptions, listOpt } // Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1alpha1.RuntimeClass{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go index 044460ec0b..c60f24211a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/node/v1alpha1" @@ -37,14 +38,14 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. 
type RuntimeClassInterface interface { - Create(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) - Update(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error) - List(opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) + Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (*v1alpha1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (*v1alpha1.RuntimeClass, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RuntimeClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) RuntimeClassExpansion } @@ -61,19 +62,19 @@ func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { } // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { +func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { result = &v1alpha1.RuntimeClass{} err = c.client.Get(). Resource("runtimeclasses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { +func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClas Resource("runtimeclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("runtimeclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
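// A caller-side sketch (assumed code, not from the patch): after this change
// every verb takes a context, and Do(ctx)/Watch(ctx) thread it into the HTTP
// request, so per-call deadlines and cancellation apply. Assumes the aliases
// already imported in this file plus "time" and kubernetes
// "k8s.io/client-go/kubernetes".
func listRuntimeClasses(clientset kubernetes.Interface) (*v1alpha1.RuntimeClassList, error) {
	// The request is aborted with a context error if the server takes >10s.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return clientset.NodeV1alpha1().RuntimeClasses().List(ctx, v1.ListOptions{})
}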
-func (c *runtimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { +func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { result = &v1alpha1.RuntimeClass{} err = c.client.Post(). Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(runtimeClass). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { +func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { result = &v1alpha1.RuntimeClass{} err = c.client.Put(). Resource("runtimeclasses"). Name(runtimeClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(runtimeClass). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *runtimeClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("runtimeclasses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *runtimeClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { +func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { result = &v1alpha1.RuntimeClass{} err = c.client.Patch(pt). Resource("runtimeclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go index 201d742667..1eef57f96e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", V var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1beta1", Kind: "RuntimeClass"} // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeRuntimeClasses) Get(name string, options v1.GetOptions) (result *v1 } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { +func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1beta1.RuntimeClassList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeRuntimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeC } // Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeRuntimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeRuntimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result } // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. 
Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeRuntimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.RuntimeClassList{}) @@ -110,7 +112,7 @@ func (c *FakeRuntimeClasses) DeleteCollection(options *v1.DeleteOptions, listOpt } // Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { +func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1beta1.RuntimeClass{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go index b3f7c497fb..7cdd58f292 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/node/v1beta1" @@ -37,14 +38,14 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. 
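// Sketch of a write under the new signatures (assumed caller, not part of the
// patch): the typed CreateOptions added here are encoded by VersionedParams as
// query parameters, so a server-side dry-run or a field manager can ride along
// on the POST. Assumes metav1 and nodev1beta1 = "k8s.io/api/node/v1beta1".
func dryRunCreate(ctx context.Context, clientset kubernetes.Interface) error {
	rc := &nodev1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"},
		Handler:    "kata",
	}
	_, err := clientset.NodeV1beta1().RuntimeClasses().Create(ctx, rc,
		metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}, FieldManager: "example-controller"})
	return err
}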
type RuntimeClassInterface interface { - Create(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) - Update(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error) - List(opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) + Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (*v1beta1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (*v1beta1.RuntimeClass, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RuntimeClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) RuntimeClassExpansion } @@ -61,19 +62,19 @@ func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { } // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { +func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { result = &v1beta1.RuntimeClass{} err = c.client.Get(). Resource("runtimeclasses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { +func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClass Resource("runtimeclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("runtimeclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
-func (c *runtimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { +func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { result = &v1beta1.RuntimeClass{} err = c.client.Post(). Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(runtimeClass). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { +func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { result = &v1beta1.RuntimeClass{} err = c.client.Put(). Resource("runtimeclasses"). Name(runtimeClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(runtimeClass). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *runtimeClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("runtimeclasses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *runtimeClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { +func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { result = &v1beta1.RuntimeClass{} err = c.client.Patch(pt). Resource("runtimeclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go index 40bad265f0..9fd2eb8f28 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "context" + policy "k8s.io/api/policy/v1beta1" ) @@ -33,6 +35,6 @@ func (c *evictions) Evict(eviction *policy.Eviction) error { Name(eviction.Name). SubResource("eviction"). Body(eviction). - Do(). + Do(context.TODO()). 
Error() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index 5bfbbca47f..bd7a82819c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", var poddisruptionbudgetsKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"} // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *FakePodDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) @@ -50,7 +52,7 @@ func (c *FakePodDisruptionBudgets) Get(name string, options v1.GetOptions) (resu } // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *FakePodDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { +func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1beta1.PodDisruptionBudgetList{}) @@ -72,14 +74,14 @@ func (c *FakePodDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.Po } // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *FakePodDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) @@ -90,7 +92,7 @@ func (c *FakePodDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisrup } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
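// Note on the eviction hunk above: Evict keeps its pre-context signature, so
// the generated request is bridged with context.TODO() rather than a
// caller-supplied context. An unchanged call site might look like this
// (assumed names; policyv1beta1 = "k8s.io/api/policy/v1beta1"):
func evictPod(clientset kubernetes.Interface, ns, pod string) error {
	return clientset.PolicyV1beta1().Evictions(ns).Evict(&policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: ns},
	})
}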
-func (c *FakePodDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) @@ -102,7 +104,7 @@ func (c *FakePodDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisrup // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) { +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) @@ -113,7 +115,7 @@ func (c *FakePodDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.Pod } // Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *FakePodDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) @@ -121,7 +123,7 @@ func (c *FakePodDisruptionBudgets) Delete(name string, options *v1.DeleteOptions } // DeleteCollection deletes a collection of objects. -func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) @@ -129,7 +131,7 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, l } // Patch applies the patch and returns the patched podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1beta1.PodDisruptionBudget{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go index 32d1989f33..a1f850d2be 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "policy", V var podsecuritypoliciesKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"} // Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (resul } // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { +func (c *FakePodSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.Pod } // Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *FakePodSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePodSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts)) } // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityP } // Update takes the representation of a podSecurityPolicy and updates it. 
Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityP } // Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *FakePodSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePodSecurityPolicies) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePodSecurityPolicies) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) @@ -110,7 +112,7 @@ func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, li } // Patch applies the patch and returns the patched podSecurityPolicy. -func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 864af9a262..e82a6d20e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/policy/v1beta1" @@ -37,15 +38,15 @@ type PodDisruptionBudgetsGetter interface { // PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. 
type PodDisruptionBudgetInterface interface { - Create(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - Update(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - UpdateStatus(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) - List(opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) + Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error) + Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } @@ -64,20 +65,20 @@ func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisru } // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { +func (c *podDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -88,13 +89,13 @@ func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDis Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. 
-func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *podDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -105,63 +106,65 @@ func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, erro Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Post(). Namespace(c.ns). Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podDisruptionBudget). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Put(). Namespace(c.ns). Resource("poddisruptionbudgets"). Name(podDisruptionBudget.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podDisruptionBudget). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Put(). Namespace(c.ns). Resource("poddisruptionbudgets"). Name(podDisruptionBudget.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podDisruptionBudget). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { +func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("poddisruptionbudgets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
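// Sketch of the status-subresource flow under the new signatures (assumed
// caller, not from the patch): read the object, mutate .Status, then call
// UpdateStatus with ctx and UpdateOptions, which PUTs to
// poddisruptionbudgets/<name>/status.
func bumpPDBStatus(ctx context.Context, clientset kubernetes.Interface, ns, name string) error {
	pdb, err := clientset.PolicyV1beta1().PodDisruptionBudgets(ns).Get(ctx, name, v1.GetOptions{})
	if err != nil {
		return err
	}
	pdb.Status.ObservedGeneration = pdb.Generation
	_, err = clientset.PolicyV1beta1().PodDisruptionBudgets(ns).UpdateStatus(ctx, pdb, v1.UpdateOptions{})
	return err
}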
-func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -172,20 +175,21 @@ func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listO VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { +func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} err = c.client.Patch(pt). Namespace(c.ns). Resource("poddisruptionbudgets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go index d02096d747..7cadf26351 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/policy/v1beta1" @@ -37,14 +38,14 @@ type PodSecurityPoliciesGetter interface { // PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
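// Sketch (assumed caller): at this commit DeleteOptions still travels by
// pointer, while the selector and timeout ride in ListOptions; the collection
// delete now also honours the caller's context.
func purgeStagingPSPs(ctx context.Context, clientset kubernetes.Interface) error {
	timeout := int64(30)
	return clientset.PolicyV1beta1().PodSecurityPolicies().DeleteCollection(ctx,
		&v1.DeleteOptions{},
		v1.ListOptions{LabelSelector: "env=staging", TimeoutSeconds: &timeout})
}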
type PodSecurityPolicyInterface interface { - Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) + Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) PodSecurityPolicyExpansion } @@ -61,19 +62,19 @@ func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies { } // Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Get(). Resource("podsecuritypolicies"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { +func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). 
- Watch() + Watch(ctx) } // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Post(). Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podSecurityPolicy). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Put(). Resource("podsecuritypolicies"). Name(podSecurityPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podSecurityPolicy). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { +func (c *podSecurityPolicies) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("podsecuritypolicies"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { +func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} err = c.client.Patch(pt). Resource("podsecuritypolicies"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
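// Sketch of a patch call under the new signature (assumed caller, not from
// the patch): the generated chain now sets Name before SubResource, and the
// added PatchOptions are encoded as query parameters. Assumes
// types = "k8s.io/apimachinery/pkg/types".
func labelPSP(ctx context.Context, clientset kubernetes.Interface) error {
	patch := []byte(`{"metadata":{"labels":{"team":"infra"}}}`)
	_, err := clientset.PolicyV1beta1().PodSecurityPolicies().Patch(ctx, "restricted",
		types.MergePatchType, patch, v1.PatchOptions{FieldManager: "example"})
	return err
}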
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 0a47c44115..467c6ddb02 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/rbac/v1" @@ -37,14 +38,14 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. type ClusterRoleInterface interface { - Create(*v1.ClusterRole) (*v1.ClusterRole, error) - Update(*v1.ClusterRole) (*v1.ClusterRole, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ClusterRole, error) - List(opts metav1.ListOptions) (*v1.ClusterRoleList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) + Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (*v1.ClusterRole, error) + Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (*v1.ClusterRole, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRole, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) ClusterRoleExpansion } @@ -61,19 +62,19 @@ func newClusterRoles(c *RbacV1Client) *clusterRoles { } // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { +func (c *clusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { result = &v1.ClusterRole{} err = c.client.Get(). Resource("clusterroles"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { +func (c *clusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested clusterRoles. 
-func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *clusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { +func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { result = &v1.ClusterRole{} err = c.client.Post(). Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRole). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { +func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { result = &v1.ClusterRole{} err = c.client.Put(). Resource("clusterroles"). Name(clusterRole.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRole). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error { +func (c *clusterRoles) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("clusterroles"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *clusterRoles) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptio VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) { +func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { result = &v1.ClusterRole{} err = c.client.Patch(pt). Resource("clusterroles"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
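// Sketch (assumed caller): because Watch now receives ctx, cancelling the
// context tears down the watch stream in addition to w.Stop().
func watchClusterRoles(clientset kubernetes.Interface) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	w, err := clientset.RbacV1().ClusterRoles().Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		_ = ev // handle watch.Event values here
	}
	return nil
}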
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index c16ebc3122..19ace55f51 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/rbac/v1" @@ -37,14 +38,14 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. type ClusterRoleBindingInterface interface { - Create(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) - Update(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ClusterRoleBinding, error) - List(opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) + Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (*v1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (*v1.ClusterRoleBinding, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRoleBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } @@ -61,19 +62,19 @@ func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { } // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { result = &v1.ClusterRoleBinding{} err = c.client.Get(). Resource("clusterrolebindings"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { +func (c *clusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterR Resource("clusterrolebindings"). 
VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *clusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, e Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { result = &v1.ClusterRoleBinding{} err = c.client.Post(). Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRoleBinding). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { result = &v1.ClusterRoleBinding{} err = c.client.Put(). Resource("clusterrolebindings"). Name(clusterRoleBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRoleBinding). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) error { +func (c *clusterRoleBindings) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("clusterrolebindings"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, li VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched clusterRoleBinding. 
-func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { result = &v1.ClusterRoleBinding{} err = c.client.Patch(pt). Resource("clusterrolebindings"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index d57f339390..2eec9defda 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + rbacv1 "k8s.io/api/rbac/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorizatio var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"} // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolesResource, name), &rbacv1.ClusterRole{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *rbac } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *rbacv1.ClusterRoleList, err error) { +func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *rbacv1.ClusterRoleList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &rbacv1.ClusterRoleList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *rbacv1.ClusterRole } // Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(clusterRole *rbacv1.ClusterRole) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts v1.CreateOptions) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. 
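Taken together, the hunks above change every verb the same way: a leading context.Context plus an explicit CreateOptions or UpdateOptions argument. Callers migrate mechanically (the Kubernetes tree itself mostly threaded context.TODO() through at first). A before/after sketch, assuming a kubernetes.Interface named cs:

package example

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureClusterRoleBinding shows the post-patch call shapes.
func ensureClusterRoleBinding(ctx context.Context, cs kubernetes.Interface, crb *rbacv1.ClusterRoleBinding) error {
	// Before this patch: cs.RbacV1().ClusterRoleBindings().Create(crb)
	if _, err := cs.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}); err != nil {
		return err
	}
	// Note: at this revision Delete still takes a *metav1.DeleteOptions pointer.
	return cs.RbacV1().ClusterRoleBindings().Delete(ctx, crb.Name, &metav1.DeleteOptions{})
}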
Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &rbacv1.ClusterRole{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeClusterRoles) Create(clusterRole *rbacv1.ClusterRole) (result *rbac } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(clusterRole *rbacv1.ClusterRole) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts v1.UpdateOptions) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &rbacv1.ClusterRole{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeClusterRoles) Update(clusterRole *rbacv1.ClusterRole) (result *rbac } // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeClusterRoles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &rbacv1.ClusterRole{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) _, err := c.Fake.Invokes(action, &rbacv1.ClusterRoleList{}) @@ -110,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRole, err error) { +func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &rbacv1.ClusterRole{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index 878473ef35..c7943659f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + rbacv1 "k8s.io/api/rbac/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.autho var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"} // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. 
-func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *rbacv1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &rbacv1.ClusterRoleBinding{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (resul } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *rbacv1.ClusterRoleBindingList, err error) { +func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *rbacv1.ClusterRoleBindingList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &rbacv1.ClusterRoleBindingList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *rbacv1.Clus } // Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *rbacv1.ClusterRoleBinding) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts v1.CreateOptions) (result *rbacv1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &rbacv1.ClusterRoleBinding{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *rbacv1.ClusterRoleB } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *rbacv1.ClusterRoleBinding) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts v1.UpdateOptions) (result *rbacv1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &rbacv1.ClusterRoleBinding{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *rbacv1.ClusterRoleB } // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &rbacv1.ClusterRoleBinding{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) _, err := c.Fake.Invokes(action, &rbacv1.ClusterRoleBindingList{}) @@ -110,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li } // Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &rbacv1.ClusterRoleBinding{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index 78ef3192f3..1d3cee64ed 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + rbacv1 "k8s.io/api/rbac/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.i var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"} // Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *rbacv1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolesResource, c.ns, name), &rbacv1.Role{}) @@ -50,7 +52,7 @@ func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *rbacv1.Role } // List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(opts v1.ListOptions) (result *rbacv1.RoleList, err error) { +func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *rbacv1.RoleList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &rbacv1.RoleList{}) @@ -72,14 +74,14 @@ func (c *FakeRoles) List(opts v1.ListOptions) (result *rbacv1.RoleList, err erro } // Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. 
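The fake_* files mirror the new signatures one-for-one, so test code compiled against the fake clientset needs the same mechanical update; note in the bodies above that the fakes accept ctx but do not use it. A small hypothetical test with the fake clientset (not from the patch):

package example

import (
	"context"
	"testing"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestGetRole(t *testing.T) {
	seed := &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: "reader", Namespace: "default"}}
	cs := fake.NewSimpleClientset(seed)

	// ctx is threaded for interface compatibility; the fake ignores it.
	got, err := cs.RbacV1().Roles("default").Get(context.TODO(), "reader", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "reader" {
		t.Fatalf("unexpected role: %q", got.Name)
	}
}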
-func (c *FakeRoles) Create(role *rbacv1.Role) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Create(ctx context.Context, role *rbacv1.Role, opts v1.CreateOptions) (result *rbacv1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &rbacv1.Role{}) @@ -90,7 +92,7 @@ func (c *FakeRoles) Create(role *rbacv1.Role) (result *rbacv1.Role, err error) { } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(role *rbacv1.Role) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Update(ctx context.Context, role *rbacv1.Role, opts v1.UpdateOptions) (result *rbacv1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &rbacv1.Role{}) @@ -101,7 +103,7 @@ func (c *FakeRoles) Update(role *rbacv1.Role) (result *rbacv1.Role, err error) { } // Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeRoles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &rbacv1.Role{}) @@ -109,7 +111,7 @@ func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &rbacv1.RoleList{}) @@ -117,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L } // Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.Role, err error) { +func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &rbacv1.Role{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index 6c344cadff..dcdc089751 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + rbacv1 "k8s.io/api/rbac/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorizatio var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"} // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
-func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *rbacv1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &rbacv1.RoleBinding{}) @@ -50,7 +52,7 @@ func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *rbac } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *rbacv1.RoleBindingList, err error) { +func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *rbacv1.RoleBindingList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &rbacv1.RoleBindingList{}) @@ -72,14 +74,14 @@ func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *rbacv1.RoleBinding } // Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(roleBinding *rbacv1.RoleBinding) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts v1.CreateOptions) (result *rbacv1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &rbacv1.RoleBinding{}) @@ -90,7 +92,7 @@ func (c *FakeRoleBindings) Create(roleBinding *rbacv1.RoleBinding) (result *rbac } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Update(roleBinding *rbacv1.RoleBinding) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts v1.UpdateOptions) (result *rbacv1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &rbacv1.RoleBinding{}) @@ -101,7 +103,7 @@ func (c *FakeRoleBindings) Update(roleBinding *rbacv1.RoleBinding) (result *rbac } // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeRoleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &rbacv1.RoleBinding{}) @@ -109,7 +111,7 @@ func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error } // DeleteCollection deletes a collection of objects. 
-func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &rbacv1.RoleBindingList{}) @@ -117,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.RoleBinding, err error) { +func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &rbacv1.RoleBinding{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index a17d791fff..64c08912b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/rbac/v1" @@ -37,14 +38,14 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. type RoleInterface interface { - Create(*v1.Role) (*v1.Role, error) - Update(*v1.Role) (*v1.Role, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Role, error) - List(opts metav1.ListOptions) (*v1.RoleList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) + Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (*v1.Role, error) + Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (*v1.Role, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Role, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) RoleExpansion } @@ -63,20 +64,20 @@ func newRoles(c *RbacV1Client, namespace string) *roles { } // Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, err error) { +func (c *roles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { result = &v1.Role{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). 
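Beyond direct callers, anything that implements or embeds these interfaces (hand-written decorators, instrumentation wrappers, generated mocks) stops compiling until its method set matches the new signatures. A sketch of an embedded-decorator update, with hypothetical names:

package example

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
)

// loggingRoles overrides one verb and inherits the rest by embedding.
type loggingRoles struct {
	rbacv1client.RoleInterface
}

func (l loggingRoles) Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error {
	log.Printf("deleting role %q", name)
	return l.RoleInterface.Delete(ctx, name, opts)
}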
Into(result) return } // List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { +func (c *roles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *roles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(role *v1.Role) (result *v1.Role, err error) { +func (c *roles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { result = &v1.Role{} err = c.client.Post(). Namespace(c.ns). Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). Body(role). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(role *v1.Role) (result *v1.Role, err error) { +func (c *roles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { result = &v1.Role{} err = c.client.Put(). Namespace(c.ns). Resource("roles"). Name(role.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(role). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(name string, options *metav1.DeleteOptions) error { +func (c *roles) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("roles"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *roles) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions meta VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched role. 
-func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) { +func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { result = &v1.Role{} err = c.client.Patch(pt). Namespace(c.ns). Resource("roles"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index c87e457188..70334e356c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/rbac/v1" @@ -37,14 +38,14 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. type RoleBindingInterface interface { - Create(*v1.RoleBinding) (*v1.RoleBinding, error) - Update(*v1.RoleBinding) (*v1.RoleBinding, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.RoleBinding, error) - List(opts metav1.ListOptions) (*v1.RoleBindingList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) + Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (*v1.RoleBinding, error) + Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (*v1.RoleBinding, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RoleBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleBindingList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) RoleBindingExpansion } @@ -63,20 +64,20 @@ func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { } // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { +func (c *roleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { result = &v1.RoleBinding{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. 
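A side effect worth noting in the Create and Update hunks above: the request builders now call VersionedParams(&opts, ...), so option fields such as FieldManager and DryRun are actually encoded as query parameters rather than being unavailable on these verbs. An illustrative use (the field values are examples, not from the patch):

package example

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func dryRunCreate(ctx context.Context, cs kubernetes.Interface, role *rbacv1.Role) (*rbacv1.Role, error) {
	opts := metav1.CreateOptions{
		FieldManager: "my-controller",            // owner recorded in managed fields
		DryRun:       []string{metav1.DryRunAll}, // validate server-side, persist nothing
	}
	return cs.RbacV1().Roles(role.Namespace).Create(ctx, role, opts)
}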
-func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { +func (c *roleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *roleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) { +func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { result = &v1.RoleBinding{} err = c.client.Post(). Namespace(c.ns). Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). Body(roleBinding). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) { +func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { result = &v1.RoleBinding{} err = c.client.Put(). Namespace(c.ns). Resource("rolebindings"). Name(roleBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(roleBinding). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error { +func (c *roleBindings) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *roleBindings) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptio VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched roleBinding. 
-func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) { +func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { result = &v1.RoleBinding{} err = c.client.Patch(pt). Namespace(c.ns). Resource("rolebindings"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index 77e66877e7..9397cd8d42 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -37,14 +38,14 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. type ClusterRoleInterface interface { - Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) - Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error) - List(opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) + Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (*v1alpha1.ClusterRole, error) + Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (*v1alpha1.ClusterRole, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRole, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) ClusterRoleExpansion } @@ -61,19 +62,19 @@ func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { } // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { +func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { result = &v1alpha1.ClusterRole{} err = c.client.Get(). Resource("clusterroles"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
-func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { +func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleLi Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { +func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { result = &v1alpha1.ClusterRole{} err = c.client.Post(). Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRole). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { +func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { result = &v1alpha1.ClusterRole{} err = c.client.Put(). Resource("clusterroles"). Name(clusterRole.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRole). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { +func (c *clusterRoles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("clusterroles"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *clusterRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched clusterRole. 
-func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { +func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { result = &v1alpha1.ClusterRole{} err = c.client.Patch(pt). Resource("clusterroles"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 0d1b9d2051..3d36d87b90 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -37,14 +38,14 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. type ClusterRoleBindingInterface interface { - Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) - Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) - List(opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) + Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*v1alpha1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1alpha1.ClusterRoleBinding, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } @@ -61,19 +62,19 @@ func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { } // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { result = &v1alpha1.ClusterRoleBinding{} err = c.client.Get(). Resource("clusterrolebindings"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). 
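The Patch hunks also reorder the builder calls (Name before SubResource) and add VersionedParams for the new PatchOptions. The rest.Request builder assembles the URL from named fields, so the reordering should be cosmetic; the functional change is that PatchOptions now reaches the server. The shape is identical across the v1 and v1alpha1 clients in this patch. A sketch:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func labelClusterRole(ctx context.Context, cs kubernetes.Interface) error {
	patch := []byte(`{"metadata":{"labels":{"team":"platform"}}}`)
	_, err := cs.RbacV1().ClusterRoles().Patch(
		ctx, "view", types.MergePatchType, patch,
		metav1.PatchOptions{FieldManager: "my-controller"},
	)
	return err
}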
+ Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { +func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cluste Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { result = &v1alpha1.ClusterRoleBinding{} err = c.client.Post(). Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRoleBinding). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { result = &v1alpha1.ClusterRoleBinding{} err = c.client.Put(). Resource("clusterrolebindings"). Name(clusterRoleBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRoleBinding). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *clusterRoleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("clusterrolebindings"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
-func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { result = &v1alpha1.ClusterRoleBinding{} err = c.client.Patch(pt). Resource("clusterrolebindings"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index d2d1b1c74c..966c601f3b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorizatio var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRole"} // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { +func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1al } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { +func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1alpha1.ClusterRoleList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRo } // Watch returns a watch.Interface that watches the requested clusterRoles. 
-func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { +func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeClusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1 } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { +func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeClusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1 } // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeClusterRoles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) @@ -110,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { +func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1alpha1.ClusterRole{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 3e23e5f657..c1d43dcd88 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.autho var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRoleBinding"} // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (resul } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { +func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1alpha1.ClusterRoleBindingList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cl } // Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRol } // Update takes the representation of a clusterRoleBinding and updates it. 
Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRol } // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) @@ -110,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li } // Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1alpha1.ClusterRoleBinding{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index 7bd52373fa..63579bbbb7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.i var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "Role"} // Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { +func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { obj, err := c.Fake. 
Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{}) @@ -50,7 +52,7 @@ func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.Ro } // List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { +func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1alpha1.RoleList{}) @@ -72,14 +74,14 @@ func (c *FakeRoles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err er } // Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { +func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) @@ -90,7 +92,7 @@ func (c *FakeRoles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err erro } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { +func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) @@ -101,7 +103,7 @@ func (c *FakeRoles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err erro } // Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeRoles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1alpha1.Role{}) @@ -109,7 +111,7 @@ func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) @@ -117,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L } // Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { +func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Role{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index 0150503115..21bbb6c8dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorizatio var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "RoleBinding"} // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { +func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) @@ -50,7 +52,7 @@ func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1al } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { +func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1alpha1.RoleBindingList{}) @@ -72,14 +74,14 @@ func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindi } // Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { +func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) @@ -90,7 +92,7 @@ func (c *FakeRoleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1 } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. 
-func (c *FakeRoleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { +func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) @@ -101,7 +103,7 @@ func (c *FakeRoleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1 } // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeRoleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) @@ -109,7 +111,7 @@ func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error } // DeleteCollection deletes a collection of objects. -func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) @@ -117,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { +func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoleBinding{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 4a4b67240b..b2bd377961 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -37,14 +38,14 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. 
type RoleInterface interface { - Create(*v1alpha1.Role) (*v1alpha1.Role, error) - Update(*v1alpha1.Role) (*v1alpha1.Role, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.Role, error) - List(opts v1.ListOptions) (*v1alpha1.RoleList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) + Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (*v1alpha1.Role, error) + Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (*v1alpha1.Role, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Role, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) RoleExpansion } @@ -63,20 +64,20 @@ func newRoles(c *RbacV1alpha1Client, namespace string) *roles { } // Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { +func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { +func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { +func (c *roles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} err = c.client.Post(). Namespace(c.ns). Resource("roles"). 
+ VersionedParams(&opts, scheme.ParameterCodec). Body(role). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { +func (c *roles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} err = c.client.Put(). Namespace(c.ns). Resource("roles"). Name(role.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(role). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(name string, options *v1.DeleteOptions) error { +func (c *roles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("roles"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *roles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListO VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched role. -func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { +func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} err = c.client.Patch(pt). Namespace(c.ns). Resource("roles"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index bf4e5a10ef..859515bf09 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -37,14 +38,14 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. 
type RoleBindingInterface interface { - Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) - Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error) - List(opts v1.ListOptions) (*v1alpha1.RoleBindingList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) + Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (*v1alpha1.RoleBinding, error) + Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (*v1alpha1.RoleBinding, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) RoleBindingExpansion } @@ -63,20 +64,20 @@ func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { } // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { +func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { result = &v1alpha1.RoleBinding{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { +func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingLi Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. 
-func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { +func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { result = &v1alpha1.RoleBinding{} err = c.client.Post(). Namespace(c.ns). Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). Body(roleBinding). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { +func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { result = &v1alpha1.RoleBinding{} err = c.client.Put(). Namespace(c.ns). Resource("rolebindings"). Name(roleBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(roleBinding). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *roleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *roleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { +func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { result = &v1alpha1.RoleBinding{} err = c.client.Patch(pt). Namespace(c.ns). Resource("rolebindings"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index 21d3cab373..803b892e74 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/rbac/v1beta1" @@ -37,14 +38,14 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. 
type ClusterRoleInterface interface { - Create(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) - Update(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ClusterRole, error) - List(opts v1.ListOptions) (*v1beta1.ClusterRoleList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) + Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (*v1beta1.ClusterRole, error) + Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (*v1beta1.ClusterRole, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRole, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) ClusterRoleExpansion } @@ -61,19 +62,19 @@ func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { } // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { +func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { result = &v1beta1.ClusterRole{} err = c.client.Get(). Resource("clusterroles"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { +func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleLis Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. 
-func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { +func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { result = &v1beta1.ClusterRole{} err = c.client.Post(). Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRole). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { +func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { result = &v1beta1.ClusterRole{} err = c.client.Put(). Resource("clusterroles"). Name(clusterRole.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRole). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { +func (c *clusterRoles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("clusterroles"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *clusterRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { +func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { result = &v1beta1.ClusterRole{} err = c.client.Patch(pt). Resource("clusterroles"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index 47eb9e4e77..81e07f3322 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/rbac/v1beta1" @@ -37,14 +38,14 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
type ClusterRoleBindingInterface interface { - Create(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) - Update(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error) - List(opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) + Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*v1beta1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1beta1.ClusterRoleBinding, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } @@ -61,19 +62,19 @@ func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { } // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { result = &v1beta1.ClusterRoleBinding{} err = c.client.Get(). Resource("clusterrolebindings"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { +func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Cluster Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). 
Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { result = &v1beta1.ClusterRoleBinding{} err = c.client.Post(). Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRoleBinding). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { result = &v1beta1.ClusterRoleBinding{} err = c.client.Put(). Resource("clusterrolebindings"). Name(clusterRoleBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(clusterRoleBinding). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *clusterRoleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("clusterrolebindings"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { +func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { result = &v1beta1.ClusterRoleBinding{} err = c.client.Patch(pt). Resource("clusterrolebindings"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index 2dbc3f6166..48d437caba 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorizatio var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRole"} // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { +func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1be } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { +func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1beta1.ClusterRoleList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRol } // Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { +func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeClusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1b } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { +func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeClusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1b } // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeClusterRoles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{}) @@ -110,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { +func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1beta1.ClusterRole{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index 14e20bc28c..b20a6fa6cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.autho var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRoleBinding"} // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (resul } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
-func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { +func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1beta1.ClusterRoleBindingList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Clu } // Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRole } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRole } // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{}) @@ -110,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li } // Patch applies the patch and returns the patched clusterRoleBinding. 
-func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { +func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1beta1.ClusterRoleBinding{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index e31768e4e5..eec0ef6f89 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.i var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "Role"} // Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { +func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1beta1.Role{}) @@ -50,7 +52,7 @@ func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1beta1.Rol } // List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { +func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1beta1.RoleList{}) @@ -72,14 +74,14 @@ func (c *FakeRoles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err err } // Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { +func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{}) @@ -90,7 +92,7 @@ func (c *FakeRoles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. 
-func (c *FakeRoles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { +func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{}) @@ -101,7 +103,7 @@ func (c *FakeRoles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) } // Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeRoles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1beta1.Role{}) @@ -109,7 +111,7 @@ func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeRoles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.RoleList{}) @@ -117,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L } // Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { +func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1beta1.Role{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 06b93c93f6..80f9638bcb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorizatio var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "RoleBinding"} // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { +func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) @@ -50,7 +52,7 @@ func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1be } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. 
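// ---------------------------------------------------------------------------
// [Editor's aside, not part of the vendored patch] The fake clients in this
// update gain the same context and options parameters as the real ones, so
// test code migrates in lockstep. A minimal sketch of exercising the fake
// clientset under the new signatures (namespace and object names are
// hypothetical):
//
//	import (
//		"context"
//
//		rbacv1beta1 "k8s.io/api/rbac/v1beta1"
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/client-go/kubernetes/fake"
//	)
//
//	func example() error {
//		client := fake.NewSimpleClientset()
//		rb := &rbacv1beta1.RoleBinding{
//			ObjectMeta: metav1.ObjectMeta{Name: "example-binding", Namespace: "default"},
//			RoleRef: rbacv1beta1.RoleRef{
//				APIGroup: "rbac.authorization.k8s.io",
//				Kind:     "Role",
//				Name:     "example-role",
//			},
//		}
//		// Create and List now take a context plus explicit options structs.
//		if _, err := client.RbacV1beta1().RoleBindings("default").Create(context.TODO(), rb, metav1.CreateOptions{}); err != nil {
//			return err
//		}
//		_, err := client.RbacV1beta1().RoleBindings("default").List(context.TODO(), metav1.ListOptions{})
//		return err
//	}
// ---------------------------------------------------------------------------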
-func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { +func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1beta1.RoleBindingList{}) @@ -72,14 +74,14 @@ func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindin } // Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { +func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) @@ -90,7 +92,7 @@ func (c *FakeRoleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1b } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { +func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) @@ -101,7 +103,7 @@ func (c *FakeRoleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1b } // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeRoleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) @@ -109,7 +111,7 @@ func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error } // DeleteCollection deletes a collection of objects. -func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{}) @@ -117,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio } // Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { +func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.RoleBinding{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index 2b61aad523..3a0233265c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/rbac/v1beta1" @@ -37,14 +38,14 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. type RoleInterface interface { - Create(*v1beta1.Role) (*v1beta1.Role, error) - Update(*v1beta1.Role) (*v1beta1.Role, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Role, error) - List(opts v1.ListOptions) (*v1beta1.RoleList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) + Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (*v1beta1.Role, error) + Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (*v1beta1.Role, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Role, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) RoleExpansion } @@ -63,20 +64,20 @@ func newRoles(c *RbacV1beta1Client, namespace string) *roles { } // Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { +func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { result = &v1beta1.Role{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { +func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested roles. 
-func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { +func (c *roles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { result = &v1beta1.Role{} err = c.client.Post(). Namespace(c.ns). Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). Body(role). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { +func (c *roles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { result = &v1beta1.Role{} err = c.client.Put(). Namespace(c.ns). Resource("roles"). Name(role.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(role). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(name string, options *v1.DeleteOptions) error { +func (c *roles) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("roles"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *roles) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListO VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched role. -func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { +func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { result = &v1beta1.Role{} err = c.client.Patch(pt). Namespace(c.ns). Resource("roles"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 0bd118fdfe..a0cb0763d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/rbac/v1beta1" @@ -37,14 +38,14 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. type RoleBindingInterface interface { - Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) - Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.RoleBinding, error) - List(opts v1.ListOptions) (*v1beta1.RoleBindingList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) + Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (*v1beta1.RoleBinding, error) + Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (*v1beta1.RoleBinding, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) RoleBindingExpansion } @@ -63,20 +64,20 @@ func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { } // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { +func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { result = &v1beta1.RoleBinding{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { +func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingLis Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested roleBindings. 
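// ---------------------------------------------------------------------------
// [Editor's aside, not part of the vendored patch] Watch now also threads the
// caller's context into the underlying request, so cancelling the context
// tears down the watch connection. A hedged usage sketch; "clientset" is
// assumed to be an already-configured kubernetes.Interface:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	w, err := clientset.RbacV1beta1().RoleBindings("default").Watch(ctx, metav1.ListOptions{})
//	if err != nil {
//		return err
//	}
//	defer w.Stop()
//	for event := range w.ResultChan() {
//		_ = event // handle the watch.Event; the channel closes once ctx is cancelled
//	}
// ---------------------------------------------------------------------------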
-func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { +func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { result = &v1beta1.RoleBinding{} err = c.client.Post(). Namespace(c.ns). Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). Body(roleBinding). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { +func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { result = &v1beta1.RoleBinding{} err = c.client.Put(). Namespace(c.ns). Resource("rolebindings"). Name(roleBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(roleBinding). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { +func (c *roleBindings) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *roleBindings) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { +func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { result = &v1beta1.RoleBinding{} err = c.client.Patch(pt). Namespace(c.ns). Resource("rolebindings"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). 
Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go index 60ad3a8db2..05c2d7ae66 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + schedulingv1 "k8s.io/api/scheduling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1", Kind: "PriorityClass"} // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *schedulingv1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(priorityclassesResource, name), &schedulingv1.PriorityClass{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *s } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *schedulingv1.PriorityClassList, err error) { +func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *schedulingv1.PriorityClassList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &schedulingv1.PriorityClassList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *schedulingv1.Pr } // Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(priorityClass *schedulingv1.PriorityClass) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts v1.CreateOptions) (result *schedulingv1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &schedulingv1.PriorityClass{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakePriorityClasses) Create(priorityClass *schedulingv1.PriorityClass) } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. 
-func (c *FakePriorityClasses) Update(priorityClass *schedulingv1.PriorityClass) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts v1.UpdateOptions) (result *schedulingv1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &schedulingv1.PriorityClass{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakePriorityClasses) Update(priorityClass *schedulingv1.PriorityClass) } // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *FakePriorityClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePriorityClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &schedulingv1.PriorityClass{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOptions) _, err := c.Fake.Invokes(action, &schedulingv1.PriorityClassList{}) @@ -110,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOp } // Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *schedulingv1.PriorityClass, err error) { +func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &schedulingv1.PriorityClass{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go index 3abbb7b8eb..394adf4460 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/scheduling/v1" @@ -37,14 +38,14 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
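// ---------------------------------------------------------------------------
// [Editor's aside, not part of the vendored patch] The interface rewrite that
// follows is the source-breaking part of this update: every method gains a
// leading context.Context, and Create/Update/Patch gain explicit options
// structs. A before/after sketch at a call site (names hypothetical):
//
//	// before: pc, err := client.SchedulingV1().PriorityClasses().Get("high-priority", metav1.GetOptions{})
//	// after:
//	pc, err := client.SchedulingV1().PriorityClasses().Get(ctx, "high-priority", metav1.GetOptions{})
// ---------------------------------------------------------------------------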
type PriorityClassInterface interface { - Create(*v1.PriorityClass) (*v1.PriorityClass, error) - Update(*v1.PriorityClass) (*v1.PriorityClass, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PriorityClass, error) - List(opts metav1.ListOptions) (*v1.PriorityClassList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) + Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (*v1.PriorityClass, error) + Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (*v1.PriorityClass, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityClassList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) PriorityClassExpansion } @@ -61,19 +62,19 @@ func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { } // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { +func (c *priorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { result = &v1.PriorityClass{} err = c.client.Get(). Resource("priorityclasses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { +func (c *priorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClas Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *priorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. 
-func (c *priorityClasses) Create(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { +func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { result = &v1.PriorityClass{} err = c.client.Post(). Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityClass). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { +func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { result = &v1.PriorityClass{} err = c.client.Put(). Resource("priorityclasses"). Name(priorityClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityClass). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(name string, options *metav1.DeleteOptions) error { +func (c *priorityClasses) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("priorityclasses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *priorityClasses) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *priorityClasses) DeleteCollection(options *metav1.DeleteOptions, listOp VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) { +func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { result = &v1.PriorityClass{} err = c.client.Patch(pt). Resource("priorityclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index e592ed137f..8b264e07ac 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1alpha1", Kind: "PriorityClass"} // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { +func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1alpha1.PriorityClass{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *v } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { +func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1alpha1.PriorityClassList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *v1alpha1.Priori } // Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { +func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakePriorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (res } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { +func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakePriorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (res } // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. 
-func (c *FakePriorityClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePriorityClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &v1alpha1.PriorityClass{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.PriorityClassList{}) @@ -110,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOp } // Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) { +func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1alpha1.PriorityClass{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index 29d646fb1f..74a9b44ed5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/scheduling/v1alpha1" @@ -37,14 +38,14 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
type PriorityClassInterface interface { - Create(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error) - Update(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.PriorityClass, error) - List(opts v1.ListOptions) (*v1alpha1.PriorityClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) + Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (*v1alpha1.PriorityClass, error) + Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (*v1alpha1.PriorityClass, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) PriorityClassExpansion } @@ -61,19 +62,19 @@ func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses { } // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { +func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { result = &v1alpha1.PriorityClass{} err = c.client.Get(). Resource("priorityclasses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { +func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityCl Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a priorityClass and creates it. 
Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { +func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { result = &v1alpha1.PriorityClass{} err = c.client.Post(). Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityClass). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { +func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { result = &v1alpha1.PriorityClass{} err = c.client.Put(). Resource("priorityclasses"). Name(priorityClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityClass). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *priorityClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("priorityclasses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *priorityClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOption VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) { +func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { result = &v1alpha1.PriorityClass{} err = c.client.Patch(pt). Resource("priorityclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go index 44ce64b5ce..7381e63450 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1beta1", Kind: "PriorityClass"} // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { +func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1beta1.PriorityClass{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *v } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { +func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1beta1.PriorityClassList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *v1beta1.Priorit } // Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { +func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakePriorityClasses) Create(priorityClass *v1beta1.PriorityClass) (resu } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { +func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakePriorityClasses) Update(priorityClass *v1beta1.PriorityClass) (resu } // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. 
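// ---------------------------------------------------------------------------
// [Editor's aside, not part of the vendored patch] Note that at this commit
// Delete still takes *v1.DeleteOptions by pointer, gaining only the context,
// while Patch gains both a context and a v1.PatchOptions value. A sketch
// against a fake clientset, assuming a PriorityClass named "high-priority"
// already exists (types is k8s.io/apimachinery/pkg/types):
//
//	err := client.SchedulingV1beta1().PriorityClasses().Delete(
//		context.TODO(), "high-priority", &metav1.DeleteOptions{})
//
//	data := []byte(`{"metadata":{"labels":{"tier":"critical"}}}`)
//	pc, err := client.SchedulingV1beta1().PriorityClasses().Patch(
//		context.TODO(), "high-priority", types.MergePatchType, data, metav1.PatchOptions{})
// ---------------------------------------------------------------------------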
-func (c *FakePriorityClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePriorityClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &v1beta1.PriorityClass{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.PriorityClassList{}) @@ -110,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOp } // Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) { +func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1beta1.PriorityClass{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 5e402f8e34..80db5299b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/scheduling/v1beta1" @@ -37,14 +38,14 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
type PriorityClassInterface interface { - Create(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error) - Update(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.PriorityClass, error) - List(opts v1.ListOptions) (*v1beta1.PriorityClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) + Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (*v1beta1.PriorityClass, error) + Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (*v1beta1.PriorityClass, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) PriorityClassExpansion } @@ -61,19 +62,19 @@ func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses { } // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { +func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { result = &v1beta1.PriorityClass{} err = c.client.Get(). Resource("priorityclasses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { +func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityCla Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a priorityClass and creates it. 
Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { +func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { result = &v1beta1.PriorityClass{} err = c.client.Post(). Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityClass). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { +func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { result = &v1beta1.PriorityClass{} err = c.client.Put(). Resource("priorityclasses"). Name(priorityClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(priorityClass). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *priorityClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("priorityclasses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *priorityClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOption VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) { +func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { result = &v1beta1.PriorityClass{} err = c.client.Patch(pt). Resource("priorityclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go index 273a027fad..00dc34d2c3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -39,7 +41,7 @@ var podpresetsResource = schema.GroupVersionResource{Group: "settings.k8s.io", V var podpresetsKind = schema.GroupVersionKind{Group: "settings.k8s.io", Version: "v1alpha1", Kind: "PodPreset"} // Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *FakePodPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { +func (c *FakePodPresets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) @@ -50,7 +52,7 @@ func (c *FakePodPresets) Get(name string, options v1.GetOptions) (result *v1alph } // List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *FakePodPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { +func (c *FakePodPresets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(podpresetsResource, podpresetsKind, c.ns, opts), &v1alpha1.PodPresetList{}) @@ -72,14 +74,14 @@ func (c *FakePodPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetLi } // Watch returns a watch.Interface that watches the requested podPresets. -func (c *FakePodPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePodPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(podpresetsResource, c.ns, opts)) } // Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *FakePodPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { +func (c *FakePodPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) @@ -90,7 +92,7 @@ func (c *FakePodPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1 } // Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *FakePodPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { +func (c *FakePodPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) @@ -101,7 +103,7 @@ func (c *FakePodPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1 } // Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *FakePodPresets) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePodPresets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. 
Invokes(testing.NewDeleteAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) @@ -109,7 +111,7 @@ func (c *FakePodPresets) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakePodPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakePodPresets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(podpresetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.PodPresetList{}) @@ -117,7 +119,7 @@ func (c *FakePodPresets) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched podPreset. -func (c *FakePodPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { +func (c *FakePodPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodPreset{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go index 8fd6adc56b..01eaa9a82c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/settings/v1alpha1" @@ -37,14 +38,14 @@ type PodPresetsGetter interface { // PodPresetInterface has methods to work with PodPreset resources. 
type PodPresetInterface interface { - Create(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) - Update(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.PodPreset, error) - List(opts v1.ListOptions) (*v1alpha1.PodPresetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) + Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (*v1alpha1.PodPreset, error) + Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (*v1alpha1.PodPreset, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodPreset, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodPresetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) PodPresetExpansion } @@ -63,20 +64,20 @@ func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { } // Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { +func (c *podPresets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { result = &v1alpha1.PodPreset{} err = c.client.Get(). Namespace(c.ns). Resource("podpresets"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { +func (c *podPresets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -87,13 +88,13 @@ func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested podPresets. -func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *podPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -104,47 +105,49 @@ func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. 
-func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { +func (c *podPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) { result = &v1alpha1.PodPreset{} err = c.client.Post(). Namespace(c.ns). Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). Body(podPreset). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { +func (c *podPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) { result = &v1alpha1.PodPreset{} err = c.client.Put(). Namespace(c.ns). Resource("podpresets"). Name(podPreset.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(podPreset). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { +func (c *podPresets) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("podpresets"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *podPresets) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -155,20 +158,21 @@ func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1. VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched podPreset. -func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { +func (c *podPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) { result = &v1alpha1.PodPreset{} err = c.client.Patch(pt). Namespace(c.ns). Resource("podpresets"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go index 209054175a..6ecde5933c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/storage/v1" @@ -37,14 +38,14 @@ type CSINodesGetter interface { // CSINodeInterface has methods to work with CSINode resources. 
type CSINodeInterface interface { - Create(*v1.CSINode) (*v1.CSINode, error) - Update(*v1.CSINode) (*v1.CSINode, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.CSINode, error) - List(opts metav1.ListOptions) (*v1.CSINodeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error) + Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (*v1.CSINode, error) + Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (*v1.CSINode, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSINode, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CSINodeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) CSINodeExpansion } @@ -61,19 +62,19 @@ func newCSINodes(c *StorageV1Client) *cSINodes { } // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(name string, options metav1.GetOptions) (result *v1.CSINode, err error) { +func (c *cSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { result = &v1.CSINode{} err = c.client.Get(). Resource("csinodes"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(opts metav1.ListOptions) (result *v1.CSINodeList, err error) { +func (c *cSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *cSINodes) List(opts metav1.ListOptions) (result *v1.CSINodeList, err er Resource("csinodes"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *cSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *cSINodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("csinodes"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(cSINode *v1.CSINode) (result *v1.CSINode, err error) { +func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { result = &v1.CSINode{} err = c.client.Post(). 
Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSINode). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(cSINode *v1.CSINode) (result *v1.CSINode, err error) { +func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { result = &v1.CSINode{} err = c.client.Put(). Resource("csinodes"). Name(cSINode.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSINode). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(name string, options *metav1.DeleteOptions) error { +func (c *cSINodes) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("csinodes"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *cSINodes) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *cSINodes) DeleteCollection(options *metav1.DeleteOptions, listOptions m VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error) { +func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { result = &v1.CSINode{} err = c.client.Patch(pt). Resource("csinodes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go index 87ce349a5c..94452160de 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + storagev1 "k8s.io/api/storage/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var csinodesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Vers var csinodesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "CSINode"} // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *FakeCSINodes) Get(name string, options v1.GetOptions) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *storagev1.CSINode, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootGetAction(csinodesResource, name), &storagev1.CSINode{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeCSINodes) Get(name string, options v1.GetOptions) (result *storagev } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *FakeCSINodes) List(opts v1.ListOptions) (result *storagev1.CSINodeList, err error) { +func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *storagev1.CSINodeList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &storagev1.CSINodeList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeCSINodes) List(opts v1.ListOptions) (result *storagev1.CSINodeList, } // Watch returns a watch.Interface that watches the requested cSINodes. -func (c *FakeCSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Create(cSINode *storagev1.CSINode) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Create(ctx context.Context, cSINode *storagev1.CSINode, opts v1.CreateOptions) (result *storagev1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &storagev1.CSINode{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeCSINodes) Create(cSINode *storagev1.CSINode) (result *storagev1.CSI } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Update(cSINode *storagev1.CSINode) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Update(ctx context.Context, cSINode *storagev1.CSINode, opts v1.UpdateOptions) (result *storagev1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &storagev1.CSINode{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeCSINodes) Update(cSINode *storagev1.CSINode) (result *storagev1.CSI } // Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *FakeCSINodes) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeCSINodes) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(csinodesResource, name), &storagev1.CSINode{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeCSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeCSINodes) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(csinodesResource, listOptions) _, err := c.Fake.Invokes(action, &storagev1.CSINodeList{}) @@ -110,7 +112,7 @@ func (c *FakeCSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v } // Patch applies the patch and returns the patched cSINode. 
-func (c *FakeCSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.CSINode, err error) { +func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &storagev1.CSINode{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index c7531d8793..2d5d1f2c76 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + storagev1 "k8s.io/api/storage/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io" var storageclassesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "StorageClass"} // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *storagev1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(storageclassesResource, name), &storagev1.StorageClass{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *st } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *storagev1.StorageClassList, err error) { +func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (result *storagev1.StorageClassList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &storagev1.StorageClassList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *storagev1.Storag } // Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Create(storageClass *storagev1.StorageClass) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *storagev1.StorageClass, opts v1.CreateOptions) (result *storagev1.StorageClass, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &storagev1.StorageClass{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeStorageClasses) Create(storageClass *storagev1.StorageClass) (resul } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Update(storageClass *storagev1.StorageClass) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *storagev1.StorageClass, opts v1.UpdateOptions) (result *storagev1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &storagev1.StorageClass{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeStorageClasses) Update(storageClass *storagev1.StorageClass) (resul } // Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *FakeStorageClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeStorageClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &storagev1.StorageClass{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions) _, err := c.Fake.Invokes(action, &storagev1.StorageClassList{}) @@ -110,7 +112,7 @@ func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOpt } // Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.StorageClass, err error) { +func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &storagev1.StorageClass{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go index 58e09da46b..479bf6ff8c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + storagev1 "k8s.io/api/storage/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s. var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachment"} // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. 
-func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *storagev1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &storagev1.VolumeAttachment{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *storagev1.VolumeAttachmentList, err error) { +func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *storagev1.VolumeAttachmentList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &storagev1.VolumeAttachmentList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *storagev1.Vol } // Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(volumeAttachment *storagev1.VolumeAttachment) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.CreateOptions) (result *storagev1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeVolumeAttachments) Create(volumeAttachment *storagev1.VolumeAttachm } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(volumeAttachment *storagev1.VolumeAttachment) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.UpdateOptions) (result *storagev1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakeVolumeAttachments) Update(volumeAttachment *storagev1.VolumeAttachm // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *storagev1.VolumeAttachment) (*storagev1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1.VolumeAttachment, error) { obj, err := c.Fake. 
Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &storagev1.VolumeAttachment{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *storagev1.VolumeA } // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &storagev1.VolumeAttachment{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions) _, err := c.Fake.Invokes(action, &storagev1.VolumeAttachmentList{}) @@ -121,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, list } // Patch applies the patch and returns the patched volumeAttachment. -func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &storagev1.VolumeAttachment{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 3f4c48f0a0..b23725cce9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/storage/v1" @@ -37,14 +38,14 @@ type StorageClassesGetter interface { // StorageClassInterface has methods to work with StorageClass resources. 
type StorageClassInterface interface { - Create(*v1.StorageClass) (*v1.StorageClass, error) - Update(*v1.StorageClass) (*v1.StorageClass, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.StorageClass, error) - List(opts metav1.ListOptions) (*v1.StorageClassList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) + Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (*v1.StorageClass, error) + Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (*v1.StorageClass, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StorageClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.StorageClassList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) StorageClassExpansion } @@ -61,19 +62,19 @@ func newStorageClasses(c *StorageV1Client) *storageClasses { } // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { +func (c *storageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { result = &v1.StorageClass{} err = c.client.Get(). Resource("storageclasses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) { +func (c *storageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassL Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *storageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. 
-func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { +func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { result = &v1.StorageClass{} err = c.client.Post(). Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(storageClass). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { +func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { result = &v1.StorageClass{} err = c.client.Put(). Resource("storageclasses"). Name(storageClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(storageClass). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) error { +func (c *storageClasses) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("storageclasses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *storageClasses) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOpt VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { +func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { result = &v1.StorageClass{} err = c.client.Patch(pt). Resource("storageclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go index 0f45097b20..1a466070e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" "time" v1 "k8s.io/api/storage/v1" @@ -37,15 +38,15 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. 
type VolumeAttachmentInterface interface { - Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error) - List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) + Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) + UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) + Delete(ctx context.Context, name string, opts *metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.VolumeAttachment, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) VolumeAttachmentExpansion } @@ -62,19 +63,19 @@ func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { } // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { +func (c *volumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { result = &v1.VolumeAttachment{} err = c.client.Get(). Resource("volumeattachments"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { +func (c *volumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -84,13 +85,13 @@ func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAtta Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested volumeAttachments. 
-func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *volumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -100,59 +101,61 @@ func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, err Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { +func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { result = &v1.VolumeAttachment{} err = c.client.Post(). Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { +func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { result = &v1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { +func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { result = &v1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error { +func (c *volumeAttachments) Delete(ctx context.Context, name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("volumeattachments"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
-func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *volumeAttachments) DeleteCollection(ctx context.Context, options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -162,19 +165,20 @@ func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, list VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) { +func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { result = &v1.VolumeAttachment{} err = c.client.Patch(pt). Resource("volumeattachments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index 86f53e2d4d..96cbf0259f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s. var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1alpha1", Kind: "VolumeAttachment"} // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { +func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1alpha1.VolumeAttachmentList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.Volu } // Watch returns a watch.Interface that watches the requested volumeAttachments. 
-func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeVolumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachme } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakeVolumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachme // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1alpha1.VolumeAttachment{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAt } // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttachmentList{}) @@ -121,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, list } // Patch applies the patch and returns the patched volumeAttachment. 
-func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1alpha1.VolumeAttachment{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 7fef94e8d8..4ecc7eb21c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" "time" v1alpha1 "k8s.io/api/storage/v1alpha1" @@ -37,15 +38,15 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error) - List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) + Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttachment, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) VolumeAttachmentExpansion } @@ -62,19 +63,19 @@ func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { } // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. 
-func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Get(). Resource("volumeattachments"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { +func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -84,13 +85,13 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAt Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -100,59 +101,61 @@ func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Post(). Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { +func (c *volumeAttachments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("volumeattachments"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *volumeAttachments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -162,19 +165,20 @@ func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOpti VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { +func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { result = &v1alpha1.VolumeAttachment{} err = c.client.Patch(pt). Resource("volumeattachments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go index 86cf9bf180..0420bf9b26 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/storage/v1beta1" @@ -37,14 +38,14 @@ type CSIDriversGetter interface { // CSIDriverInterface has methods to work with CSIDriver resources. 
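Two mechanical details recur in every non-fake method of this migration: the typed options are serialized onto the request URL via VersionedParams(&opts, scheme.ParameterCodec), and the context is threaded into Do(ctx) (or Watch(ctx)) so the HTTP request carries the caller's deadline and cancellation; Patch additionally now chains Name(name) before SubResource(subresources...). A sketch of a caller's Patch under the new signature, again assuming the illustrative cs clientset and a hypothetical object name:

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
    )

    func labelAttachment(ctx context.Context, cs kubernetes.Interface) error {
        // JSON merge patch; PatchOptions becomes query parameters via VersionedParams.
        patch := []byte(`{"metadata":{"labels":{"example":"true"}}}`)
        _, err := cs.StorageV1alpha1().VolumeAttachments().Patch(
            ctx, "my-attachment", types.MergePatchType, patch, metav1.PatchOptions{})
        return err
    }
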
type CSIDriverInterface interface { - Create(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) - Update(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CSIDriver, error) - List(opts v1.ListOptions) (*v1beta1.CSIDriverList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) + Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (*v1beta1.CSIDriver, error) + Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (*v1beta1.CSIDriver, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIDriver, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIDriverList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) CSIDriverExpansion } @@ -61,19 +62,19 @@ func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { } // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { +func (c *cSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { result = &v1beta1.CSIDriver{} err = c.client.Get(). Resource("csidrivers"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { +func (c *cSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, e Resource("csidrivers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *cSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("csidrivers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. 
-func (c *cSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { +func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { result = &v1beta1.CSIDriver{} err = c.client.Post(). Resource("csidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSIDriver). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { +func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { result = &v1beta1.CSIDriver{} err = c.client.Put(). Resource("csidrivers"). Name(cSIDriver.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSIDriver). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(name string, options *v1.DeleteOptions) error { +func (c *cSIDrivers) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("csidrivers"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *cSIDrivers) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1. VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { +func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { result = &v1beta1.CSIDriver{} err = c.client.Patch(pt). Resource("csidrivers"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go index e5540c1287..7f73701753 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/storage/v1beta1" @@ -37,14 +38,14 @@ type CSINodesGetter interface { // CSINodeInterface has methods to work with CSINode resources. 
type CSINodeInterface interface { - Create(*v1beta1.CSINode) (*v1beta1.CSINode, error) - Update(*v1beta1.CSINode) (*v1beta1.CSINode, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CSINode, error) - List(opts v1.ListOptions) (*v1beta1.CSINodeList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) + Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (*v1beta1.CSINode, error) + Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (*v1beta1.CSINode, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSINode, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSINodeList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) CSINodeExpansion } @@ -61,19 +62,19 @@ func newCSINodes(c *StorageV1beta1Client) *cSINodes { } // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { +func (c *cSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { result = &v1beta1.CSINode{} err = c.client.Get(). Resource("csinodes"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { +func (c *cSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err e Resource("csinodes"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *cSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("csinodes"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. 
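Watch now also takes the context: as the typed implementations show, Watch(ctx) hands ctx to the underlying request, so cancelling it tears down the streaming connection as well as any wait inside the client. A consumer sketch under the same assumptions (cs is an illustrative kubernetes.Interface):

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func watchCSINodes(ctx context.Context, cs kubernetes.Interface) error {
        w, err := cs.StorageV1beta1().CSINodes().Watch(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        defer w.Stop()
        for {
            select {
            case <-ctx.Done():
                return ctx.Err() // context cancellation closes the watch stream
            case ev, ok := <-w.ResultChan():
                if !ok {
                    return nil // server closed the watch; caller may re-establish it
                }
                fmt.Println(ev.Type)
            }
        }
    }
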
-func (c *cSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { +func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { result = &v1beta1.CSINode{} err = c.client.Post(). Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSINode). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { +func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { result = &v1beta1.CSINode{} err = c.client.Put(). Resource("csinodes"). Name(cSINode.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(cSINode). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(name string, options *v1.DeleteOptions) error { +func (c *cSINodes) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("csinodes"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *cSINodes) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *cSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { +func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { result = &v1beta1.CSINode{} err = c.client.Patch(pt). Resource("csinodes"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go index 2446316b2d..7fe60c7304 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go @@ -19,6 +19,8 @@ limitations under the License. 
package fake import ( + "context" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var csidriversResource = schema.GroupVersionResource{Group: "storage.k8s.io", Ve var csidriversKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "CSIDriver"} // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *FakeCSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(csidriversResource, name), &v1beta1.CSIDriver{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeCSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta } // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *FakeCSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { +func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1beta1.CSIDriverList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeCSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverLis } // Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *FakeCSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts)) } // Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeCSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.C } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeCSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.C } // Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *FakeCSIDrivers) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(csidriversResource, name), &v1beta1.CSIDriver{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeCSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(csidriversResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.CSIDriverList{}) @@ -110,7 +112,7 @@ func (c *FakeCSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched cSIDriver. -func (c *FakeCSIDrivers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { +func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1beta1.CSIDriver{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go index 0050f4743d..af22e22f9d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var csinodesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Vers var csinodesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "CSINode"} // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *FakeCSINodes) Get(name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { +func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(csinodesResource, name), &v1beta1.CSINode{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeCSINodes) Get(name string, options v1.GetOptions) (result *v1beta1. } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *FakeCSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { +func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1beta1.CSINodeList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeCSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, e } // Watch returns a watch.Interface that watches the requested cSINodes. -func (c *FakeCSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. 
-func (c *FakeCSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { +func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeCSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { +func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeCSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode } // Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *FakeCSINodes) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeCSINodes) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(csinodesResource, name), &v1beta1.CSINode{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeCSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeCSINodes) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(csinodesResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.CSINodeList{}) @@ -110,7 +112,7 @@ func (c *FakeCSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v } // Patch applies the patch and returns the patched cSINode. -func (c *FakeCSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { +func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1beta1.CSINode{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index 9fc8ca991e..8d53f8240d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io" var storageclassesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "StorageClass"} // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. 
-func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { +func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *v1 } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { +func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1beta1.StorageClassList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageC } // Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { +func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { +func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) if obj == nil { @@ -95,14 +97,14 @@ func (c *FakeStorageClasses) Update(storageClass *v1beta1.StorageClass) (result } // Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *FakeStorageClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeStorageClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1beta1.StorageClass{}) return err } // DeleteCollection deletes a collection of objects. 
-func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) @@ -110,7 +112,7 @@ func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOpt } // Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { +func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1beta1.StorageClass{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index 043098f455..30061b0619 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package fake import ( + "context" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -38,7 +40,7 @@ var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s. var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "VolumeAttachment"} // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{}) if obj == nil { @@ -48,7 +50,7 @@ func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { +func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { obj, err := c.Fake. Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1beta1.VolumeAttachmentList{}) if obj == nil { @@ -69,13 +71,13 @@ func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *v1beta1.Volum } // Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. 
InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) if obj == nil { @@ -85,7 +87,7 @@ func (c *FakeVolumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachmen } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) if obj == nil { @@ -96,7 +98,7 @@ func (c *FakeVolumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachmen // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) { obj, err := c.Fake. Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1beta1.VolumeAttachment{}) if obj == nil { @@ -106,14 +108,14 @@ func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAtt } // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttachmentList{}) @@ -121,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, list } // Patch applies the patch and returns the patched volumeAttachment. 
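The fake clients gain the same signatures purely for interface compatibility: as the bodies above show, ctx and the new options are accepted but unused, and calls are still recorded as testing actions against the object tracker. Existing unit tests therefore need only mechanical signature updates, e.g. (a sketch; the object name is illustrative):

    package fake_test

    import (
        "context"
        "testing"

        storagev1beta1 "k8s.io/api/storage/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestDeleteVolumeAttachment(t *testing.T) {
        cs := fake.NewSimpleClientset(&storagev1beta1.VolumeAttachment{
            ObjectMeta: metav1.ObjectMeta{Name: "va-1"},
        })
        // context.TODO() is fine here: the fake never inspects ctx.
        // Delete still takes *v1.DeleteOptions at this commit, so nil is allowed.
        if err := cs.StorageV1beta1().VolumeAttachments().Delete(context.TODO(), "va-1", nil); err != nil {
            t.Fatal(err)
        }
    }
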
-func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { +func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1beta1.VolumeAttachment{}) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 8a8f389161..553d40f684 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/storage/v1beta1" @@ -37,14 +38,14 @@ type StorageClassesGetter interface { // StorageClassInterface has methods to work with StorageClass resources. type StorageClassInterface interface { - Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) - Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.StorageClass, error) - List(opts v1.ListOptions) (*v1beta1.StorageClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) + Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (*v1beta1.StorageClass, error) + Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (*v1beta1.StorageClass, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StorageClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StorageClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) StorageClassExpansion } @@ -61,19 +62,19 @@ func newStorageClasses(c *StorageV1beta1Client) *storageClasses { } // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { +func (c *storageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} err = c.client.Get(). Resource("storageclasses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. 
-func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { +func (c *storageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -83,13 +84,13 @@ func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClass Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *storageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -99,44 +100,46 @@ func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { +func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} err = c.client.Post(). Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). Body(storageClass). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { +func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} err = c.client.Put(). Resource("storageclasses"). Name(storageClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(storageClass). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { +func (c *storageClasses) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("storageclasses"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *storageClasses) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -146,19 +149,20 @@ func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched storageClass. 
-func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { +func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} err = c.client.Patch(pt). Resource("storageclasses"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index d319407f27..f9eb953845 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" "time" v1beta1 "k8s.io/api/storage/v1beta1" @@ -37,15 +38,15 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - Update(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - UpdateStatus(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.VolumeAttachment, error) - List(opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) + Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + Delete(ctx context.Context, name string, opts *v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts *v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttachment, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) VolumeAttachmentExpansion } @@ -62,19 +63,19 @@ func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { } // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. 
-func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Get(). Resource("volumeattachments"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { +func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -84,13 +85,13 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAtt Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -100,59 +101,61 @@ func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Post(). Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(volumeAttachment). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { +func (c *volumeAttachments) Delete(ctx context.Context, name string, options *v1.DeleteOptions) error { return c.client.Delete(). Resource("volumeattachments"). Name(name). Body(options). - Do(). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *volumeAttachments) DeleteCollection(ctx context.Context, options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second @@ -162,19 +165,20 @@ func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOpti VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). - Do(). + Do(ctx). Error() } // Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { +func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { result = &v1beta1.VolumeAttachment{} err = c.client.Patch(pt). Resource("volumeattachments"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/metadata/metadata.go b/cluster-autoscaler/vendor/k8s.io/client-go/metadata/metadata.go index a94fe7a43f..55cce295d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/metadata/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/metadata/metadata.go @@ -17,6 +17,7 @@ limitations under the License. package metadata import ( + "context" "encoding/json" "fmt" "time" @@ -135,7 +136,7 @@ func (c *client) Delete(name string, opts *metav1.DeleteOptions, subresources .. Delete(). AbsPath(append(c.makeURLSegments(name), subresources...)...). Body(deleteOptionsByte). - Do() + Do(context.TODO()) return result.Error() } @@ -154,7 +155,7 @@ func (c *client) DeleteCollection(opts *metav1.DeleteOptions, listOptions metav1 AbsPath(c.makeURLSegments("")...). Body(deleteOptionsByte). SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). - Do() + Do(context.TODO()) return result.Error() } @@ -166,7 +167,7 @@ func (c *client) Get(name string, opts metav1.GetOptions, subresources ...string result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...). 
SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } @@ -202,7 +203,7 @@ func (c *client) List(opts metav1.ListOptions) (*metav1.PartialObjectMetadataLis result := c.client.client.Get().AbsPath(c.makeURLSegments("")...). SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json"). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } @@ -242,7 +243,7 @@ func (c *client) Watch(opts metav1.ListOptions) (watch.Interface, error) { SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). Timeout(timeout). - Watch() + Watch(context.TODO()) } // Patch modifies the named resource in the specified scope (namespace or cluster). @@ -256,7 +257,7 @@ func (c *client) Patch(name string, pt types.PatchType, data []byte, opts metav1 Body(data). SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go index 51b76e3050..6b3db4aa05 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go @@ -30,6 +30,7 @@ import ( "reflect" "strconv" "strings" + "sync" "time" "golang.org/x/net/http2" @@ -51,6 +52,9 @@ var ( // throttled (via the provided rateLimiter) for more than longThrottleLatency will // be logged. longThrottleLatency = 50 * time.Millisecond + + // extraLongThrottleLatency defines the threshold for logging requests at log level 2. + extraLongThrottleLatency = 1 * time.Second ) // HTTPClient is an interface for testing a request object. @@ -61,8 +65,8 @@ type HTTPClient interface { // ResponseWrapper is an interface for getting a response. // The response may be either accessed as a raw data (the whole output is put into memory) or as a stream. type ResponseWrapper interface { - DoRaw() ([]byte, error) - Stream() (io.ReadCloser, error) + DoRaw(context.Context) ([]byte, error) + Stream(context.Context) (io.ReadCloser, error) } // RequestConstructionError is returned when there's an error assembling a request. @@ -104,9 +108,6 @@ type Request struct { // output err error body io.Reader - - // This is only used for per-request timeouts, deadlines, and cancellations. - ctx context.Context } // NewRequest creates a new request helper object for accessing runtime.Objects on a server. @@ -438,13 +439,6 @@ func (r *Request) Body(obj interface{}) *Request { return r } -// Context adds a context to the request. Contexts are only used for -// timeouts, deadlines, and cancellations. 
-func (r *Request) Context(ctx context.Context) *Request { - r.ctx = ctx - return r -} - // URL returns the current working URL. func (r *Request) URL() *url.URL { p := r.pathPrefix @@ -548,29 +542,56 @@ func (r Request) finalURLTemplate() url.URL { return *url } -func (r *Request) tryThrottle() error { +func (r *Request) tryThrottle(ctx context.Context) error { if r.rateLimiter == nil { return nil } now := time.Now() - var err error - if r.ctx != nil { - err = r.rateLimiter.Wait(r.ctx) - } else { - r.rateLimiter.Accept() - } - if latency := time.Since(now); latency > longThrottleLatency { + err := r.rateLimiter.Wait(ctx) + + latency := time.Since(now) + if latency > longThrottleLatency { klog.V(3).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } + if latency > extraLongThrottleLatency { + globalThrottledLogger.Log(2, fmt.Sprintf("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())) + } return err } +type throttledLogger struct { + logTimeLock sync.RWMutex + lastLogTime time.Time + minLogInterval time.Duration +} + +var globalThrottledLogger = &throttledLogger{ + minLogInterval: 1 * time.Second, +} + +func (b *throttledLogger) Log(level klog.Level, message string) { + if bool(klog.V(level)) { + if func() bool { + b.logTimeLock.RLock() + defer b.logTimeLock.RUnlock() + return time.Since(b.lastLogTime) > b.minLogInterval + }() { + b.logTimeLock.Lock() + defer b.logTimeLock.Unlock() + if time.Since(b.lastLogTime) > b.minLogInterval { + klog.V(level).Info(message) + b.lastLogTime = time.Now() + } + } + } +} + // Watch attempts to begin watching the requested location. // Returns a watch.Interface, or an error. -func (r *Request) Watch() (watch.Interface, error) { +func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { // We specifically don't want to rate limit watches, so we // don't use r.rateLimiter here. if r.err != nil { @@ -582,9 +603,7 @@ func (r *Request) Watch() (watch.Interface, error) { if err != nil { return nil, err } - if r.ctx != nil { - req = req.WithContext(r.ctx) - } + req = req.WithContext(ctx) req.Header = r.headers client := r.c.Client if client == nil { @@ -659,12 +678,12 @@ func updateURLMetrics(req *Request, resp *http.Response, err error) { // Returns io.ReadCloser which could be used for streaming of the response, or an error // Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object. // If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response. -func (r *Request) Stream() (io.ReadCloser, error) { +func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { if r.err != nil { return nil, r.err } - if err := r.tryThrottle(); err != nil { + if err := r.tryThrottle(ctx); err != nil { return nil, err } @@ -676,9 +695,7 @@ func (r *Request) Stream() (io.ReadCloser, error) { if r.body != nil { req.Body = ioutil.NopCloser(r.body) } - if r.ctx != nil { - req = req.WithContext(r.ctx) - } + req = req.WithContext(ctx) req.Header = r.headers client := r.c.Client if client == nil { @@ -746,7 +763,7 @@ func (r *Request) requestPreflightCheck() error { // received. It handles retry behavior and up front validation of requests. It will invoke // fn at most once. It will return an error if a problem occurred prior to connecting to the // server - the provided function is responsible for handling server errors. 
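With the Context setter removed, per-request deadlines and cancellation arrive through the terminal call itself, and Request.Timeout is honored by wrapping that context inside request(). A sketch of the updated call shape, assuming a rest.Interface named c (illustrative):

    import (
        "context"
        "time"

        "k8s.io/client-go/rest"
    )

    func listPodsRaw(ctx context.Context, c rest.Interface) ([]byte, error) {
        // Before: c.Get().Resource("pods").Context(ctx).DoRaw()
        // After: the context is a required argument of Do/DoRaw/Watch/Stream.
        return c.Get().
            Resource("pods").
            Timeout(10 * time.Second).
            DoRaw(ctx)
    }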
-func (r *Request) request(fn func(*http.Request, *http.Response)) error { +func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Response)) error { //Metrics for total request latency start := time.Now() defer func() { @@ -767,26 +784,30 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { client = http.DefaultClient } + // Throttle the first try before setting up the timeout configured on the + // client. We don't want a throttled client to return timeouts to callers + // before it makes a single request. + if err := r.tryThrottle(ctx); err != nil { + return err + } + + if r.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + // Right now we make about ten retry attempts if we get a Retry-After response. maxRetries := 10 retries := 0 for { + url := r.URL().String() req, err := http.NewRequest(r.verb, url, r.body) if err != nil { return err } - if r.timeout > 0 { - if r.ctx == nil { - r.ctx = context.Background() - } - var cancelFn context.CancelFunc - r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout) - defer cancelFn() - } - if r.ctx != nil { - req = req.WithContext(r.ctx) - } + req = req.WithContext(ctx) req.Header = r.headers r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) @@ -794,7 +815,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // We are retrying the request that we already send to apiserver // at least once before. // This request should also be throttled with the client-internal rate limiter. - if err := r.tryThrottle(); err != nil { + if err := r.tryThrottle(ctx); err != nil { return err } } @@ -869,13 +890,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // Error type: // * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // * http.Client.Do errors are returned directly. -func (r *Request) Do() Result { - if err := r.tryThrottle(); err != nil { - return Result{err: err} - } - +func (r *Request) Do(ctx context.Context) Result { var result Result - err := r.request(func(req *http.Request, resp *http.Response) { + err := r.request(ctx, func(req *http.Request, resp *http.Response) { result = r.transformResponse(resp, req) }) if err != nil { @@ -885,13 +902,9 @@ func (r *Request) Do() Result { } // DoRaw executes the request but does not process the response body. -func (r *Request) DoRaw() ([]byte, error) { - if err := r.tryThrottle(); err != nil { - return nil, err - } - +func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { var result Result - err := r.request(func(req *http.Request, resp *http.Response) { + err := r.request(ctx, func(req *http.Request, resp *http.Response) { result.body, result.err = ioutil.ReadAll(resp.Body) glogBody("Response Body", result.body) if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/client.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/client.go index 17519345eb..26febcb8b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/client.go @@ -17,6 +17,7 @@ limitations under the License. package scale import ( + "context" "fmt" autoscaling "k8s.io/api/autoscaling/v1" @@ -154,7 +155,7 @@ func (c *namespacedScaleClient) Get(resource schema.GroupResource, name string) Resource(gvr.Resource). Name(name). SubResource("scale"). 
- Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } @@ -196,7 +197,7 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut Name(scale.Name). SubResource("scale"). Body(scaleUpdateBytes). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { // propagate "raw" error from the API // this allows callers to interpret underlying Reason field @@ -216,7 +217,7 @@ func (c *namespacedScaleClient) Patch(gvr schema.GroupVersionResource, name stri Name(name). SubResource("scale"). Body(data). - Do() + Do(context.TODO()) if err := result.Error(); err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/testing/actions.go b/cluster-autoscaler/vendor/k8s.io/client-go/testing/actions.go index f56b34ee87..b6b2c1f222 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/testing/actions.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/testing/actions.go @@ -439,8 +439,18 @@ func (a ActionImpl) GetSubresource() string { return a.Subresource } func (a ActionImpl) Matches(verb, resource string) bool { + // Stay backwards compatible. + if !strings.Contains(resource, "/") { + return strings.EqualFold(verb, a.Verb) && + strings.EqualFold(resource, a.Resource.Resource) + } + + parts := strings.SplitN(resource, "/", 2) + topresource, subresource := parts[0], parts[1] + return strings.EqualFold(verb, a.Verb) && - strings.EqualFold(resource, a.Resource.Resource) + strings.EqualFold(topresource, a.Resource.Resource) && + strings.EqualFold(subresource, a.Subresource) } func (a ActionImpl) DeepCopy() Action { ret := a diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/listwatch.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/listwatch.go index 10a1520358..10b7e6512e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -17,6 +17,8 @@ limitations under the License. package cache import ( + "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" @@ -82,7 +84,7 @@ func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, Namespace(namespace). Resource(resource). VersionedParams(&options, metav1.ParameterCodec). - Do(). + Do(context.TODO()). Get() } watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { @@ -92,7 +94,7 @@ func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, Namespace(namespace). Resource(resource). VersionedParams(&options, metav1.ParameterCodec). - Watch() + Watch(context.TODO()) } return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go index 32ef798a23..dfdc2e738e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -66,15 +66,18 @@ type Reflector struct { store Store // listerWatcher is used to perform lists and watches. listerWatcher ListerWatcher - // period controls timing between an unsuccessful watch ending and - // the beginning of the next list. 
- period time.Duration - // The period at which ShouldResync is invoked + + // backoff manages backoff of ListWatch + backoffManager wait.BackoffManager + resyncPeriod time.Duration // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked ShouldResync func() bool // clock allows tests to manipulate time clock clock.Clock + // paginatedResult defines whether pagination should be forced for list calls. + // It is set based on the result of the initial list call. + paginatedResult bool // lastSyncResourceVersion is the resource version token last // observed when doing a sync with the underlying store // it is thread safe, but not synchronized with the underlying store @@ -85,7 +88,12 @@ type Reflector struct { // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex // WatchListPageSize is the requested chunk size of initial and resync watch lists. - // Defaults to pager.PageSize. + // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data + // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") + // it will turn off pagination to allow serving them from watch cache. + // NOTE: It should be used carefully as paginated lists are always served directly from + // etcd, which is significantly less efficient and may lead to serious performance and + // scalability problems. WatchListPageSize int64 } @@ -119,13 +127,17 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn // NewNamedReflector same as NewReflector, but with a specified name for logging func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + realClock := &clock.RealClock{} r := &Reflector{ name: name, listerWatcher: lw, store: store, - period: time.Second, - resyncPeriod: resyncPeriod, - clock: &clock.RealClock{}, + // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when + // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is + // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + resyncPeriod: resyncPeriod, + clock: realClock, } r.setExpectedType(expectedType) return r @@ -160,12 +172,13 @@ var internalPackages = []string{"client-go/tools/cache/"} // objects and subsequent deltas. // Run will exit when stopCh is closed. 
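Run (below) now drives ListAndWatch under the exponential backoff manager configured in NewNamedReflector, replacing the fixed one-second relist period. The same loop shape, restated as a self-contained sketch with the parameters used above (doWork is an illustrative stand-in for ListAndWatch):

    import (
        "time"

        "k8s.io/apimachinery/pkg/util/clock"
        utilruntime "k8s.io/apimachinery/pkg/util/runtime"
        "k8s.io/apimachinery/pkg/util/wait"
    )

    func runWithBackoff(doWork func() error, stopCh <-chan struct{}) {
        // 800ms initial delay, 30s cap, reset after 2 minutes without backoff,
        // factor 2.0, jitter 1.0: the values NewNamedReflector passes in.
        bm := wait.NewExponentialBackoffManager(
            800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, &clock.RealClock{})
        wait.BackoffUntil(func() {
            if err := doWork(); err != nil {
                utilruntime.HandleError(err) // mirrors the handling in Run
            }
        }, bm, true, stopCh) // sliding=true: backoff is measured after doWork returns
    }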
func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) - wait.Until(func() { + klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { utilruntime.HandleError(err) } - }, r.period, stopCh) + }, r.backoffManager, true, stopCh) + klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) } var ( @@ -204,6 +217,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name}) defer initTrace.LogIfLong(10 * time.Second) var list runtime.Object + var paginatedResult bool var err error listCh := make(chan struct{}, 1) panicCh := make(chan interface{}, 1) @@ -218,11 +232,30 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { return r.listerWatcher.List(opts) })) - if r.WatchListPageSize != 0 { + switch { + case r.WatchListPageSize != 0: pager.PageSize = r.WatchListPageSize + case r.paginatedResult: + // We got a paginated result initially. Assume this resource and server honor + // paging requests (i.e. watch cache is probably disabled) and leave the default + // pager size set. + case options.ResourceVersion != "" && options.ResourceVersion != "0": + // User didn't explicitly request pagination. + // + // With ResourceVersion != "", we have a possibility to list from watch cache, + // but we do that (for ResourceVersion != "0") only if Limit is unset. + // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly + // switch off pagination to force listing from watch cache (if enabled). + // With the existing semantic of RV (result is at least as fresh as provided RV), + // this is correct and doesn't lead to going back in time. + // + // We also don't turn off pagination for ResourceVersion="0", since watch cache + // is ignoring Limit in that case anyway, and if watch cache is not enabled + // we don't introduce regression. + pager.PageSize = 0 } - list, err = pager.List(context.Background(), options) + list, paginatedResult, err = pager.List(context.Background(), options) if isExpiredError(err) { r.setIsLastSyncResourceVersionExpired(true) // Retry immediately if the resource version used to list is expired. @@ -230,7 +263,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // continuation pages, but the pager might not be enabled, or the full list might fail because the // resource version it is listing at is expired, so we need to fallback to resourceVersion="" in all // to recover and ensure the reflector makes forward progress. - list, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) + list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) } close(listCh) }() @@ -244,6 +277,21 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err != nil { return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedTypeName, err) } + + // We check if the list was paginated and if so set the paginatedResult based on that. + // However, we want to do that only for the initial list (which is the only case + // when we set ResourceVersion="0"). 
The reasoning behind it is that later, in some + // situations we may force listing directly from etcd (by setting ResourceVersion="") + // which will return a paginated result, even if watch cache is enabled. However, in + // that case, we still want to prefer sending requests to watch cache if possible. + // + // A paginated result returned for a request with ResourceVersion="0" means that watch + // cache is disabled and there are a lot of objects of a given type. In such a case, + // there is no need to prefer listing from watch cache. + if options.ResourceVersion == "0" && paginatedResult { + r.paginatedResult = true + } + + r.setIsLastSyncResourceVersionExpired(false) // list was successful initTrace.Step("Objects listed") listMetaInterface, err := meta.ListAccessor(list) @@ -320,7 +368,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err != nil { switch { case isExpiredError(err): - r.setIsLastSyncResourceVersionExpired(true) + // Don't set LastSyncResourceVersionExpired - a LIST call with ResourceVersion=RV already + // has the semantics that it returns data at least as fresh as the provided RV. + // So first try to LIST with RV set to the resource version of the last observed object. klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) case err == io.EOF: // watch closed normally @@ -344,8 +394,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err != errorStopRequested { switch { case isExpiredError(err): - r.setIsLastSyncResourceVersionExpired(true) - klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + // Don't set LastSyncResourceVersionExpired - a LIST call with ResourceVersion=RV already + // has the semantics that it returns data at least as fresh as the provided RV. + // So first try to LIST with RV set to the resource version of the last observed object.
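The page-size decision above reduces to a small pure function; this restatement is illustrative only and mirrors the switch in ListAndWatch:

    // pageSizeFor restates the pagination decision: an explicit WatchListPageSize
    // always wins; a previously paginated result keeps the default page size; and
    // any specific ResourceVersion other than "0" turns paging off so the list
    // can be served from the watch cache.
    func pageSizeFor(watchListPageSize int64, paginatedBefore bool, rv string, defaultSize int64) int64 {
        switch {
        case watchListPageSize != 0:
            return watchListPageSize
        case paginatedBefore:
            return defaultSize
        case rv != "" && rv != "0":
            return 0 // force a single unpaginated LIST
        default:
            return defaultSize
        }
    }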
+ klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) default: klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 6aefd56c21..826a70460b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -21,6 +21,7 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -486,7 +487,21 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { if err := s.indexer.Update(d.Object); err != nil { return err } - isSync := d.Type == Sync + + isSync := false + switch { + case d.Type == Sync: + // Sync events are only propagated to listeners that requested resync + isSync = true + case d.Type == Replaced: + if accessor, err := meta.Accessor(d.Object); err == nil { + if oldAccessor, err := meta.Accessor(old); err == nil { + // Replaced events that didn't change resourceVersion are treated as resync events + // and only propagated to listeners that requested resync + isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion() + } + } + } s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync) } else { if err := s.indexer.Add(d.Object); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index fd152b072c..63f805886a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -17,6 +17,7 @@ limitations under the License. 
package resourcelock import ( + "context" "encoding/json" "errors" "fmt" @@ -44,7 +45,7 @@ type ConfigMapLock struct { func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, []byte, error) { var record LeaderElectionRecord var err error - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{}) + cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(context.TODO(), cml.ConfigMapMeta.Name, metav1.GetOptions{}) if err != nil { return nil, nil, err } @@ -66,7 +67,7 @@ func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error { if err != nil { return err } - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(&v1.ConfigMap{ + cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: cml.ConfigMapMeta.Name, Namespace: cml.ConfigMapMeta.Namespace, @@ -74,7 +75,7 @@ func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error { LeaderElectionRecordAnnotationKey: string(recordBytes), }, }, - }) + }, metav1.CreateOptions{}) return err } @@ -88,7 +89,7 @@ func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { return err } cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(cml.cm) + cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(context.TODO(), cml.cm, metav1.UpdateOptions{}) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go index f5a8ffcc86..1c24736ce3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go @@ -17,6 +17,7 @@ limitations under the License. 
package resourcelock import ( + "context" "encoding/json" "errors" "fmt" @@ -39,7 +40,7 @@ type EndpointsLock struct { func (el *EndpointsLock) Get() (*LeaderElectionRecord, []byte, error) { var record LeaderElectionRecord var err error - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{}) + el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(context.TODO(), el.EndpointsMeta.Name, metav1.GetOptions{}) if err != nil { return nil, nil, err } @@ -61,7 +62,7 @@ func (el *EndpointsLock) Create(ler LeaderElectionRecord) error { if err != nil { return err } - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{ + el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(context.TODO(), &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: el.EndpointsMeta.Name, Namespace: el.EndpointsMeta.Namespace, @@ -69,7 +70,7 @@ func (el *EndpointsLock) Create(ler LeaderElectionRecord) error { LeaderElectionRecordAnnotationKey: string(recordBytes), }, }, - }) + }, metav1.CreateOptions{}) return err } @@ -86,7 +87,7 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error { el.e.Annotations = make(map[string]string) } el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e) + el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(context.TODO(), el.e, metav1.UpdateOptions{}) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 74016b8df1..8695ca9a48 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -17,6 +17,7 @@ limitations under the License. 
package resourcelock import ( + "context" "encoding/json" "errors" "fmt" @@ -39,7 +40,7 @@ type LeaseLock struct { // Get returns the election record from a Lease spec func (ll *LeaseLock) Get() (*LeaderElectionRecord, []byte, error) { var err error - ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ll.LeaseMeta.Name, metav1.GetOptions{}) + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(context.TODO(), ll.LeaseMeta.Name, metav1.GetOptions{}) if err != nil { return nil, nil, err } @@ -54,13 +55,13 @@ func (ll *LeaseLock) Get() (*LeaderElectionRecord, []byte, error) { // Create attempts to create a Lease func (ll *LeaseLock) Create(ler LeaderElectionRecord) error { var err error - ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(&coordinationv1.Lease{ + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(context.TODO(), &coordinationv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: ll.LeaseMeta.Name, Namespace: ll.LeaseMeta.Namespace, }, Spec: LeaderElectionRecordToLeaseSpec(&ler), - }) + }, metav1.CreateOptions{}) return err } @@ -71,7 +72,7 @@ func (ll *LeaseLock) Update(ler LeaderElectionRecord) error { } ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler) var err error - ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ll.lease) + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(context.TODO(), ll.lease, metav1.UpdateOptions{}) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go index 307808be1e..f6c6a01298 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go @@ -73,16 +73,18 @@ func New(fn ListPageFunc) *ListPager { // List returns a single list object, but attempts to retrieve smaller chunks from the // server to reduce the impact on the server. If the chunk attempt fails, it will load // the full list instead. The Limit field on options, if unset, will default to the page size. -func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { +func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { if options.Limit == 0 { options.Limit = p.PageSize } requestedResourceVersion := options.ResourceVersion var list *metainternalversion.List + paginatedResult := false + for { select { case <-ctx.Done(): - return nil, ctx.Err() + return nil, paginatedResult, ctx.Err() default: } @@ -93,23 +95,24 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti // failing when the resource versions is established by the first page request falls out of the compaction // during the subsequent list requests). if !errors.IsResourceExpired(err) || !p.FullListIfExpired || options.Continue == "" { - return nil, err + return nil, paginatedResult, err } // the list expired while we were processing, fall back to a full list at // the requested ResourceVersion. 
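ListPager.List now returns a second value reporting whether the result was actually served in pages, so every caller has to accept it even if it is discarded. A minimal sketch, assuming an already-constructed *pager.ListPager named p (illustrative):

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/client-go/tools/pager"
    )

    func listAll(ctx context.Context, p *pager.ListPager) (runtime.Object, error) {
        obj, paginated, err := p.List(ctx, metav1.ListOptions{ResourceVersion: "0"})
        if err != nil {
            return nil, err
        }
        _ = paginated // the reflector feeds this back into its page-size choice
        return obj, nil
    }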
options.Limit = 0 options.Continue = "" options.ResourceVersion = requestedResourceVersion - return p.PageFn(ctx, options) + result, err := p.PageFn(ctx, options) + return result, paginatedResult, err } m, err := meta.ListAccessor(obj) if err != nil { - return nil, fmt.Errorf("returned object must be a list: %v", err) + return nil, paginatedResult, fmt.Errorf("returned object must be a list: %v", err) } // exit early and return the object we got if we haven't processed any pages if len(m.GetContinue()) == 0 && list == nil { - return obj, nil + return obj, paginatedResult, nil } // initialize the list and fill its contents @@ -122,12 +125,12 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti list.Items = append(list.Items, obj) return nil }); err != nil { - return nil, err + return nil, paginatedResult, err } // if we have no more items, return the list if len(m.GetContinue()) == 0 { - return list, nil + return list, paginatedResult, nil } // set the next loop up @@ -136,6 +139,8 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti // `specifying resource version is not allowed when using continue` error. // See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143. options.ResourceVersion = "" + // At this point, result is already paginated. + paginatedResult = true } } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/fake.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/fake.go index 8546c1dd1c..2ff444ea88 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/fake.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/fake.go @@ -42,7 +42,7 @@ func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageF } func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - f.Eventf(object, eventtype, reason, messageFmt, args) + f.Eventf(object, eventtype, reason, messageFmt, args...) 
} // NewFakeRecorder creates new fake event recorder with event channel with diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/csr.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/csr.go index e36f2d3e13..f26c0859a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/csr.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/csr.go @@ -62,12 +62,12 @@ func RequestCertificate(client certificatesclient.CertificateSigningRequestInter csr.GenerateName = "csr-" } - req, err = client.Create(csr) + req, err = client.Create(context.TODO(), csr, metav1.CreateOptions{}) switch { case err == nil: case errors.IsAlreadyExists(err) && len(name) > 0: klog.Infof("csr for this node already exists, reusing") - req, err = client.Get(name, metav1.GetOptions{}) + req, err = client.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, formatError("cannot retrieve certificate signing request: %v", err) } @@ -87,11 +87,11 @@ func WaitForCertificate(ctx context.Context, client certificatesclient.Certifica lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return client.List(options) + return client.List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return client.Watch(options) + return client.Watch(context.TODO(), options) }, } event, err := watchtools.UntilWithSync( diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.mod b/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.mod index e88a5ca6e7..ccfb79b432 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.mod +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.mod @@ -9,7 +9,7 @@ require ( k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 k8s.io/klog v1.0.0 - k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc + k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31 ) replace ( diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.sum b/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.sum index 368bb2221c..5f192c47f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.sum +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/go.sum @@ -46,20 +46,19 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d 
h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -87,7 +86,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -102,7 +100,6 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -155,7 +152,6 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -178,8 +174,11 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= k8s.io/kube-openapi 
v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54= -k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31 h1:KCcLuc/HD1RogJgEbZi9ObRuLv1bgiRCfAbidLKrUpg= +k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/conditions.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/conditions.go index d96e751520..9350007520 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/conditions.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/conditions.go @@ -17,6 +17,7 @@ limitations under the License. package helpers import ( + "context" "encoding/json" "time" @@ -51,6 +52,6 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.N if err != nil { return err } - _, err = c.CoreV1().Nodes().PatchStatus(string(node), patch) + _, err = c.CoreV1().Nodes().PatchStatus(context.TODO(), string(node), patch) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/labels.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/labels.go index 80e77bb145..94f53edab9 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/labels.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/labels.go @@ -17,6 +17,7 @@ limitations under the License. package helpers import ( + "context" "encoding/json" "fmt" "time" @@ -64,10 +65,10 @@ func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. 
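The cache-first read pattern used by these node helpers, restated as a standalone sketch (names illustrative): ResourceVersion "0" lets the apiserver answer from its watch cache, and the retry drops it to force a fresh quorum read.

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    func getNodeCacheFirst(c clientset.Interface, name string, firstTry bool) (*v1.Node, error) {
        opts := metav1.GetOptions{}
        if firstTry {
            // Served from the watch cache: cheap, but possibly slightly stale.
            opts.ResourceVersion = "0"
        }
        return c.CoreV1().Nodes().Get(context.TODO(), name, opts)
    }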
if firstTry { - node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -94,7 +95,7 @@ func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la if err != nil { return fmt.Errorf("failed to create a two-way merge patch: %v", err) } - if _, err := kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch the node: %v", err) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/taints.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/taints.go index 2e3e31cce5..fa8009dd49 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/taints.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/node/helpers/taints.go @@ -25,6 +25,7 @@ is moved to an external repository, this file should be removed and replaced wit package helpers import ( + "context" "encoding/json" "fmt" "time" @@ -58,10 +59,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -106,7 +107,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/BUILD b/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/BUILD index 793b6d39f9..fa63eecc3a 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/BUILD @@ -8,6 +8,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go index f0eb5015f8..0609361762 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/service/helpers/helper.go @@ -17,11 +17,13 @@ limitations under the License. package helpers import ( + "context" "encoding/json" "fmt" "strings" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -136,7 +138,7 @@ func PatchService(c corev1.CoreV1Interface, oldSvc, newSvc *v1.Service) (*v1.Ser return nil, err } - return c.Services(oldSvc.Namespace).Patch(oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") + return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") } diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/BUILD index ea0fcfcd5c..16385904f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/BUILD @@ -94,8 +94,6 @@ package_group( "//staging/src/k8s.io/apiserver/pkg/admission/metrics", "//staging/src/k8s.io/apiserver/pkg/util/flowcontrol/metrics", "//staging/src/k8s.io/component-base/metrics/...", - "//test/e2e", - "//test/e2e/storage", "//test/e2e_node", "//vendor/...", ], diff --git a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.sum b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.sum index beded9573b..8c6e84a5ed 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.sum +++ b/cluster-autoscaler/vendor/k8s.io/csi-translation-lib/go.sum @@ -42,8 +42,9 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= @@ -90,7 +91,6 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -138,7 +138,6 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA 
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -158,7 +157,10 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go index 2c78594789..87e5b62ec6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -506,6 +506,26 @@ func dropInitContainerAnnotations(oldAnnotations map[string]string) map[string]s return newAnnotations } +// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is defined outside the autogenerated file for use by other API packages +func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) +} + +// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is defined outside the autogenerated file for use by other API packages +func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s) +} + +// Convert_core_Volume_To_v1_Volume is defined outside the autogenerated file for use by other API packages +func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + return autoConvert_core_Volume_To_v1_Volume(in, out, s) +} + +// Convert_v1_Volume_To_core_Volume is defined outside the autogenerated file for 
use by other API packages +func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + return autoConvert_v1_Volume_To_core_Volume(in, out, s) +} + // Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is defined outside the autogenerated file for use by other API packages func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { return autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go index 21697bf5a0..a930520f1b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go @@ -85,6 +85,24 @@ func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, erro return resource.ParseQuantity(pageSize) } +// HugePageUnitSizeFromByteSize returns the hugepage size in unit-suffixed format. +// `size` must divide evenly into the largest unit that can be expressed: +// B (1024 = "1KB", 1048576 = "1MB", etc). +func HugePageUnitSizeFromByteSize(size int64) (string, error) { + // hugePageSizeUnitList is borrowed from opencontainers/runc/libcontainer/cgroups/utils.go + var hugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"} + idx := 0 + len := len(hugePageSizeUnitList) - 1 + for size%1024 == 0 && idx < len { + size /= 1024 + idx++ + } + if size > 1024 && idx < len { + return "", fmt.Errorf("size: %d%s must divide evenly into the largest expressible unit", size, hugePageSizeUnitList[idx]) + } + return fmt.Sprintf("%d%s", size, hugePageSizeUnitList[idx]), nil +} + // IsOvercommitAllowed returns true if the resource is in the default // namespace and is not hugepages.
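A few values worked through the helper as added above, in the same package; the results are read off its loop rather than verified against a live system:

    s, _ := HugePageUnitSizeFromByteSize(2 * 1024 * 1024) // "2MB": 1024 divides twice
    s, _ = HugePageUnitSizeFromByteSize(1 << 30)          // "1GB": 1024 divides three times
    // 1536*1024 reduces to 1536 at the KB step; 1536 is neither divisible by
    // 1024 nor <= 1024, so the helper returns an error instead of "1536KB".
    _, err := HugePageUnitSizeFromByteSize(1536 * 1024)
    _, _ = s, err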
func IsOvercommitAllowed(name v1.ResourceName) bool { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go index ad7eb44594..1ed9df91e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -821,16 +821,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.LoadBalancerStatus)(nil), (*core.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(a.(*v1.LoadBalancerStatus), b.(*core.LoadBalancerStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.LoadBalancerStatus)(nil), (*v1.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(a.(*core.LoadBalancerStatus), b.(*v1.LoadBalancerStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.LocalObjectReference)(nil), (*core.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_LocalObjectReference_To_core_LocalObjectReference(a.(*v1.LocalObjectReference), b.(*core.LocalObjectReference), scope) }); err != nil { @@ -1916,16 +1906,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Volume_To_core_Volume(a.(*v1.Volume), b.(*core.Volume), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*v1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Volume_To_v1_Volume(a.(*core.Volume), b.(*v1.Volume), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1.VolumeDevice)(nil), (*core.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_VolumeDevice_To_core_VolumeDevice(a.(*v1.VolumeDevice), b.(*core.VolumeDevice), scope) }); err != nil { @@ -2056,6 +2036,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*core.LoadBalancerStatus)(nil), (*v1.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(a.(*core.LoadBalancerStatus), b.(*v1.LoadBalancerStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*core.NodeSpec)(nil), (*v1.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_core_NodeSpec_To_v1_NodeSpec(a.(*core.NodeSpec), b.(*v1.NodeSpec), scope) }); err != nil { @@ -2091,6 +2076,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*core.Volume)(nil), (*v1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Volume_To_v1_Volume(a.(*core.Volume), b.(*v1.Volume), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1.LoadBalancerStatus)(nil), (*core.LoadBalancerStatus)(nil), func(a, b interface{}, 
scope conversion.Scope) error { + return Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(a.(*v1.LoadBalancerStatus), b.(*core.LoadBalancerStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1.NodeSpec)(nil), (*core.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_NodeSpec_To_core_NodeSpec(a.(*v1.NodeSpec), b.(*core.NodeSpec), scope) }); err != nil { @@ -2151,6 +2146,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1.Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Volume_To_core_Volume(a.(*v1.Volume), b.(*core.Volume), scope) + }); err != nil { + return err + } return nil } @@ -4263,21 +4263,11 @@ func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBal return nil } -// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is an autogenerated conversion function. -func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s) -} - func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) return nil } -// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. -func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) -} - func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { out.Name = in.Name return nil @@ -7824,11 +7814,6 @@ func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s con return nil } -// Convert_v1_Volume_To_core_Volume is an autogenerated conversion function. -func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { - return autoConvert_v1_Volume_To_core_Volume(in, out, s) -} - func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { out.Name = in.Name if err := Convert_core_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { @@ -7837,11 +7822,6 @@ func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s con return nil } -// Convert_core_Volume_To_v1_Volume is an autogenerated conversion function. 
-func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error {
-	return autoConvert_core_Volume_To_v1_Volume(in, out, s)
-}
-
 func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error {
 	out.Name = in.Name
 	out.DevicePath = in.DevicePath
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
index 8e3cfd9d9e..cec7ebf1d3 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
@@ -4424,8 +4424,15 @@ func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *fie
 	return allErrs
 }
 
+// NodeValidationOptions contains the different settings for node validation
+type NodeValidationOptions struct {
+	// Should a node spec containing more than one huge page resource (with different sizes)
+	// with pre-allocated memory trigger validation errors
+	ValidateSingleHugePageResource bool
+}
+
 // ValidateNode tests if required fields in the node are set.
-func ValidateNode(node *core.Node) field.ErrorList {
+func ValidateNode(node *core.Node, opts NodeValidationOptions) field.ErrorList {
 	fldPath := field.NewPath("metadata")
 	allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath)
 	allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
@@ -4436,7 +4443,7 @@ func ValidateNode(node *core.Node) field.ErrorList {
 	// Only validate spec.
 	// All status fields are optional and can be updated later.
 	// That said, if specified, we need to ensure they are valid.
-	allErrs = append(allErrs, ValidateNodeResources(node)...)
+	allErrs = append(allErrs, ValidateNodeResources(node, opts)...)
 
 	// validate PodCIDRS only if we need to
 	if len(node.Spec.PodCIDRs) > 0 {
@@ -4476,13 +4483,33 @@ func ValidateNode(node *core.Node) field.ErrorList {
 }
 
 // ValidateNodeResources is used to make sure a node has valid capacity and allocatable values.
-func ValidateNodeResources(node *core.Node) field.ErrorList {
+func ValidateNodeResources(node *core.Node, opts NodeValidationOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if opts.ValidateSingleHugePageResource {
+		allErrs = append(allErrs, ValidateNodeSingleHugePageResources(node)...)
+	}
+
+	// Validate resource quantities in capacity.
+	for k, v := range node.Status.Capacity {
+		resPath := field.NewPath("status", "capacity", string(k))
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+	}
+
+	// Validate resource quantities in allocatable.
+	for k, v := range node.Status.Allocatable {
+		resPath := field.NewPath("status", "allocatable", string(k))
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+	}
+	return allErrs
+}
+
+// ValidateNodeSingleHugePageResources is used to make sure a node has valid capacity and allocatable values for the huge page resources.
+func ValidateNodeSingleHugePageResources(node *core.Node) field.ErrorList {
 	allErrs := field.ErrorList{}
 	// Validate resource quantities in capacity.
 	hugePageSizes := sets.NewString()
 	for k, v := range node.Status.Capacity {
 		resPath := field.NewPath("status", "capacity", string(k))
-		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
// track any huge page size that has a positive value if helper.IsHugePageResourceName(k) && v.Value() > int64(0) { hugePageSizes.Insert(string(k)) @@ -4495,7 +4522,6 @@ func ValidateNodeResources(node *core.Node) field.ErrorList { hugePageSizes = sets.NewString() for k, v := range node.Status.Allocatable { resPath := field.NewPath("status", "allocatable", string(k)) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) // track any huge page size that has a positive value if helper.IsHugePageResourceName(k) && v.Value() > int64(0) { hugePageSizes.Insert(string(k)) @@ -4508,7 +4534,7 @@ func ValidateNodeResources(node *core.Node) field.ErrorList { } // ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. -func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { +func ValidateNodeUpdate(node, oldNode *core.Node, opts NodeValidationOptions) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) @@ -4519,7 +4545,7 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty")) // } - allErrs = append(allErrs, ValidateNodeResources(node)...) + allErrs = append(allErrs, ValidateNodeResources(node, opts)...) // Validate no duplicate addresses in node status. addresses := make(map[core.NodeAddress]bool) @@ -5006,7 +5032,7 @@ func ValidateSecretUpdate(newSecret, oldSecret *core.Secret) field.ErrorList { allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...) if oldSecret.Immutable != nil && *oldSecret.Immutable { - if !reflect.DeepEqual(newSecret.Immutable, oldSecret.Immutable) { + if newSecret.Immutable == nil || !*newSecret.Immutable { allErrs = append(allErrs, field.Forbidden(field.NewPath("immutable"), "field is immutable when `immutable` is set")) } if !reflect.DeepEqual(newSecret.Data, oldSecret.Data) { @@ -5063,7 +5089,7 @@ func ValidateConfigMapUpdate(newCfg, oldCfg *core.ConfigMap) field.ErrorList { allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...) 
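For illustration, the Secret hunk above and the identical ConfigMap change that continues below replace a reflect.DeepEqual comparison with an explicit nil/value test: once an object is marked immutable, an update may not clear or flip the flag. A minimal, self-contained sketch of that rule; the function and names are illustrative, not vendored code.

    package main

    import "fmt"

    // immutableViolated reports whether an update illegally unlocks an object:
    // if the old object was immutable, the new one must keep immutable == true.
    func immutableViolated(oldImmutable, newImmutable *bool) bool {
            if oldImmutable == nil || !*oldImmutable {
                    return false // never locked, any change is allowed
            }
            return newImmutable == nil || !*newImmutable
    }

    func main() {
            t, f := true, false
            fmt.Println(immutableViolated(&t, &t))  // false: stays immutable
            fmt.Println(immutableViolated(&t, nil)) // true: clearing is forbidden
            fmt.Println(immutableViolated(&t, &f))  // true: unlocking is forbidden
            fmt.Println(immutableViolated(nil, &t)) // false: locking for the first time is fine
    }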
if oldCfg.Immutable != nil && *oldCfg.Immutable { - if !reflect.DeepEqual(newCfg.Immutable, oldCfg.Immutable) { + if newCfg.Immutable == nil || !*newCfg.Immutable { allErrs = append(allErrs, field.Forbidden(field.NewPath("immutable"), "field is immutable when `immutable` is set")) } if !reflect.DeepEqual(newCfg.Data, oldCfg.Data) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/.import-restrictions index 1f266d036f..611ddf966d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/.import-restrictions +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/.import-restrictions @@ -1,18 +1,5 @@ { "Rules": [ - { - "SelectorRegexp": "k8s[.]io/utils", - "AllowedPrefixes": [ - "k8s.io/utils/exec", - "k8s.io/utils/io", - "k8s.io/utils/keymutex", - "k8s.io/utils/mount", - "k8s.io/utils/net", - "k8s.io/utils/nsenter", - "k8s.io/utils/path", - "k8s.io/utils/strings" - ] - }, { "SelectorRegexp": "k8s[.]io/kubernetes", "AllowedPrefixes": [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions index 06305c5c73..67a6a8c068 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions @@ -11,309 +11,6 @@ "ForbiddenPrefixes": [ "k8s.io/kubernetes/pkg/client/unversioned/testclient" ] - }, - { - "SelectorRegexp": "k8s[.]io/(api/|apimachinery/|apiextensions-apiserver/|apiserver/)", - "AllowedPrefixes": [ - "k8s.io/api/apps/v1", - "k8s.io/api/apps/v1beta1", - "k8s.io/api/authentication/v1", - "k8s.io/api/authorization/v1beta1", - "k8s.io/api/autoscaling/v1", - "k8s.io/api/autoscaling/v2beta1", - "k8s.io/api/autoscaling/v2beta2", - "k8s.io/api/batch/v1", - "k8s.io/api/batch/v1beta1", - "k8s.io/api/certificates/v1beta1", - "k8s.io/api/core/v1", - "k8s.io/api/coordination/v1", - "k8s.io/api/discovery/v1beta1", - "k8s.io/api/extensions/v1beta1", - "k8s.io/api/policy/v1beta1", - "k8s.io/api/rbac/v1", - "k8s.io/api/storage/v1", - "k8s.io/apimachinery/pkg/api/equality", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/meta", - "k8s.io/apimachinery/pkg/api/resource", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "k8s.io/apimachinery/pkg/conversion", - "k8s.io/apimachinery/pkg/fields", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/runtime/serializer", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/clock", - "k8s.io/apimachinery/pkg/util/diff", - "k8s.io/apimachinery/pkg/util/errors", - "k8s.io/apimachinery/pkg/util/intstr", - "k8s.io/apimachinery/pkg/util/json", - "k8s.io/apimachinery/pkg/util/rand", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/sets", - "k8s.io/apimachinery/pkg/util/strategicpatch", - "k8s.io/apimachinery/pkg/util/uuid", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apimachinery/pkg/util/version", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/apiserver/pkg/authentication/serviceaccount", - "k8s.io/apiserver/pkg/storage/names", - "k8s.io/apiserver/pkg/util/feature", - "k8s.io/apiextensions-apiserver/pkg/features", - "k8s.io/apimachinery/pkg/api/validation", - 
"k8s.io/apimachinery/pkg/apis/meta/internalversion", - "k8s.io/apimachinery/pkg/selection", - "k8s.io/apimachinery/pkg/util/validation", - "k8s.io/apimachinery/pkg/util/validation/field", - "k8s.io/apiserver/pkg/authentication/authenticator", - "k8s.io/apiserver/pkg/authentication/user", - "k8s.io/apiserver/pkg/features", - "k8s.io/apiserver/pkg/registry/generic", - "k8s.io/apimachinery/pkg/version", - "k8s.io/api/imagepolicy/v1alpha1", - "k8s.io/apiserver/pkg/admission", - "k8s.io/apiserver/pkg/server/dynamiccertificates", - "k8s.io/apiserver/pkg/storage", - "k8s.io/api/batch/v2alpha1", - "k8s.io/apiserver/pkg/registry/rest", - "k8s.io/api/scheduling/v1alpha1", - "k8s.io/api/admissionregistration/v1", - "k8s.io/api/admissionregistration/v1beta1", - "k8s.io/api/authorization/v1", - "k8s.io/api/settings/v1alpha1", - "k8s.io/api/admission/v1beta1", - "k8s.io/api/admission/v1", - "k8s.io/api/networking/v1", - "k8s.io/component-base/config", - "k8s.io/component-base/config/v1alpha1", - "k8s.io/api/scheduling/v1" - ] - }, - { - "SelectorRegexp": "github[.]com/", - "AllowedPrefixes": [ - "github.com/cloudflare/cfssl/config", - "github.com/cloudflare/cfssl/helpers", - "github.com/cloudflare/cfssl/signer", - "github.com/cloudflare/cfssl/signer/local", - "github.com/davecgh/go-spew/spew", - "github.com/docker/distribution/reference", - "github.com/evanphx/json-patch", - "github.com/golang/groupcache/lru", - "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud", - "github.com/google/gofuzz", - "github.com/prometheus/client_golang/prometheus", - "github.com/robfig/cron", - "github.com/spf13/pflag", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "github.com/stretchr/testify/require", - "github.com/google/gofuzz", - "github.com/golang/protobuf/ptypes/wrappers", - "github.com/golang/protobuf/proto", - "github.com/container-storage-interface/spec/lib/go/csi" - ] - }, - { - "SelectorRegexp": "k8s[.]io/client-go/", - "AllowedPrefixes": [ - "k8s.io/client-go/util/keyutil", - "k8s.io/client-go/discovery", - "k8s.io/client-go/dynamic", - "k8s.io/client-go/informers", - "k8s.io/client-go/informers/apps/v1", - "k8s.io/client-go/informers/apps/v1beta1", - "k8s.io/client-go/informers/autoscaling/v1", - "k8s.io/client-go/informers/batch/v1", - "k8s.io/client-go/informers/certificates/v1beta1", - "k8s.io/client-go/informers/core/v1", - "k8s.io/client-go/informers/extensions/v1beta1", - "k8s.io/client-go/informers/policy/v1beta1", - "k8s.io/client-go/informers/rbac/v1", - "k8s.io/client-go/informers/storage/v1", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/fake", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/kubernetes/typed/apps/v1", - "k8s.io/client-go/kubernetes/typed/authentication/v1", - "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "k8s.io/client-go/kubernetes/typed/rbac/v1", - "k8s.io/client-go/listers/apps/v1", - "k8s.io/client-go/listers/apps/v1beta1", - "k8s.io/client-go/listers/autoscaling/v1", - "k8s.io/client-go/listers/batch/v1", - "k8s.io/client-go/listers/certificates/v1beta1", - "k8s.io/client-go/listers/coordination/v1", - "k8s.io/client-go/listers/core/v1", - "k8s.io/client-go/listers/discovery/v1alpha1", - "k8s.io/client-go/listers/discovery/v1beta1", - "k8s.io/client-go/listers/coordination/v1", - "k8s.io/client-go/listers/extensions/v1beta1", - 
"k8s.io/client-go/listers/policy/v1beta1", - "k8s.io/client-go/listers/rbac/v1", - "k8s.io/client-go/listers/storage/v1", - "k8s.io/client-go/metadata", - "k8s.io/client-go/pkg/version", - "k8s.io/client-go/rest", - "k8s.io/client-go/scale", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/leaderelection/resourcelock", - "k8s.io/client-go/tools/pager", - "k8s.io/client-go/tools/record", - "k8s.io/client-go/tools/reference", - "k8s.io/client-go/tools/watch", - "k8s.io/client-go/transport", - "k8s.io/client-go/util/cert", - "k8s.io/client-go/util/flowcontrol", - "k8s.io/client-go/util/retry", - "k8s.io/client-go/util/testing", - "k8s.io/client-go/util/workqueue" - ] - }, - { - "SelectorRegexp": "k8s[.]io/kubernetes/pkg", - "AllowedPrefixes": [ - "k8s.io/kubernetes/pkg/api/legacyscheme", - "k8s.io/kubernetes/pkg/api/v1/endpoints", - "k8s.io/kubernetes/pkg/api/v1/node", - "k8s.io/kubernetes/pkg/api/v1/pod", - "k8s.io/kubernetes/pkg/apis/apps/v1", - "k8s.io/kubernetes/pkg/apis/autoscaling", - "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - "k8s.io/kubernetes/pkg/apis/core", - "k8s.io/kubernetes/pkg/apis/core/helper", - "k8s.io/kubernetes/pkg/apis/core/install", - "k8s.io/kubernetes/pkg/apis/core/v1", - "k8s.io/kubernetes/pkg/apis/core/v1/helper", - "k8s.io/kubernetes/pkg/apis/core/validation", - "k8s.io/kubernetes/pkg/apis/discovery", - "k8s.io/kubernetes/pkg/apis/discovery/validation", - "k8s.io/kubernetes/pkg/cloudprovider", - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "k8s.io/kubernetes/pkg/controller", - "k8s.io/kubernetes/pkg/controller/apis/config", - "k8s.io/kubernetes/pkg/controller/apis/config/v1alpha1", - "k8s.io/kubernetes/pkg/controller/certificates", - "k8s.io/kubernetes/pkg/controller/daemon", - "k8s.io/kubernetes/pkg/controller/daemon/util", - "k8s.io/kubernetes/pkg/controller/deployment", - "k8s.io/kubernetes/pkg/controller/deployment/util", - "k8s.io/kubernetes/pkg/controller/garbagecollector", - "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", - "k8s.io/kubernetes/pkg/controller/history", - "k8s.io/kubernetes/pkg/controller/job", - "k8s.io/kubernetes/pkg/controller/namespace", - "k8s.io/kubernetes/pkg/controller/namespace/deletion", - "k8s.io/kubernetes/pkg/controller/nodeipam", - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam", - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset", - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync", - "k8s.io/kubernetes/pkg/controller/nodelifecycle", - "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler", - "k8s.io/kubernetes/pkg/controller/podautoscaler", - "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", - "k8s.io/kubernetes/pkg/controller/replicaset", - "k8s.io/kubernetes/pkg/controller/util/node", - "k8s.io/kubernetes/pkg/controller/volume/attachdetach", - "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", - "k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics", - "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", - "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", - "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", - "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util", - "k8s.io/kubernetes/pkg/controller/volume/events", - "k8s.io/kubernetes/pkg/controller/volume/expand", - "k8s.io/kubernetes/pkg/controller/volume/expand/cache", - "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", - "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics", - 
"k8s.io/kubernetes/pkg/features", - "k8s.io/kubernetes/pkg/kubectl/scheme", - "k8s.io/kubernetes/pkg/kubelet/apis", - "k8s.io/kubernetes/pkg/kubelet/events", - "k8s.io/kubernetes/pkg/kubelet/types", - "k8s.io/kubernetes/pkg/kubelet/util/format", - "k8s.io/kubernetes/pkg/quota", - "k8s.io/kubernetes/pkg/registry/core/secret", - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper", - "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1", - "k8s.io/kubernetes/pkg/scheduler/nodeinfo", - "k8s.io/kubernetes/pkg/serviceaccount", - "k8s.io/kubernetes/pkg/util/goroutinemap", - "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", - "k8s.io/kubernetes/pkg/util/hash", - "k8s.io/kubernetes/pkg/util/labels", - "k8s.io/kubernetes/pkg/util/node", - "k8s.io/kubernetes/pkg/util/slice", - "k8s.io/kubernetes/pkg/util/taints", - "k8s.io/kubernetes/pkg/volume", - "k8s.io/kubernetes/pkg/volume/util", - "k8s.io/kubernetes/pkg/volume/util/operationexecutor", - "k8s.io/kubernetes/pkg/volume/util/recyclerclient", - "k8s.io/kubernetes/pkg/volume/util/subpath", - "k8s.io/kubernetes/pkg/volume/util/types", - "k8s.io/kubernetes/pkg/volume/util/volumepathhandler", - "k8s.io/kubernetes/pkg/api/service", - "k8s.io/kubernetes/pkg/apis/scheduling", - "k8s.io/kubernetes/pkg/capabilities", - "k8s.io/kubernetes/pkg/master/ports", - "k8s.io/kubernetes/pkg/scheduler/api", - "k8s.io/kubernetes/pkg/scheduler/util", - "k8s.io/kubernetes/pkg/scheduler/listers", - "k8s.io/kubernetes/pkg/security/apparmor", - "k8s.io/kubernetes/pkg/util/parsers", - "k8s.io/kubernetes/pkg/fieldpath", - "k8s.io/kubernetes/pkg/scheduler/volumebinder", - "k8s.io/kubernetes/pkg/util/resizefs", - "k8s.io/kubernetes/pkg/apis/apps", - "k8s.io/kubernetes/pkg/scheduler/metrics" - ] - }, - { - "SelectorRegexp": "k8s[.]io/(metrics/|utils/|heapster/|kube-controller-manager/)", - "AllowedPrefixes": [ - "k8s.io/heapster/metrics/api/v1/types", - "k8s.io/kube-controller-manager/config/v1alpha1", - "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2", - "k8s.io/metrics/pkg/apis/external_metrics/v1beta1", - "k8s.io/metrics/pkg/apis/metrics/v1alpha1", - "k8s.io/metrics/pkg/apis/metrics/v1beta1", - "k8s.io/metrics/pkg/client/clientset/versioned/scheme", - "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1", - "k8s.io/metrics/pkg/client/custom_metrics", - "k8s.io/metrics/pkg/client/external_metrics", - "k8s.io/utils/exec", - "k8s.io/utils/integer", - "k8s.io/utils/io", - "k8s.io/utils/mount", - "k8s.io/utils/net", - "k8s.io/utils/nsenter", - "k8s.io/utils/path", - "k8s.io/utils/pointer", - "k8s.io/utils/strings" - ] - }, - { - "SelectorRegexp": "golang[.]org/", - "AllowedPrefixes": [ - "golang.org/x/time/rate", - "golang.org/x/sys/unix", - "golang.org/x/oauth2", - "golang.org/x/net/context", - "google.golang.org/api/compute/v1", - "google.golang.org/api/googleapi", - "google.golang.org/api/compute/v0.alpha", - "google.golang.org/api/container/v1", - "google.golang.org/api/compute/v0.beta", - "google.golang.org/api/tpu/v1", - "golang.org/x/net/context", - "google.golang.org/grpc" - ] } ] } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go index 54b4dbb06f..b9d8421c5e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go @@ -22,7 +22,7 @@ import ( "time" v1authenticationapi "k8s.io/api/authentication/v1" - 
"k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -120,11 +120,11 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return b.CoreClient.Secrets(b.Namespace).List(options) + return b.CoreClient.Secrets(b.Namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return b.CoreClient.Secrets(b.Namespace).Watch(options) + return b.CoreClient.Secrets(b.Namespace).Watch(context.TODO(), options) }, } ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -157,7 +157,7 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro if !valid { klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Namespace, sa.Name) // try to delete the secret containing the invalid token - if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + if err := b.CoreClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Namespace, sa.Name, err) } // continue watching for good tokens @@ -186,7 +186,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, // Try token review first tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}} - if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil { + if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview, metav1.CreateOptions{}); err == nil { if !tokenResult.Status.Authenticated { klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Namespace, sa.Name) return nil, false, nil @@ -207,7 +207,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, if err != nil { return nil, false, err } - err = client.Get().AbsPath("/apis").Do().Error() + err = client.Get().AbsPath("/apis").Do(context.TODO()).Error() if apierrors.IsUnauthorized(err) { klog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Namespace, sa.Name, err) return nil, false, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder_dynamic.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder_dynamic.go index 19aaded8bf..4ece4e0af6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder_dynamic.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder_dynamic.go @@ -17,6 +17,7 @@ limitations under the License. 
package controller import ( + "context" "fmt" "net/http" "sync" @@ -25,6 +26,7 @@ import ( "golang.org/x/oauth2" v1authenticationapi "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" @@ -174,11 +176,11 @@ func (ts *tokenSourceImpl) Token() (*oauth2.Token, error) { return false, nil } - tr, inErr := ts.coreClient.ServiceAccounts(ts.namespace).CreateToken(ts.serviceAccountName, &v1authenticationapi.TokenRequest{ + tr, inErr := ts.coreClient.ServiceAccounts(ts.namespace).CreateToken(context.TODO(), ts.serviceAccountName, &v1authenticationapi.TokenRequest{ Spec: v1authenticationapi.TokenRequestSpec{ ExpirationSeconds: utilpointer.Int64Ptr(ts.expirationSeconds), }, - }) + }, metav1.CreateOptions{}) if inErr != nil { klog.Warningf("get token failed: %v", inErr) return false, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index ec876e1eb2..772c2a218f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "context" "encoding/binary" "encoding/json" "fmt" @@ -419,7 +420,7 @@ type RealRSControl struct { var _ RSControlInterface = &RealRSControl{} func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error { - _, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) return err } @@ -439,7 +440,7 @@ type RealControllerRevisionControl struct { var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{} func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name string, data []byte) error { - _, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) return err } @@ -536,7 +537,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v } func (r RealPodControl) PatchPod(namespace, name string, data []byte) error { - _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) return err } @@ -576,7 +577,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT if len(labels.Set(pod.Labels)) == 0 { return fmt.Errorf("unable to create pods, no labels") } - newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod) + newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) if err != nil { // only send an event if the namespace isn't terminating if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { @@ -601,7 +602,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime return fmt.Errorf("object does not have ObjectMeta, %v", err) } 
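For illustration, the controller hunks above and below all make the same mechanical change: every client-go verb now takes a context.Context first, and mutating verbs take explicit Create/Update/Patch/Delete options. A minimal sketch of the new call shape, using the Get signature visible in these hunks; getNode and its package are illustrative, not vendored code.

    package nodeutil

    import (
            "context"
            "fmt"

            metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
            "k8s.io/client-go/kubernetes"
    )

    // getNode fetches a node with the post-migration signature: a leading
    // context.Context plus the usual metav1.GetOptions.
    func getNode(client kubernetes.Interface, name string) error {
            node, err := client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
            if err != nil {
                    return err
            }
            fmt.Printf("node %s has %d labels\n", node.Name, len(node.Labels))
            return nil
    }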
klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) - if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil && !apierrors.IsNotFound(err) { + if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, nil); err != nil && !apierrors.IsNotFound(err) { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) } @@ -1013,10 +1014,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -1070,10 +1071,10 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -1118,7 +1119,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } @@ -1147,10 +1148,10 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. 
if firstTry { - node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -1177,7 +1178,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la if err != nil { return fmt.Errorf("failed to create a two-way merge patch: %v", err) } - if _, err := kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch the node: %v", err) } return nil @@ -1185,7 +1186,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la } func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) { - sa, err := coreClient.ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { return sa, nil } @@ -1195,17 +1196,17 @@ func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, nam // Create the namespace if we can't verify it exists. // Tolerate errors, since we don't know whether this component has namespace creation permissions. - if _, err := coreClient.Namespaces().Get(namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { - if _, err = coreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { + if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { klog.Warningf("create non-exist namespace %s failed:%v", namespace, err) } } // Create the service account - sa, err = coreClient.ServiceAccounts(namespace).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}) + sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { // If we're racing to init and someone else already created it, re-fetch - return coreClient.ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } return sa, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go index f951ea1581..26305c776a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go @@ -17,6 +17,7 @@ limitations under the License. 
package util
 
 import (
+	"context"
 	"fmt"
 	"math"
 	"sort"
@@ -545,7 +546,7 @@ func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface)
 // RsListFromClient returns an rsListFunc that wraps the given client.
 func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
 	return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
-		rsList, err := c.ReplicaSets(namespace).List(options)
+		rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options)
 		if err != nil {
 			return nil, err
 		}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go
index c922748441..3f526ae09d 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go
@@ -64,6 +64,13 @@ const (
 	// recognize dynamically provisioned PVs in its decisions).
 	AnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
 
+	// AnnMigratedTo annotation is added to a PVC and PV that is supposed to be
+	// dynamically provisioned/deleted by its corresponding CSI driver
+	// through the CSIMigration feature flags. When this annotation is set the
+	// Kubernetes components will "stand-down" and the external-provisioner will
+	// act on the objects
+	AnnMigratedTo = "pv.kubernetes.io/migrated-to"
+
 	// AnnStorageProvisioner annotation is added to a PVC that is supposed to be dynamically
 	// provisioned. Its value is name of volume plugin that is supposed to provision
 	// a volume for this PVC.
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go
index 21bd947e13..564e26eef3 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go
@@ -17,6 +17,7 @@ limitations under the License.
 package scheduling
 
 import (
+	"context"
 	"fmt"
 	"sort"
 	"strings"
@@ -423,7 +424,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
 		// TODO: does it hurt if we make an api call and nothing needs to be updated?
claimKey := claimToClaimKey(binding.pvc) klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name) - newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv) + newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{}) if err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err) return err @@ -438,7 +439,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // PV controller is expect to signal back by removing related annotations if actual provisioning fails for i, claim = range claimsToProvision { klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) - newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim) + newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{}) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 623c2f39f1..b14d12663f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -239,6 +239,15 @@ const ( // to the API server. BoundServiceAccountTokenVolume featuregate.Feature = "BoundServiceAccountTokenVolume" + // owner: @mtaufen + // alpha: v1.18 + // + // Enable OIDC discovery endpoints (issuer and JWKS URLs) for the service + // account issuer in the API server. + // Note these endpoints serve minimally-compliant discovery docs that are + // intended to be used for service account token verification. + ServiceAccountIssuerDiscovery featuregate.Feature = "ServiceAccountIssuerDiscovery" + // owner: @Random-Liu // beta: v1.11 // @@ -429,6 +438,7 @@ const ( // owner: @adisky // alpha: v1.14 + // beta: v1.18 // // Enables the OpenStack Cinder in-tree driver to OpenStack Cinder CSI Driver migration feature. 
CSIMigrationOpenStack featuregate.Feature = "CSIMigrationOpenStack" @@ -572,6 +582,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS TokenRequest: {Default: true, PreRelease: featuregate.Beta}, TokenRequestProjection: {Default: true, PreRelease: featuregate.Beta}, BoundServiceAccountTokenVolume: {Default: false, PreRelease: featuregate.Alpha}, + ServiceAccountIssuerDiscovery: {Default: false, PreRelease: featuregate.Alpha}, CRIContainerLogRotation: {Default: true, PreRelease: featuregate.Beta}, CSIMigration: {Default: true, PreRelease: featuregate.Beta}, CSIMigrationGCE: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires GCE PD CSI Driver) @@ -583,7 +594,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CSIMigrationAzureFile: {Default: false, PreRelease: featuregate.Alpha}, CSIMigrationAzureFileComplete: {Default: false, PreRelease: featuregate.Alpha}, RunAsGroup: {Default: true, PreRelease: featuregate.Beta}, - CSIMigrationOpenStack: {Default: false, PreRelease: featuregate.Alpha}, + CSIMigrationOpenStack: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires OpenStack Cinder CSI driver) CSIMigrationOpenStackComplete: {Default: false, PreRelease: featuregate.Alpha}, VolumeSubpath: {Default: true, PreRelease: featuregate.GA}, BalanceAttachedNodeVolumes: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server.go index f39e2b26ce..63ac1b9470 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server.go @@ -26,6 +26,7 @@ import ( // DevicesProvider knows how to provide the devices used by the given container type DevicesProvider interface { GetDevices(podUID, containerName string) []*v1alpha1.ContainerDevices + UpdateAllocatedDevices() } // PodsProvider knows how to provide the pods admitted by the node @@ -52,6 +53,7 @@ func NewPodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesPro func (p *podResourcesServer) List(ctx context.Context, req *v1alpha1.ListPodResourcesRequest) (*v1alpha1.ListPodResourcesResponse, error) { pods := p.podsProvider.GetPods() podResources := make([]*v1alpha1.PodResources, len(pods)) + p.devicesProvider.UpdateAllocatedDevices() for i, pod := range pods { pRes := v1alpha1.PodResources{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go index 7bdc42b3cb..1d0bc9cc0c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go @@ -291,7 +291,7 @@ func waitForServer(cfg restclient.Config, deadline time.Duration) error { var connected bool wait.JitterUntil(func() { - if _, err := cli.Get().AbsPath("/healthz").Do().Raw(); err != nil { + if _, err := cli.Get().AbsPath("/healthz").Do(context.TODO()).Raw(); err != nil { klog.Infof("Failed to connect to apiserver: %v", err) return } @@ -352,6 +352,7 @@ func requestNodeCertificate(client certificatesv1beta1.CertificateSigningRequest ctx, cancel := context.WithTimeout(context.Background(), 3600*time.Second) defer cancel() 
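For illustration, a sketch of how a component would consult the ServiceAccountIssuerDiscovery gate registered in the kube_features.go hunk above; the helper below is illustrative, not vendored code, and the gate defaults to off.

    // Assumed context: this sits in k8s.io/kubernetes/pkg/features, where the
    // gate constant above is declared.
    package features

    import (
            utilfeature "k8s.io/apiserver/pkg/util/feature"
    )

    // serviceAccountIssuerDiscoveryEnabled reports whether the alpha gate is
    // on; with Default: false it stays off unless the component is started
    // with --feature-gates=ServiceAccountIssuerDiscovery=true.
    func serviceAccountIssuerDiscoveryEnabled() bool {
            return utilfeature.DefaultFeatureGate.Enabled(ServiceAccountIssuerDiscovery)
    }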
+ klog.V(2).Infof("Waiting for client certificate to be issued") return csr.WaitForCertificate(ctx, client, req) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go index 1bad39979c..41dbed9567 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go @@ -24,12 +24,12 @@ import ( "strings" "time" - units "github.com/docker/go-units" libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" cgroupsystemd "github.com/opencontainers/runc/libcontainer/cgroups/systemd" libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs" "k8s.io/klog" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -387,7 +387,11 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont // for each page size enumerated, set that value pageSizes := sets.NewString() for pageSize, limit := range resourceConfig.HugePageLimit { - sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, libcontainercgroups.HugePageSizeUnitList) + sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize) + if err != nil { + klog.Warningf("pageSize is invalid: %v", err) + continue + } resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainerconfigs.HugepageLimit{ Pagesize: sizeString, Limit: uint64(limit), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go index b91102f6a5..caa3de666c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go @@ -113,6 +113,9 @@ type ContainerManager interface { // GetTopologyPodAdmitHandler returns an instance of the TopologyManager for Pod Admission GetTopologyPodAdmitHandler() topologymanager.Manager + + // UpdateAllocatedDevices frees any Devices that are bound to terminated pods. 
+ UpdateAllocatedDevices() } type NodeConfig struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go index c4056941aa..d5df6f18c5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go @@ -946,3 +946,7 @@ func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podr func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool { return cm.deviceManager.ShouldResetExtendedResourceCapacity() } + +func (cm *containerManagerImpl) UpdateAllocatedDevices() { + cm.deviceManager.UpdateAllocatedDevices() +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go index 7d6a214f4b..c21e8cce0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go @@ -121,6 +121,10 @@ func (cm *containerManagerStub) GetTopologyPodAdmitHandler() topologymanager.Man return nil } +func (cm *containerManagerStub) UpdateAllocatedDevices() { + return +} + func NewStubContainerManager() ContainerManager { return &containerManagerStub{shouldResetExtendedResourceCapacity: false} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go index e4a88b86b9..13271b98c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go @@ -181,3 +181,7 @@ func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool { func (cm *containerManagerImpl) GetTopologyPodAdmitHandler() topologymanager.Manager { return nil } + +func (cm *containerManagerImpl) UpdateAllocatedDevices() { + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go index 972813d9df..3a86292133 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -71,7 +71,7 @@ type Manager interface { // GetTopologyHints implements the topologymanager.HintProvider Interface // and is consulted to achieve NUMA aware resource alignment among this // and other resource controllers. 
- GetTopologyHints(v1.Pod, v1.Container) map[string][]topologymanager.TopologyHint + GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint } type manager struct { @@ -298,7 +298,7 @@ func (m *manager) State() state.Reader { return m.state } -func (m *manager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { +func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { // Garbage collect any stranded resources before providing TopologyHints m.removeStaleState() // Delegate to active policy diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go index d3b3eca6c1..d958935db6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go @@ -50,7 +50,7 @@ func (m *fakeManager) RemoveContainer(containerID string) error { return nil } -func (m *fakeManager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { +func (m *fakeManager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { klog.Infof("[fake cpumanager] Get Topology Hints") return map[string][]topologymanager.TopologyHint{} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go index 63e031b4a9..2031612be9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go @@ -33,5 +33,5 @@ type Policy interface { // GetTopologyHints implements the topologymanager.HintProvider Interface // and is consulted to achieve NUMA aware resource alignment among this // and other resource controllers. 
- GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint + GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go index be46369836..77cd367b64 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go @@ -52,6 +52,6 @@ func (p *nonePolicy) RemoveContainer(s state.State, podUID string, containerName return nil } -func (p *nonePolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { +func (p *nonePolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go index 8502c1f2f0..c088df866e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go @@ -276,7 +276,7 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int return int(cpuQuantity.Value()) } -func (p *staticPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { +func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { // If there are no CPU resources requested for this container, we do not // generate any topology hints. if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok { @@ -284,7 +284,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1. } // Get a count of how many guaranteed CPUs have been requested. - requested := p.guaranteedCPUs(&pod, &container) + requested := p.guaranteedCPUs(pod, container) // If there are no guaranteed CPUs being requested, we do not generate // any topology hints. This can happen, for example, because init diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go index 212ad75f04..4b5838ae03 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go @@ -597,9 +597,9 @@ func (m *ManagerImpl) readCheckpoint() error { return nil } -// updateAllocatedDevices gets a list of active pods and then frees any Devices that are bound to -// terminated pods. Returns error on failure. -func (m *ManagerImpl) updateAllocatedDevices(activePods []*v1.Pod) { +// UpdateAllocatedDevices frees any Devices that are bound to terminated pods. +func (m *ManagerImpl) UpdateAllocatedDevices() { + activePods := m.activePods() if !m.sourcesReady.AllReady() { return } @@ -773,7 +773,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont // Updates allocatedDevices to garbage collect any stranded resources // before doing the device plugin allocation. 
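For illustration, the signature changes in the hunks above and below replace pass-by-value v1.Pod and v1.Container with pointers, so every hint provider reads the same objects without copying large structs on each admission check. A paraphrased sketch of the provider-side interface as these hunks leave it; it assumes v1 "k8s.io/api/core/v1" and the TopologyHint type from the topologymanager package.

    // HintProvider is paraphrased from the hunks in this section, collecting
    // the new pointer-based signature in one place.
    type HintProvider interface {
            // GetTopologyHints returns NUMA affinity hints keyed by resource name.
            GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint
    }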
if !allocatedDevicesUpdated { - m.updateAllocatedDevices(m.activePods()) + m.UpdateAllocatedDevices() allocatedDevicesUpdated = true } allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed, devicesToReuse[resource]) @@ -788,7 +788,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont // Manager.Allocate involves RPC calls to device plugin, which // could be heavy-weight. Therefore we want to perform this operation outside // mutex lock. Note if Allocate call fails, we may leave container resources - // partially allocated for the failed container. We rely on updateAllocatedDevices() + // partially allocated for the failed container. We rely on UpdateAllocatedDevices() // to garbage collect these resources later. Another side effect is that if // we have X resource A and Y resource B in total, and two containers, container1 // and container2 both require X resource A and Y resource B. Both allocation diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go index a22e01b29f..4301c7dc3b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go @@ -65,7 +65,7 @@ func (h *ManagerStub) GetWatcherHandler() cache.PluginHandler { } // GetTopologyHints returns an empty TopologyHint map -func (h *ManagerStub) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { +func (h *ManagerStub) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { return map[string][]topologymanager.TopologyHint{} } @@ -78,3 +78,8 @@ func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevice func (h *ManagerStub) ShouldResetExtendedResourceCapacity() bool { return false } + +// UpdateAllocatedDevices returns nothing +func (h *ManagerStub) UpdateAllocatedDevices() { + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go index 7a8d192589..971b6fcd63 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go @@ -27,9 +27,9 @@ import ( // GetTopologyHints implements the TopologyManager HintProvider Interface which // ensures the Device Manager is consulted when Topology Aware Hints for each // container are created. -func (m *ManagerImpl) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { +func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { // Garbage collect any stranded device resources before providing TopologyHints - m.updateAllocatedDevices(m.activePods()) + m.UpdateAllocatedDevices() // Loop through all device resources and generate TopologyHints for them.. 
deviceHints := make(map[string][]topologymanager.TopologyHint) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go index 4d7c9b8af9..1b76a89a1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go @@ -67,7 +67,10 @@ type Manager interface { // TopologyManager HintProvider provider indicates the Device Manager implements the Topology Manager Interface // and is consulted to make Topology aware resource alignments - GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint + GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint + + // UpdateAllocatedDevices frees any Devices that are bound to terminated pods. + UpdateAllocatedDevices() } // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go index 8b84557825..3921bcc24a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go @@ -19,7 +19,6 @@ package topologymanager import ( "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" - "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) // Policy interface for Topology Manager Pod Admit Result @@ -28,7 +27,7 @@ type Policy interface { Name() string // Returns a merged TopologyHint based on input from hint providers // and a Pod Admit Handler Response based on hints and policy type - Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) + Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) } // Merge a TopologyHints permutation to a single hint by performing a bitwise-AND diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go index 1f02094a04..651f3a7657 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go @@ -16,10 +16,6 @@ limitations under the License. 
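
(Aside: the policy.go comment above describes merging one permutation of provider hints by bitwise-ANDing their NUMA affinity masks; the merged hint stays preferred only if every input was preferred and the intersection is non-empty. A rough sketch with uint64 masks standing in for the bitmask package:

package main

import "fmt"

type hint struct {
	mask      uint64 // one bit per NUMA node
	preferred bool
}

// mergePermutation ANDs all affinity masks into the default (all-nodes) mask.
func mergePermutation(defaultMask uint64, hints []hint) hint {
	merged := hint{mask: defaultMask, preferred: true}
	for _, h := range hints {
		merged.mask &= h.mask
		merged.preferred = merged.preferred && h.preferred
	}
	if merged.mask == 0 { // empty intersection can never be preferred
		merged.preferred = false
	}
	return merged
}

func main() {
	got := mergePermutation(0b11, []hint{
		{mask: 0b01, preferred: true},
		{mask: 0b11, preferred: true},
	})
	fmt.Printf("mask=%02b preferred=%v\n", got.mask, got.preferred) // mask=01 preferred=true
}

End of aside; the policy_best_effort.go hunk continues below.)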
package topologymanager -import ( - "k8s.io/kubernetes/pkg/kubelet/lifecycle" -) - type bestEffortPolicy struct { //List of NUMA Nodes available on the underlying machine numaNodes []int @@ -39,13 +35,11 @@ func (p *bestEffortPolicy) Name() string { return PolicyBestEffort } -func (p *bestEffortPolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { - return lifecycle.PodAdmitResult{ - Admit: true, - } +func (p *bestEffortPolicy) canAdmitPodResult(hint *TopologyHint) bool { + return true } -func (p *bestEffortPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) { +func (p *bestEffortPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) { filteredProvidersHints := filterProvidersHints(providersHints) bestHint := mergeFilteredHints(p.numaNodes, filteredProvidersHints) admit := p.canAdmitPodResult(&bestHint) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go index 7e5010a765..271f9dde79 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go @@ -16,10 +16,6 @@ limitations under the License. package topologymanager -import ( - "k8s.io/kubernetes/pkg/kubelet/lifecycle" -) - type nonePolicy struct{} var _ Policy = &nonePolicy{} @@ -36,12 +32,10 @@ func (p *nonePolicy) Name() string { return PolicyNone } -func (p *nonePolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { - return lifecycle.PodAdmitResult{ - Admit: true, - } +func (p *nonePolicy) canAdmitPodResult(hint *TopologyHint) bool { + return true } -func (p *nonePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) { +func (p *nonePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) { return TopologyHint{}, p.canAdmitPodResult(nil) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go index e7882083b9..c3321c2030 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go @@ -16,10 +16,6 @@ limitations under the License. 
package topologymanager -import ( - "k8s.io/kubernetes/pkg/kubelet/lifecycle" -) - type restrictedPolicy struct { bestEffortPolicy } @@ -38,20 +34,14 @@ func (p *restrictedPolicy) Name() string { return PolicyRestricted } -func (p *restrictedPolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { +func (p *restrictedPolicy) canAdmitPodResult(hint *TopologyHint) bool { if !hint.Preferred { - return lifecycle.PodAdmitResult{ - Admit: false, - Reason: "Topology Affinity Error", - Message: "Resources cannot be allocated with Topology Locality", - } - } - return lifecycle.PodAdmitResult{ - Admit: true, + return false } + return true } -func (p *restrictedPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) { +func (p *restrictedPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) { filteredHints := filterProvidersHints(providersHints) hint := mergeFilteredHints(p.numaNodes, filteredHints) admit := p.canAdmitPodResult(&hint) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go index 224d4686ac..0e94c282d4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go @@ -18,7 +18,6 @@ package topologymanager import ( "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" - "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) type singleNumaNodePolicy struct { @@ -40,17 +39,11 @@ func (p *singleNumaNodePolicy) Name() string { return PolicySingleNumaNode } -func (p *singleNumaNodePolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult { +func (p *singleNumaNodePolicy) canAdmitPodResult(hint *TopologyHint) bool { if !hint.Preferred { - return lifecycle.PodAdmitResult{ - Admit: false, - Reason: "Topology Affinity Error", - Message: "Resources cannot be allocated with Topology Locality", - } - } - return lifecycle.PodAdmitResult{ - Admit: true, + return false } + return true } // Return hints that have valid bitmasks with exactly one bit set. @@ -71,7 +64,7 @@ func filterSingleNumaHints(allResourcesHints [][]TopologyHint) [][]TopologyHint return filteredResourcesHints } -func (p *singleNumaNodePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) { +func (p *singleNumaNodePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) { filteredHints := filterProvidersHints(providersHints) // Filter to only include don't cares and hints with a single NUMA node. singleNumaHints := filterSingleNumaHints(filteredHints) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go index 4e452d71a0..dab59aa8aa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go @@ -76,7 +76,7 @@ type HintProvider interface { // this function for each hint provider, and merges the hints to produce // a consensus "best" hint. The hint providers may subsequently query the // topology manager to influence actual resource assignment. 
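
(Aside: the interface change running through these files has every policy return a bare admit bool from Merge, so the lifecycle.PodAdmitResult is built in exactly one place — the manager's Admit, further down in this patch. A compressed sketch of the new shape:

package main

import "fmt"

type TopologyHint struct{ Preferred bool }

// Policy mirrors the slimmed-down interface: merge hints, report admit as a bool.
type Policy interface {
	Name() string
	Merge(hints []TopologyHint) (TopologyHint, bool)
}

type restricted struct{}

func (restricted) Name() string { return "restricted" }

func (restricted) Merge(hints []TopologyHint) (TopologyHint, bool) {
	best := TopologyHint{Preferred: true}
	for _, h := range hints {
		best.Preferred = best.Preferred && h.Preferred
	}
	return best, best.Preferred // restricted admits only preferred placements
}

func main() {
	_, admit := restricted{}.Merge([]TopologyHint{{true}, {false}})
	fmt.Println(admit) // false
}

End of aside; the policy_restricted.go hunk continues below.)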
- GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]TopologyHint + GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint } //Store interface is to allow Hint Providers to retrieve pod affinity @@ -124,7 +124,7 @@ func NewManager(numaNodeInfo cputopology.NUMANodeInfo, topologyPolicyName string numaNodes = append(numaNodes, node) } - if len(numaNodes) > maxAllowableNUMANodes { + if topologyPolicyName != PolicyNone && len(numaNodes) > maxAllowableNUMANodes { return nil, fmt.Errorf("unsupported on machines with more than %v NUMA Nodes", maxAllowableNUMANodes) } @@ -164,7 +164,7 @@ func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint return m.podTopologyHints[podUID][containerName] } -func (m *manager) accumulateProvidersHints(pod v1.Pod, container v1.Container) (providersHints []map[string][]TopologyHint) { +func (m *manager) accumulateProvidersHints(pod *v1.Pod, container *v1.Container) (providersHints []map[string][]TopologyHint) { // Loop through all hint providers and save an accumulated list of the // hints returned by each hint provider. for _, provider := range m.hintProviders { @@ -177,7 +177,7 @@ func (m *manager) accumulateProvidersHints(pod v1.Pod, container v1.Container) ( } // Collect Hints from hint providers and pass to policy to retrieve the best one. -func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) (TopologyHint, lifecycle.PodAdmitResult) { +func (m *manager) calculateAffinity(pod *v1.Pod, container *v1.Container) (TopologyHint, bool) { providersHints := m.accumulateProvidersHints(pod, container) bestHint, admit := m.policy.Merge(providersHints) klog.Infof("[topologymanager] ContainerTopologyHint: %v", bestHint) @@ -194,35 +194,44 @@ func (m *manager) AddContainer(pod *v1.Pod, containerID string) error { } func (m *manager) RemoveContainer(containerID string) error { + klog.Infof("[topologymanager] RemoveContainer - Container ID: %v", containerID) + podUIDString := m.podMap[containerID] - delete(m.podTopologyHints, podUIDString) delete(m.podMap, containerID) - klog.Infof("[topologymanager] RemoveContainer - Container ID: %v podTopologyHints: %v", containerID, m.podTopologyHints) + if _, exists := m.podTopologyHints[podUIDString]; exists { + delete(m.podTopologyHints[podUIDString], containerID) + if len(m.podTopologyHints[podUIDString]) == 0 { + delete(m.podTopologyHints, podUIDString) + } + } + return nil } func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { - klog.Infof("[topologymanager] Topology Admit Handler") - if m.policy.Name() == "none" { - klog.Infof("[topologymanager] Skipping calculate topology affinity as policy: none") - return lifecycle.PodAdmitResult{ - Admit: true, - } + // Unconditionally admit the pod if we are running with the 'none' policy. + if m.policy.Name() == PolicyNone { + return lifecycle.PodAdmitResult{Admit: true} } + + klog.Infof("[topologymanager] Topology Admit Handler") pod := attrs.Pod - c := make(map[string]TopologyHint) + hints := make(map[string]TopologyHint) for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) 
{ - result, admitPod := m.calculateAffinity(*pod, container) - if !admitPod.Admit { - return admitPod + result, admit := m.calculateAffinity(pod, &container) + if !admit { + return lifecycle.PodAdmitResult{ + Message: "Resources cannot be allocated with Topology locality", + Reason: "TopologyAffinityError", + Admit: false, + } } - c[container.Name] = result + hints[container.Name] = result } - m.podTopologyHints[string(pod.UID)] = c + + m.podTopologyHints[string(pod.UID)] = hints klog.Infof("[topologymanager] Topology Affinity for Pod: %v are %v", pod.UID, m.podTopologyHints[string(pod.UID)]) - return lifecycle.PodAdmitResult{ - Admit: true, - } + return lifecycle.PodAdmitResult{Admit: true} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go index 132f60c18f..2f525357df 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go @@ -17,6 +17,7 @@ limitations under the License. package configmap import ( + "context" "fmt" "time" @@ -61,7 +62,7 @@ func NewSimpleConfigMapManager(kubeClient clientset.Interface) Manager { } func (s *simpleConfigMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) { - return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (s *simpleConfigMapManager) RegisterPod(pod *v1.Pod) { @@ -120,7 +121,7 @@ const ( // value in cache; otherwise it is just fetched from cache func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager { getConfigMap := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { - return kubeClient.CoreV1().ConfigMaps(namespace).Get(name, opts) + return kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, opts) } configMapStore := manager.NewObjectStore(getConfigMap, clock.RealClock{}, getTTL, defaultTTL) return &configMapManager{ @@ -136,16 +137,22 @@ func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.G // - every GetObject() returns a value from local cache propagated via watches func NewWatchingConfigMapManager(kubeClient clientset.Interface) Manager { listConfigMap := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { - return kubeClient.CoreV1().ConfigMaps(namespace).List(opts) + return kubeClient.CoreV1().ConfigMaps(namespace).List(context.TODO(), opts) } watchConfigMap := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) { - return kubeClient.CoreV1().ConfigMaps(namespace).Watch(opts) + return kubeClient.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), opts) } newConfigMap := func() runtime.Object { return &v1.ConfigMap{} } + isImmutable := func(object runtime.Object) bool { + if configMap, ok := object.(*v1.ConfigMap); ok { + return configMap.Immutable != nil && *configMap.Immutable + } + return false + } gr := corev1.Resource("configmap") return &configMapManager{ - manager: manager.NewWatchBasedManager(listConfigMap, watchConfigMap, newConfigMap, gr, getConfigMapNames), + manager: manager.NewWatchBasedManager(listConfigMap, watchConfigMap, newConfigMap, isImmutable, gr, getConfigMapNames), } } diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD index 751d42eeea..c3d42a1ff4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD @@ -51,7 +51,6 @@ go_library( "//pkg/kubelet/dockershim/network/kubenet:go_default_library", "//pkg/kubelet/kuberuntime:go_default_library", "//pkg/kubelet/leaky:go_default_library", - "//pkg/kubelet/qos:go_default_library", "//pkg/kubelet/server/streaming:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/cache:go_default_library", @@ -78,6 +77,7 @@ go_library( "@io_bazel_rules_go//go/platform:windows": [ "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/winstats:go_default_library", + "//vendor/github.com/Microsoft/hcsshim:go_default_library", "//vendor/golang.org/x/sys/windows/registry:go_default_library", ], "//conditions:default": [], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD index bdf49dcae4..fa9b598113 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD @@ -18,7 +18,6 @@ go_library( "@io_bazel_rules_go//go/platform:android": [ "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", - "//pkg/kubelet/qos:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", @@ -40,7 +39,6 @@ go_library( "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", - "//pkg/kubelet/qos:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go index 37ab7dcb69..da38ec0d1b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" kubecm "k8s.io/kubernetes/pkg/kubelet/cm" - "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" ) @@ -45,8 +44,10 @@ const ( // The minimum memory limit allocated to docker container: 150Mi minDockerMemoryLimit = 150 * 1024 * 1024 - // The Docker OOM score adjustment. - dockerOOMScoreAdj = qos.DockerOOMScoreAdj + // The OOM score adjustment for the docker process (i.e. the docker + // daemon). Essentially, makes docker very unlikely to experience an oom + // kill. 
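
(Aside: the constant on the next line inlines -999 so dockershim no longer imports kubelet/qos for it. For context only, an OOM score adjustment like this is applied by writing /proc/<pid>/oom_score_adj; an illustrative, Linux-only sketch — not code from this patch:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
)

// applyOOMScoreAdj writes an adjustment in [-1000, 1000] for the given pid.
// -999 makes the kernel's OOM killer avoid the process in all but the most
// desperate situations.
func applyOOMScoreAdj(pid, adj int) error {
	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
	return ioutil.WriteFile(path, []byte(strconv.Itoa(adj)), 0644)
}

func main() {
	if err := applyOOMScoreAdj(os.Getpid(), -999); err != nil {
		// negative adjustments require privileges
		fmt.Fprintln(os.Stderr, "oom_score_adj:", err)
	}
}

End of aside; the constant declaration follows.)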
+ dockerOOMScoreAdj = -999 ) var ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go index 1824cdbadb..ac22c4d7ad 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go @@ -34,7 +34,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" - "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -44,6 +43,13 @@ const ( // Various default sandbox resources requests/limits. defaultSandboxCPUshares int64 = 2 + // defaultSandboxOOMAdj is the oom score adjustment for the docker + // sandbox container. Using this OOM adj makes it very unlikely, but not + // impossible, that the defaultSandox will experience an oom kill. -998 + // is chosen to signify sandbox should be OOM killed before other more + // vital processes like the docker daemon, the kubelet, etc... + defaultSandboxOOMAdj int = -998 + // Name of the underlying container runtime runtimeName = "docker" ) @@ -643,8 +649,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, createConfig.Config.ExposedPorts = exposedPorts hc.PortBindings = portBindings - // TODO: Get rid of the dependency on kubelet internal package. - hc.OomScoreAdj = qos.PodInfraOOMAdj + hc.OomScoreAdj = defaultSandboxOOMAdj // Apply resource options. if err := ds.applySandboxResources(hc, c.GetLinux()); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go index 053c11d93a..086624c473 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go @@ -22,7 +22,9 @@ import ( "context" "time" + "github.com/Microsoft/hcsshim" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "k8s.io/klog" ) func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { @@ -31,7 +33,18 @@ func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.Cont return nil, err } - statsJSON, err := ds.client.GetContainerStats(containerID) + hcsshim_container, err := hcsshim.OpenContainer(containerID) + if err != nil { + return nil, err + } + defer func() { + closeErr := hcsshim_container.Close() + if closeErr != nil { + klog.Errorf("Error closing container '%s': %v", containerID, closeErr) + } + }() + + stats, err := hcsshim_container.Statistics() if err != nil { return nil, err } @@ -47,7 +60,6 @@ func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.Cont } status := statusResp.GetStatus() - dockerStats := statsJSON.Stats timestamp := time.Now().UnixNano() containerStats := &runtimeapi.ContainerStats{ Attributes: &runtimeapi.ContainerAttributes{ @@ -58,13 +70,12 @@ func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.Cont }, Cpu: &runtimeapi.CpuUsage{ Timestamp: timestamp, - // have to multiply cpu usage by 100 since docker stats units is in 100's of nano seconds for Windows - // see https://github.com/moby/moby/blob/v1.13.1/api/types/stats.go#L22 - 
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: dockerStats.CPUStats.CPUUsage.TotalUsage * 100}, + // have to multiply cpu usage by 100 since stats units is in 100's of nano seconds for Windows + UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: stats.Processor.TotalRuntime100ns * 100}, }, Memory: &runtimeapi.MemoryUsage{ Timestamp: timestamp, - WorkingSetBytes: &runtimeapi.UInt64Value{Value: dockerStats.MemoryStats.PrivateWorkingSet}, + WorkingSetBytes: &runtimeapi.UInt64Value{Value: stats.Memory.UsagePrivateWorkingSetBytes}, }, WritableLayer: &runtimeapi.FilesystemUsage{ Timestamp: timestamp, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index e2133daa28..2830f7c070 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -850,12 +850,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler) if utilfeature.DefaultFeatureGate.Enabled(features.Sysctls) { - // add sysctl admission - runtimeSupport, err := sysctl.NewRuntimeAdmitHandler(klet.containerRuntime) - if err != nil { - return nil, err - } - // Safe, whitelisted sysctls can always be used as unsafe sysctls in the spec. // Hence, we concatenate those two lists. safeAndUnsafeSysctls := append(sysctlwhitelist.SafeSysctlWhitelist(), allowedUnsafeSysctls...) @@ -863,7 +857,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, if err != nil { return nil, err } - klet.admitHandlers.AddPodAdmitHandler(runtimeSupport) klet.admitHandlers.AddPodAdmitHandler(sysctlsWhitelist) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go index 5ed554c0fe..28bf6a054b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go @@ -83,7 +83,7 @@ func (kl *Kubelet) registerWithAPIServer() { // value of the annotation for controller-managed attach-detach of attachable // persistent volumes for the node. 
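
(Aside on the docker_stats_windows.go hunk above: it now opens a hcsshim container handle directly, so the handle must be released on every path, with the close error logged rather than swallowed. That deferred-close pattern generalizes to any io.Closer; a self-contained sketch under that assumption:

package main

import (
	"io"
	"io/ioutil"
	"log"
	"strings"
)

// readAndClose defers the Close immediately after a successful open and
// surfaces (rather than discards) any close error.
func readAndClose(rc io.ReadCloser) ([]byte, error) {
	defer func() {
		if err := rc.Close(); err != nil {
			log.Printf("error closing resource: %v", err)
		}
	}()
	return ioutil.ReadAll(rc)
}

func main() {
	b, err := readAndClose(ioutil.NopCloser(strings.NewReader("stats")))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(b))
}

End of aside; the kubelet_node_status.go hunk continues below.)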
func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { - _, err := kl.kubeClient.CoreV1().Nodes().Create(node) + _, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) if err == nil { return true } @@ -93,7 +93,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { return false } - existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{}) + existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), metav1.GetOptions{}) if err != nil { klog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) return false @@ -420,7 +420,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { if tryNumber == 0 { util.FromApiserverCache(&opts) } - node, err := kl.heartbeatClient.CoreV1().Nodes().Get(string(kl.nodeName), opts) + node, err := kl.heartbeatClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), opts) if err != nil { return fmt.Errorf("error getting node %q: %v", kl.nodeName, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index bb109753e1..609303ac9d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -1779,13 +1779,13 @@ func hasHostNamespace(pod *v1.Pod) bool { func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { for _, volume := range pod.Spec.Volumes { if volume.PersistentVolumeClaim != nil { - pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) + pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) if err != nil { klog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err) continue } if pvc != nil { - referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { klog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) continue diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go index 8f023051b1..aa9c6746f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go @@ -17,6 +17,7 @@ limitations under the License. 
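
(Aside: many hunks in this patch follow one mechanical pattern — client-go methods grew a leading context.Context (and Create/Update/Delete grew an options argument) in the 1.18 API, and call sites pass context.TODO() until real contexts are plumbed through. A sketch against the fake clientset, assuming a client-go module at or after that API revision:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	// Old: client.CoreV1().Nodes().Get("node-1", metav1.GetOptions{})
	// New: the context comes first; context.TODO() marks unplumbed call sites.
	_, err := client.CoreV1().Nodes().Get(context.TODO(), "node-1", metav1.GetOptions{})
	fmt.Println(err) // nodes "node-1" not found
}

End of aside; the checkpoint/download.go hunk continues below.)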
package checkpoint import ( + "context" "fmt" "math/rand" "time" @@ -191,7 +192,7 @@ func (r *remoteConfigMap) Download(client clientset.Interface, store cache.Store // if we didn't find the ConfigMap in the in-memory store, download it from the API server if cm == nil { utillog.Infof("attempting to download %s", r.APIPath()) - cm, err = client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(r.source.ConfigMap.Name, metav1.GetOptions{}) + cm, err = client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(context.TODO(), r.source.ConfigMap.Name, metav1.GetOptions{}) if err != nil { return nil, status.DownloadError, fmt.Errorf("%s, error: %v", status.DownloadError, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configsync.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configsync.go index f6663a5874..b2de60c7c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configsync.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configsync.go @@ -17,6 +17,7 @@ limitations under the License. package kubeletconfig import ( + "context" "fmt" "os" "time" @@ -198,7 +199,7 @@ func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, sourc // because the event recorder won't flush its queue before we exit (we'd lose the event) event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message) klog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) - if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(event); err != nil { + if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event, metav1.CreateOptions{}); err != nil { utillog.Errorf("failed to send event, error: %v", err) } utillog.Infof(message) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/status.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/status.go index d1fdfeb8c9..ad2cf898c8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/status.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/status.go @@ -17,6 +17,7 @@ limitations under the License. 
package status import ( + "context" "fmt" "sync" @@ -158,7 +159,7 @@ func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) { }() // get the Node so we can check the current status - oldNode, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("could not get Node %q, will not sync status, error: %v", nodeName, err) return diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD index 6738b6edfd..ba16cec397 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD @@ -73,10 +73,14 @@ go_library( "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/kubelet/qos:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/kubelet/qos:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//pkg/kubelet/apis:go_default_library", @@ -130,7 +134,17 @@ go_test( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go index 019f7dbdaf..483d335b9e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go @@ -21,9 +21,12 @@ package kuberuntime import ( "time" - "k8s.io/api/core/v1" + cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" + v1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "k8s.io/klog" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/qos" ) @@ -78,5 +81,48 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C lc.Resources.CpuPeriod = cpuPeriod } + lc.Resources.HugepageLimits = GetHugepageLimitsFromResources(container.Resources) + return lc } + +// GetHugepageLimitsFromResources returns limits of each hugepages from resources. 
+func GetHugepageLimitsFromResources(resources v1.ResourceRequirements) []*runtimeapi.HugepageLimit { + var hugepageLimits []*runtimeapi.HugepageLimit + + // For each page size, limit to 0. + for _, pageSize := range cgroupfs.HugePageSizes { + hugepageLimits = append(hugepageLimits, &runtimeapi.HugepageLimit{ + PageSize: pageSize, + Limit: uint64(0), + }) + } + + requiredHugepageLimits := map[string]uint64{} + for resourceObj, amountObj := range resources.Limits { + if !v1helper.IsHugePageResourceName(resourceObj) { + continue + } + + pageSize, err := v1helper.HugePageSizeFromResourceName(resourceObj) + if err != nil { + klog.Warningf("Failed to get hugepage size from resource name: %v", err) + continue + } + + sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize.Value()) + if err != nil { + klog.Warningf("pageSize is invalid: %v", err) + continue + } + requiredHugepageLimits[sizeString] = uint64(amountObj.Value()) + } + + for _, hugepageLimit := range hugepageLimits { + if limit, exists := requiredHugepageLimits[hugepageLimit.PageSize]; exists { + hugepageLimit.Limit = limit + } + } + + return hugepageLimits +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go index 6f858b7482..5501592b8e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go @@ -28,6 +28,8 @@ import ( kubefeatures "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/securitycontext" + + "k8s.io/klog" ) // applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig. @@ -82,6 +84,28 @@ func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1 } wc.Resources.CpuShares = cpuShares + if !isolatedByHyperv { + // The processor resource controls are mutually exclusive on + // Windows Server Containers, the order of precedence is + // CPUCount first, then CPUShares, and CPUMaximum last. + if wc.Resources.CpuCount > 0 { + if wc.Resources.CpuShares > 0 { + wc.Resources.CpuShares = 0 + klog.Warningf("Mutually exclusive options: CPUCount priority > CPUShares priority on Windows Server Containers. CPUShares should be ignored") + } + if wc.Resources.CpuMaximum > 0 { + wc.Resources.CpuMaximum = 0 + klog.Warningf("Mutually exclusive options: CPUCount priority > CPUMaximum priority on Windows Server Containers. CPUMaximum should be ignored") + } + } else if wc.Resources.CpuShares > 0 { + if wc.Resources.CpuMaximum > 0 { + wc.Resources.CpuMaximum = 0 + klog.Warningf("Mutually exclusive options: CPUShares priority > CPUMaximum priority on Windows Server Containers. 
CPUMaximum should be ignored") + } + + } + } + memoryLimit := container.Resources.Limits.Memory().Value() if memoryLimit != 0 { wc.Resources.MemoryLimitInBytes = memoryLimit diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go index 93b0cd3939..58554fa200 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go @@ -51,11 +51,6 @@ const ( // timeFormat is the time format used in the log. timeFormat = time.RFC3339Nano - // stateCheckPeriod is the period to check container state while following - // the container log. Kubelet should not keep following the log when the - // container is not running. - stateCheckPeriod = 5 * time.Second - // logForceCheckPeriod is the period to check for a new read logForceCheckPeriod = 1 * time.Second ) @@ -447,10 +442,6 @@ func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeServic errRetry-- case <-time.After(logForceCheckPeriod): return true, false, nil - case <-time.After(stateCheckPeriod): - if running, err := isContainerRunning(id, runtimeService); !running { - return false, false, err - } } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD index 912e98d056..3b57741a58 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD @@ -49,7 +49,6 @@ go_test( "//pkg/kubelet/util/format:go_default_library", "//pkg/scheduler/framework/plugins/nodename:go_default_library", "//pkg/scheduler/framework/plugins/nodeports:go_default_library", - "//pkg/scheduler/framework/plugins/noderesources:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go index 0ed0f47357..2a0ef70463 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go @@ -178,7 +178,10 @@ func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.Nod // InsufficientResourceError is an error type that indicates what kind of resource limit is // hit and caused the unfitting failure. 
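
(Aside on GetHugepageLimitsFromResources in the kuberuntime hunk above: it is a two-pass scheme — first emit a zero limit for every page size the cgroup layer supports, then overwrite the entries for sizes actually present in the container's limits, so unrequested sizes are explicitly capped at 0. A dependency-free sketch of the same idea:

package main

import "fmt"

type hugepageLimit struct {
	PageSize string
	Limit    uint64
}

// hugepageLimits defaults every supported page size to 0, then applies the
// container's limits, so unrequested sizes stay pinned to zero.
func hugepageLimits(supported []string, requested map[string]uint64) []*hugepageLimit {
	var out []*hugepageLimit
	for _, size := range supported {
		out = append(out, &hugepageLimit{PageSize: size, Limit: 0})
	}
	for _, hl := range out {
		if v, ok := requested[hl.PageSize]; ok {
			hl.Limit = v
		}
	}
	return out
}

func main() {
	for _, hl := range hugepageLimits([]string{"2MB", "1GB"}, map[string]uint64{"2MB": 1 << 21}) {
		fmt.Printf("%s=%d\n", hl.PageSize, hl.Limit) // 2MB=2097152, then 1GB=0
	}
}

End of aside; the lifecycle/predicate.go hunk continues below.)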
type InsufficientResourceError struct { - noderesources.InsufficientResource + ResourceName v1.ResourceName + Requested int64 + Used int64 + Capacity int64 } func (e *InsufficientResourceError) Error() string { @@ -224,7 +227,12 @@ func GeneralPredicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) ([]Pre var reasons []PredicateFailureReason for _, r := range noderesources.Fits(pod, nodeInfo, nil) { - reasons = append(reasons, &InsufficientResourceError{InsufficientResource: r}) + reasons = append(reasons, &InsufficientResourceError{ + ResourceName: r.ResourceName, + Requested: r.Requested, + Used: r.Used, + Capacity: r.Capacity, + }) } if !pluginhelper.PodMatchesNodeSelectorAndAffinityTerms(pod, nodeInfo.Node()) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go index 91ec35705b..9de5c75773 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go @@ -321,12 +321,12 @@ var ( }, ) // RunPodSandboxDuration is a Histogram that tracks the duration (in seconds) it takes to run Pod Sandbox operations. - // Broken down by RuntimeClass. + // Broken down by RuntimeClass.Handler. RunPodSandboxDuration = metrics.NewHistogramVec( &metrics.HistogramOpts{ Subsystem: KubeletSubsystem, Name: RunPodSandboxDurationKey, - Help: "Duration in seconds of the run_podsandbox operations. Broken down by RuntimeClass.", + Help: "Duration in seconds of the run_podsandbox operations. Broken down by RuntimeClass.Handler.", // Use DefBuckets for now, will customize the buckets if necessary. Buckets: metrics.DefBuckets, StabilityLevel: metrics.ALPHA, @@ -334,12 +334,12 @@ var ( []string{"runtime_handler"}, ) // RunPodSandboxErrors is a Counter that tracks the cumulative number of Pod Sandbox operations errors. - // Broken down by RuntimeClass. + // Broken down by RuntimeClass.Handler. RunPodSandboxErrors = metrics.NewCounterVec( &metrics.CounterOpts{ Subsystem: KubeletSubsystem, Name: RunPodSandboxErrorsKey, - Help: "Cumulative number of the run_podsandbox operation errors by RuntimeClass.", + Help: "Cumulative number of the run_podsandbox operation errors by RuntimeClass.Handler.", StabilityLevel: metrics.ALPHA, }, []string{"runtime_handler"}, @@ -392,6 +392,8 @@ func Register(containerCache kubecontainer.RuntimeCache, collectors ...metrics.S legacyregistry.MustRegister(DevicePluginAllocationDuration) legacyregistry.MustRegister(RunningContainerCount) legacyregistry.MustRegister(RunningPodCount) + legacyregistry.MustRegister(RunPodSandboxDuration) + legacyregistry.MustRegister(RunPodSandboxErrors) if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { legacyregistry.MustRegister(AssignedConfig) legacyregistry.MustRegister(ActiveConfig) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go index 073b7d2837..4641a06b7f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go @@ -17,6 +17,7 @@ limitations under the License. package nodelease import ( + "context" "fmt" "time" @@ -142,7 +143,7 @@ func (c *controller) backoffEnsureLease() (*coordinationv1.Lease, bool) { // ensureLease creates the lease if it does not exist. 
Returns the lease and // a bool (true if this call created the lease), or any error that occurs. func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { - lease, err := c.leaseClient.Get(c.holderIdentity, metav1.GetOptions{}) + lease, err := c.leaseClient.Get(context.TODO(), c.holderIdentity, metav1.GetOptions{}) if apierrors.IsNotFound(err) { // lease does not exist, create it. leaseToCreate := c.newLease(nil) @@ -152,7 +153,7 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { // not create it this time - we will retry in the next iteration. return nil, false, nil } - lease, err := c.leaseClient.Create(leaseToCreate) + lease, err := c.leaseClient.Create(context.TODO(), leaseToCreate, metav1.CreateOptions{}) if err != nil { return nil, false, err } @@ -169,7 +170,7 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { // call this once you're sure the lease has been created func (c *controller) retryUpdateLease(base *coordinationv1.Lease) error { for i := 0; i < maxUpdateRetries; i++ { - lease, err := c.leaseClient.Update(c.newLease(base)) + lease, err := c.leaseClient.Update(context.TODO(), c.newLease(base), metav1.UpdateOptions{}) if err == nil { c.latestLease = lease return nil @@ -214,7 +215,7 @@ func (c *controller) newLease(base *coordinationv1.Lease) *coordinationv1.Lease // the connection between master and node is not ready yet. So try to set // owner reference every time when renewing the lease, until successful. if len(lease.OwnerReferences) == 0 { - if node, err := c.client.CoreV1().Nodes().Get(c.holderIdentity, metav1.GetOptions{}); err == nil { + if node, err := c.client.CoreV1().Nodes().Get(context.TODO(), c.holderIdentity, metav1.GetOptions{}); err == nil { lease.OwnerReferences = []metav1.OwnerReference{ { APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/BUILD index 06f911a5bc..b8cbf8ccdd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/BUILD @@ -17,7 +17,6 @@ go_library( "//pkg/util/filesystem:go_default_library", "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go index 16dc639692..70dcb21bd4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go @@ -17,6 +17,7 @@ limitations under the License. 
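
(Aside: the nodelease controller above is a create-if-missing, then bounded-retry-update loop against the Lease API. A schematic of that control flow; the leaseClient interface here is an illustrative stand-in for the coordination/v1 client, not the vendored type:

package leasesketch

import (
	"errors"
	"fmt"
)

type lease struct{ holder string }

// leaseClient is a hypothetical stand-in for the coordination/v1 Lease client.
type leaseClient interface {
	Get(name string) (*lease, error)
	Create(l *lease) (*lease, error)
	Update(l *lease) (*lease, error)
}

var errNotFound = errors.New("lease not found")

const maxUpdateRetries = 5

// ensureThenRenew creates the lease when absent, then retries the renewal a
// bounded number of times, mirroring ensureLease + retryUpdateLease above.
func ensureThenRenew(c leaseClient, name, holder string) error {
	l, err := c.Get(name)
	if errors.Is(err, errNotFound) {
		l, err = c.Create(&lease{holder: holder})
	}
	if err != nil {
		return err
	}
	for i := 0; i < maxUpdateRetries; i++ {
		if _, err = c.Update(l); err == nil {
			return nil
		}
	}
	return fmt.Errorf("failed %d attempts to renew lease: %w", maxUpdateRetries, err)
}

End of aside; the pluginwatcher hunk continues below.)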
package pluginwatcher import ( + "context" "errors" "fmt" "net" @@ -24,7 +25,6 @@ import ( "sync" "time" - "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/klog" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go index ca2e434227..ac4cd855c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go @@ -17,6 +17,7 @@ limitations under the License. package pluginwatcher import ( + "context" "errors" "fmt" "net" @@ -24,7 +25,6 @@ import ( "sync" "time" - "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/klog" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go index fe7cd28c25..551d27df44 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -95,7 +96,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error { Controller: &controller, }} - apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(©Pod) + apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(context.TODO(), ©Pod, metav1.CreateOptions{}) if err != nil && apierrors.IsAlreadyExists(err) { // Check if the existing pod is the same as the pod we want to create. if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash { @@ -123,7 +124,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string, uid *types.UID) } klog.V(2).Infof("Deleting a mirror pod %q (uid %#v)", podFullName, uid) var GracePeriodSeconds int64 - if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil { + if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil { // Unfortunately, there's no generic error for failing a precondition if !(apierrors.IsNotFound(err) || apierrors.IsConflict(err)) { // We should return the error here, but historically this routine does diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go index b1e8ed998b..f005fc59d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go @@ -23,15 +23,8 @@ import ( ) const ( - // PodInfraOOMAdj is very docker specific. For arbitrary runtime, it may not make - // sense to set sandbox level oom score, e.g. a sandbox could only be a namespace - // without a process. - // TODO: Handle infra container oom score adj in a runtime agnostic way. 
- PodInfraOOMAdj int = -998 // KubeletOOMScoreAdj is the OOM score adjustment for Kubelet KubeletOOMScoreAdj int = -999 - // DockerOOMScoreAdj is the OOM score adjustment for Docker - DockerOOMScoreAdj int = -999 // KubeProxyOOMScoreAdj is the OOM score adjustment for kube-proxy KubeProxyOOMScoreAdj int = -999 guaranteedOOMScoreAdj int = -998 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go index 56843ed910..3aa132a5bf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go @@ -17,6 +17,7 @@ limitations under the License. package secret import ( + "context" "fmt" "time" @@ -62,7 +63,7 @@ func NewSimpleSecretManager(kubeClient clientset.Interface) Manager { } func (s *simpleSecretManager) GetSecret(namespace, name string) (*v1.Secret, error) { - return s.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + return s.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (s *simpleSecretManager) RegisterPod(pod *v1.Pod) { @@ -121,7 +122,7 @@ const ( // value in cache; otherwise it is just fetched from cache func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager { getSecret := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { - return kubeClient.CoreV1().Secrets(namespace).Get(name, opts) + return kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, opts) } secretStore := manager.NewObjectStore(getSecret, clock.RealClock{}, getTTL, defaultTTL) return &secretManager{ @@ -137,16 +138,22 @@ func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetO // - every GetObject() returns a value from local cache propagated via watches func NewWatchingSecretManager(kubeClient clientset.Interface) Manager { listSecret := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { - return kubeClient.CoreV1().Secrets(namespace).List(opts) + return kubeClient.CoreV1().Secrets(namespace).List(context.TODO(), opts) } watchSecret := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) { - return kubeClient.CoreV1().Secrets(namespace).Watch(opts) + return kubeClient.CoreV1().Secrets(namespace).Watch(context.TODO(), opts) } newSecret := func() runtime.Object { return &v1.Secret{} } + isImmutable := func(object runtime.Object) bool { + if secret, ok := object.(*v1.Secret); ok { + return secret.Immutable != nil && *secret.Immutable + } + return false + } gr := corev1.Resource("secret") return &secretManager{ - manager: manager.NewWatchBasedManager(listSecret, watchSecret, newSecret, gr, getSecretNames), + manager: manager.NewWatchBasedManager(listSecret, watchSecret, newSecret, isImmutable, gr, getSecretNames), } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go index 26ab002655..5822b7303b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go @@ -89,6 +89,7 @@ type Server struct { auth AuthInterface host HostInterface restfulCont containerInterface + metricsBuckets map[string]bool resourceAnalyzer stats.ResourceAnalyzer redirectContainerStreaming 
bool } @@ -225,6 +226,7 @@ func NewServer( resourceAnalyzer: resourceAnalyzer, auth: auth, restfulCont: &filteringContainer{Container: restful.NewContainer()}, + metricsBuckets: make(map[string]bool), redirectContainerStreaming: redirectContainerStreaming, } if auth != nil { @@ -280,14 +282,32 @@ func (s *Server) InstallAuthFilter() { }) } +// addMetricsBucketMatcher adds a regexp matcher and the relevant bucket to use when +// it matches. Please be aware this is not thread safe and should not be used dynamically +func (s *Server) addMetricsBucketMatcher(bucket string) { + s.metricsBuckets[bucket] = true +} + +// getMetricBucket find the appropriate metrics reporting bucket for the given path +func (s *Server) getMetricBucket(path string) string { + root := getURLRootPath(path) + if s.metricsBuckets[root] == true { + return root + } + return "Invalid path" +} + // InstallDefaultHandlers registers the default set of supported HTTP request // patterns with the restful Container. func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) { + s.addMetricsBucketMatcher("healthz") healthz.InstallHandler(s.restfulCont, healthz.PingHealthz, healthz.LogHealthz, healthz.NamedCheck("syncloop", s.syncLoopHealthCheck), ) + + s.addMetricsBucketMatcher("pods") ws := new(restful.WebService) ws. Path("/pods"). @@ -297,7 +317,14 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) { Operation("getPods")) s.restfulCont.Add(ws) + s.addMetricsBucketMatcher("stats") s.restfulCont.Add(stats.CreateHandlers(statsPath, s.host, s.resourceAnalyzer, enableCAdvisorJSONEndpoints)) + + s.addMetricsBucketMatcher("metrics") + s.addMetricsBucketMatcher("metrics/cadvisor") + s.addMetricsBucketMatcher("metrics/probes") + s.addMetricsBucketMatcher("metrics/resource/v1alpha1") + s.addMetricsBucketMatcher("metrics/resource") //lint:ignore SA1019 https://github.com/kubernetes/enhancements/issues/1206 s.restfulCont.Handle(metricsPath, legacyregistry.Handler()) @@ -321,12 +348,14 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) { ) // deprecated endpoint which will be removed in release 1.20.0+. + s.addMetricsBucketMatcher("metrics/resource/v1alpha1") v1alpha1ResourceRegistry := compbasemetrics.NewKubeRegistry() v1alpha1ResourceRegistry.CustomMustRegister(stats.NewPrometheusResourceMetricCollector(s.resourceAnalyzer, v1alpha1.Config())) s.restfulCont.Handle(path.Join(resourceMetricsPath, v1alpha1.Version), compbasemetrics.HandlerFor(v1alpha1ResourceRegistry, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}), ) + s.addMetricsBucketMatcher("metrics/resource") resourceRegistry := compbasemetrics.NewKubeRegistry() resourceRegistry.CustomMustRegister(collectors.NewResourceMetricsCollector(s.resourceAnalyzer)) s.restfulCont.Handle(resourceMetricsPath, @@ -335,6 +364,7 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) { // prober metrics are exposed under a different endpoint + s.addMetricsBucketMatcher("metrics/probes") p := compbasemetrics.NewKubeRegistry() _ = compbasemetrics.RegisterProcessStartTime(p.Register) p.MustRegister(prober.ProberResults) @@ -342,6 +372,7 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) { compbasemetrics.HandlerFor(p, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}), ) + s.addMetricsBucketMatcher("spec") if enableCAdvisorJSONEndpoints { ws := new(restful.WebService) ws. 
@@ -361,6 +392,7 @@ const pprofBasePath = "/debug/pprof/" func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { klog.Infof("Adding debug handlers to kubelet server.") + s.addMetricsBucketMatcher("run") ws := new(restful.WebService) ws. Path("/run") @@ -372,6 +404,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { Operation("getRun")) s.restfulCont.Add(ws) + s.addMetricsBucketMatcher("exec") ws = new(restful.WebService) ws. Path("/exec") @@ -389,6 +422,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { Operation("getExec")) s.restfulCont.Add(ws) + s.addMetricsBucketMatcher("attach") ws = new(restful.WebService) ws. Path("/attach") @@ -406,6 +440,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { Operation("getAttach")) s.restfulCont.Add(ws) + s.addMetricsBucketMatcher("portForward") ws = new(restful.WebService) ws. Path("/portForward") @@ -423,6 +458,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { Operation("getPortForward")) s.restfulCont.Add(ws) + s.addMetricsBucketMatcher("logs") ws = new(restful.WebService) ws. Path(logsPath) @@ -435,6 +471,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { Param(ws.PathParameter("logpath", "path to the log").DataType("string"))) s.restfulCont.Add(ws) + s.addMetricsBucketMatcher("containerLogs") ws = new(restful.WebService) ws. Path("/containerLogs") @@ -443,8 +480,10 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { Operation("getContainerLogs")) s.restfulCont.Add(ws) + s.addMetricsBucketMatcher("configz") configz.InstallHandler(s.restfulCont) + s.addMetricsBucketMatcher("debug") handlePprofEndpoint := func(req *restful.Request, resp *restful.Response) { name := strings.TrimPrefix(req.Request.URL.Path, pprofBasePath) switch name { @@ -460,7 +499,6 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { pprof.Index(resp, req.Request) } } - // Setup pprof handlers. ws = new(restful.WebService).Path(pprofBasePath) ws.Route(ws.GET("/{subpath:*}").To(func(req *restful.Request, resp *restful.Response) { @@ -473,6 +511,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { s.restfulCont.Handle("/debug/flags/v", routes.StringFlagPutHandler(logs.GlogSetter)) // The /runningpods endpoint is used for testing only. + s.addMetricsBucketMatcher("runningpods") ws = new(restful.WebService) ws. Path("/runningpods/"). @@ -482,6 +521,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { Operation("getRunningPods")) s.restfulCont.Add(ws) + s.addMetricsBucketMatcher("cri") if criHandler != nil { s.restfulCont.Handle("/cri/", criHandler) } @@ -493,6 +533,14 @@ func (s *Server) InstallDebuggingDisabledHandlers() { http.Error(w, "Debug endpoints are disabled.", http.StatusMethodNotAllowed) }) + s.addMetricsBucketMatcher("run") + s.addMetricsBucketMatcher("exec") + s.addMetricsBucketMatcher("attach") + s.addMetricsBucketMatcher("portForward") + s.addMetricsBucketMatcher("containerLogs") + s.addMetricsBucketMatcher("runningpods") + s.addMetricsBucketMatcher("pprof") + s.addMetricsBucketMatcher("logs") paths := []string{ "/run/", "/exec/", "/attach/", "/portForward/", "/containerLogs/", "/runningpods/", pprofBasePath, logsPath} @@ -809,10 +857,10 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp proxyStream(response.ResponseWriter, request.Request, url) } -// trimURLPath trims a URL path. +// getURLRootPath trims a URL path. 
// For paths in the format of "/metrics/xxx", "metrics/xxx" is returned; // For all other paths, the first part of the path is returned. -func trimURLPath(path string) string { +func getURLRootPath(path string) string { parts := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 3) if len(parts) == 0 { return path @@ -860,7 +908,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { serverType = "readwrite" } - method, path := req.Method, trimURLPath(req.URL.Path) + method, path := req.Method, s.getMetricBucket(req.URL.Path) longRunning := strconv.FormatBool(isLongRunningRequest(path)) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go index dbc10153f2..737ae59844 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go @@ -649,7 +649,7 @@ func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *runtimeapi defer p.mutex.Unlock() cached, ok := p.cpuUsageCache[id] - if !ok || cached.stats.UsageCoreNanoSeconds == nil { + if !ok || cached.stats.UsageCoreNanoSeconds == nil || stats.Cpu.UsageCoreNanoSeconds.Value < cached.stats.UsageCoreNanoSeconds.Value { // Cannot compute the usage now, but update the cached stats anyway p.cpuUsageCache[id] = &cpuUsageRecord{stats: stats.Cpu, usageNanoCores: nil} return nil, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go index 1cb34e06a4..ad4eb16fa4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go @@ -17,6 +17,7 @@ limitations under the License. package status import ( + "context" "fmt" "sort" "sync" @@ -519,7 +520,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { } // TODO: make me easier to express from client code - pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) + pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(context.TODO(), status.podName, metav1.GetOptions{}) if errors.IsNotFound(err) { klog.V(3).Infof("Pod %q does not exist on the server", format.PodDesc(status.podName, status.podNamespace, uid)) // If the Pod is deleted the status will be cleared in @@ -556,7 +557,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { deleteOptions := metav1.NewDeleteOptions(0) // Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace. deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID)) - err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) + err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions) if err != nil { klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) return @@ -621,8 +622,8 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { // reconcile is not needed. Just return. 
return false } - klog.V(3).Infof("Pod status is inconsistent with cached status for pod %q, a reconciliation should be triggered:\n %+v", format.Pod(pod), - diff.ObjectDiff(podStatus, status)) + klog.V(3).Infof("Pod status is inconsistent with cached status for pod %q, a reconciliation should be triggered:\n %s", format.Pod(pod), + diff.ObjectDiff(podStatus, &status)) return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/BUILD index f8c52b92d9..d9d32b5abd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/BUILD @@ -10,14 +10,12 @@ go_library( name = "go_default_library", srcs = [ "namespace.go", - "runtime.go", "whitelist.go", ], importpath = "k8s.io/kubernetes/pkg/kubelet/sysctl", deps = [ "//pkg/apis/core/validation:go_default_library", "//pkg/apis/policy/validation:go_default_library", - "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/runtime.go deleted file mode 100644 index 8b37c65c55..0000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/runtime.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sysctl - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/lifecycle" -) - -const ( - UnsupportedReason = "SysctlUnsupported" - // CRI uses semver-compatible API version, while docker does not - // (e.g., 1.24). Append the version with a ".0". - dockerMinimumAPIVersion = "1.24.0" - - dockerTypeName = "docker" -) - -// TODO: The admission logic in this file is runtime-dependent. It should be -// changed to be generic and CRI-compatible. - -type runtimeAdmitHandler struct { - result lifecycle.PodAdmitResult -} - -var _ lifecycle.PodAdmitHandler = &runtimeAdmitHandler{} - -// NewRuntimeAdmitHandler returns a sysctlRuntimeAdmitHandler which checks whether -// the given runtime support sysctls. 
-func NewRuntimeAdmitHandler(runtime container.Runtime) (*runtimeAdmitHandler, error) { - switch runtime.Type() { - case dockerTypeName: - v, err := runtime.APIVersion() - if err != nil { - return nil, fmt.Errorf("failed to get runtime version: %v", err) - } - - // only Docker API version >= 1.24 supports sysctls - c, err := v.Compare(dockerMinimumAPIVersion) - if err != nil { - return nil, fmt.Errorf("failed to compare Docker version for sysctl support: %v", err) - } - if c >= 0 { - return &runtimeAdmitHandler{ - result: lifecycle.PodAdmitResult{ - Admit: true, - }, - }, nil - } - return &runtimeAdmitHandler{ - result: lifecycle.PodAdmitResult{ - Admit: false, - Reason: UnsupportedReason, - Message: "Docker API version before 1.24 does not support sysctls", - }, - }, nil - default: - // Return admit for other runtimes. - return &runtimeAdmitHandler{ - result: lifecycle.PodAdmitResult{ - Admit: true, - }, - }, nil - } -} - -// Admit checks whether the runtime supports sysctls. -func (w *runtimeAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { - if attrs.Pod.Spec.SecurityContext != nil { - - if len(attrs.Pod.Spec.SecurityContext.Sysctls) > 0 { - return w.result - } - } - - return lifecycle.PodAdmitResult{ - Admit: true, - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD index 3980947498..6db84d1254 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD @@ -22,6 +22,7 @@ go_library( deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go index 6c2a98713c..72c067f74a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go @@ -19,6 +19,7 @@ limitations under the License. 
package token import ( + "context" "errors" "fmt" "sync" @@ -26,6 +27,7 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" @@ -64,7 +66,7 @@ func NewManager(c clientset.Interface) *Manager { if c == nil { return nil, errors.New("cannot use TokenManager when kubelet is in standalone mode") } - tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(name, tr) + tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{}) if apierrors.IsNotFound(err) && !tokenRequestsSupported() { return nil, fmt.Errorf("the API server does not have TokenRequest endpoints enabled") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/BUILD index fbe9e111f8..c88f7023e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/BUILD @@ -10,6 +10,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/util/manager", visibility = ["//visibility:public"], deps = [ + "//pkg/features:go_default_library", "//pkg/kubelet/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -22,7 +23,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/etcd3:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -36,7 +39,9 @@ go_test( deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core/v1:go_default_library", + "//pkg/features:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -44,9 +49,11 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", + "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go index 952326348f..56ff2af0eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go @@ -14,13 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// TODO: We did some scalability tests and using watchBasedManager -// seems to help with apiserver performance at scale visibly. -// No issues we also observed at the scale of ~200k watchers with a -// single apiserver. -// However, we need to perform more extensive testing before we -// enable this in production setups. - package manager import ( @@ -31,6 +24,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/klog" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -39,18 +34,37 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" ) type listObjectFunc func(string, metav1.ListOptions) (runtime.Object, error) type watchObjectFunc func(string, metav1.ListOptions) (watch.Interface, error) type newObjectFunc func() runtime.Object +type isImmutableFunc func(runtime.Object) bool // objectCacheItem is a single item stored in objectCache. type objectCacheItem struct { refCount int store cache.Store hasSynced func() (bool, error) - stopCh chan struct{} + + // lock protects stopCh from being closed multiple times. + lock sync.Mutex + stopCh chan struct{} +} + +func (i *objectCacheItem) stop() bool { + i.lock.Lock() + defer i.lock.Unlock() + select { + case <-i.stopCh: + // This means that the channel is already closed. + return false + default: + close(i.stopCh) + return true + } } // objectCache is a local cache of objects propagated via @@ -59,6 +73,7 @@ type objectCache struct { listObject listObjectFunc watchObject watchObjectFunc newObject newObjectFunc + isImmutable isImmutableFunc groupResource schema.GroupResource lock sync.RWMutex @@ -66,11 +81,17 @@ } // NewObjectCache returns a new watch-based instance of Store interface. -func NewObjectCache(listObject listObjectFunc, watchObject watchObjectFunc, newObject newObjectFunc, groupResource schema.GroupResource) Store { +func NewObjectCache( + listObject listObjectFunc, + watchObject watchObjectFunc, + newObject newObjectFunc, + isImmutable isImmutableFunc, + groupResource schema.GroupResource) Store { return &objectCache{ listObject: listObject, watchObject: watchObject, newObject: newObject, + isImmutable: isImmutable, groupResource: groupResource, items: make(map[objectKey]*objectCacheItem), } @@ -140,7 +161,7 @@ func (c *objectCache) DeleteReference(namespace, name string) { item.refCount-- if item.refCount == 0 { // Stop the underlying reflector. - close(item.stopCh) + item.stop() delete(c.items, key) } } @@ -177,6 +198,21 @@ func (c *objectCache) Get(namespace, name string) (runtime.Object, error) { return nil, apierrors.NewNotFound(c.groupResource, name) } if object, ok := obj.(runtime.Object); ok { + // If the returned object is immutable, stop the reflector. + // + // NOTE: we may potentially not even start the reflector if the object is + // already immutable.
However, given that: + // - we want to also handle the case when object is marked as immutable later + // - Secrets and ConfigMaps are periodically fetched by volumemanager anyway + // - doing that wouldn't provide visible scalability/performance gain - we + // already have it from here + // - doing that would require significant refactoring to reflector + // we limit ourselves to just quickly stop the reflector here. + if utilfeature.DefaultFeatureGate.Enabled(features.ImmutableEphemeralVolumes) && c.isImmutable(object) { + if item.stop() { + klog.V(4).Infof("Stopped watching for changes of %q/%q - object is immutable", namespace, name) + } + } return object, nil } return nil, fmt.Errorf("unexpected object type: %v", obj) @@ -188,7 +224,13 @@ func (c *objectCache) Get(namespace, name string) (runtime.Object, error) { // - whenever a pod is created or updated, we start individual watches for all // referenced objects that aren't referenced from other registered pods // - every GetObject() returns a value from local cache propagated via watches -func NewWatchBasedManager(listObject listObjectFunc, watchObject watchObjectFunc, newObject newObjectFunc, groupResource schema.GroupResource, getReferencedObjects func(*v1.Pod) sets.String) Manager { - objectStore := NewObjectCache(listObject, watchObject, newObject, groupResource) +func NewWatchBasedManager( + listObject listObjectFunc, + watchObject watchObjectFunc, + newObject newObjectFunc, + isImmutable isImmutableFunc, + groupResource schema.GroupResource, + getReferencedObjects func(*v1.Pod) sets.String) Manager { + objectStore := NewObjectCache(listObject, watchObject, newObject, isImmutable, groupResource) return NewCacheBasedManager(objectStore, getReferencedObjects) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index fce0d83991..677ba61eee 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -21,6 +21,7 @@ caches in sync with the "ground truth". 
package populator import ( + "context" "errors" "fmt" "sync" @@ -576,7 +577,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( namespace string, claimName string) (*v1.PersistentVolumeClaim, error) { pvc, err := - dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), claimName, metav1.GetOptions{}) if err != nil || pvc == nil { return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err) } @@ -612,7 +613,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec( name string, pvcReadOnly bool, expectedClaimUID types.UID) (*volume.Spec, string, error) { - pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{}) + pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil || pv == nil { return nil, "", fmt.Errorf( "failed to fetch PV %s from API server: %v", name, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go index adb2e2038c..5ac1fa37cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -20,6 +20,7 @@ limitations under the License. package reconciler import ( + "context" "fmt" "io/ioutil" "os" @@ -599,7 +600,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, // updateDevicePath gets the node status to retrieve volume device path information. func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) { - node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) + node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(rc.nodeName), metav1.GetOptions{}) if fetchErr != nil { klog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) } else { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/controller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/controller.go index d5ce070c7a..8068ef09ac 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/controller.go @@ -17,6 +17,7 @@ limitations under the License. 
package kubemark import ( + "context" "fmt" "math/rand" "sync" @@ -226,7 +227,7 @@ func (kubemarkController *KubemarkController) addNodeToNodeGroup(nodeGroup strin var err error for i := 0; i < numRetries; i++ { - _, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(node) + _, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(context.TODO(), node, metav1.CreateOptions{}) if err == nil { return nil } @@ -247,8 +248,7 @@ func (kubemarkController *KubemarkController) RemoveNodeFromNodeGroup(nodeGroup policy := metav1.DeletePropagationForeground var err error for i := 0; i < numRetries; i++ { - err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(namespaceKubemark).Delete( - pod.ObjectMeta.Labels["name"], + err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(namespaceKubemark).Delete(context.TODO(), pod.ObjectMeta.Labels["name"], &metav1.DeleteOptions{PropagationPolicy: &policy}) if err == nil { klog.Infof("marking node %s for deletion", node) @@ -374,7 +374,7 @@ func (kubemarkCluster *kubemarkCluster) removeUnneededNodes(oldObj interface{}, defer kubemarkCluster.nodesToDeleteLock.Unlock() if kubemarkCluster.nodesToDelete[node.Name] { kubemarkCluster.nodesToDelete[node.Name] = false - if err := kubemarkCluster.client.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{}); err != nil { + if err := kubemarkCluster.client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{}); err != nil { klog.Errorf("failed to delete node %s from kubemark cluster, err: %v", node.Name, err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/BUILD index 8c5c7ab447..ac34aeb493 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/BUILD @@ -17,8 +17,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/config:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", + "//vendor/k8s.io/utils/net:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go index 773dd542dc..63557542a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go @@ -26,11 +26,13 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" + utilfeature "k8s.io/apiserver/pkg/util/feature" componentbaseconfig "k8s.io/component-base/config" "k8s.io/component-base/metrics" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" kubefeatures "k8s.io/kubernetes/pkg/features" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" + netutils "k8s.io/utils/net" ) // Validate validates the configuration of kube-proxy 
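A condensed sketch of the ClusterCIDR rules introduced in the hunk below may help readability. It is illustrative only: validateClusterCIDR is an invented name, the error strings are abbreviated, and the field.ErrorList plumbing is replaced by plain errors; the branch structure and the IsDualStackCIDRStrings call from k8s.io/utils/net mirror the diff.

package main

import (
	"fmt"
	"net"
	"strings"

	netutils "k8s.io/utils/net"
)

// validateClusterCIDR condenses the switch from the hunk below: with
// dual-stack enabled, one CIDR or exactly two CIDRs of different IP
// families are accepted; with dual-stack disabled, only a single CIDR.
func validateClusterCIDR(clusterCIDR string, dualStackEnabled bool) error {
	cidrs := strings.Split(clusterCIDR, ",")
	switch {
	case dualStackEnabled && len(cidrs) > 2:
		return fmt.Errorf("only one CIDR allowed or a valid DualStack CIDR")
	case dualStackEnabled && len(cidrs) == 2:
		// IsDualStackCIDRStrings parses both CIDRs and reports whether the
		// pair spans both an IPv4 and an IPv6 family.
		isDual, err := netutils.IsDualStackCIDRStrings(cidrs)
		if err != nil || !isDual {
			return fmt.Errorf("must be a valid DualStack CIDR")
		}
	case !dualStackEnabled && len(cidrs) > 1:
		return fmt.Errorf("only one CIDR allowed")
	default:
		// len(cidrs) == 1 at this point; it just has to parse.
		if _, _, err := net.ParseCIDR(clusterCIDR); err != nil {
			return fmt.Errorf("must be a valid CIDR block")
		}
	}
	return nil
}

func main() {
	fmt.Println(validateClusterCIDR("10.100.0.0/16,fde4:8dba:82e1::/48", true))  // <nil>
	fmt.Println(validateClusterCIDR("10.100.0.0/16,10.200.0.0/16", true))        // dual-stack error
	fmt.Println(validateClusterCIDR("10.100.0.0/16,fde4:8dba:82e1::/48", false)) // single-CIDR error
}

In short: with dual-stack enabled, a mixed-family pair passes while two CIDRs of the same family are rejected; with it disabled, any comma-separated pair is rejected outright.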
@@ -39,6 +41,11 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList { newPath := field.NewPath("KubeProxyConfiguration") + effectiveFeatures := utilfeature.DefaultFeatureGate.DeepCopy() + if err := effectiveFeatures.SetFromMap(config.FeatureGates); err != nil { + allErrs = append(allErrs, field.Invalid(newPath.Child("featureGates"), config.FeatureGates, err.Error())) + } + allErrs = append(allErrs, validateKubeProxyIPTablesConfiguration(config.IPTables, newPath.Child("KubeProxyIPTablesConfiguration"))...) if config.Mode == kubeproxyconfig.ProxyModeIPVS { allErrs = append(allErrs, validateKubeProxyIPVSConfiguration(config.IPVS, newPath.Child("KubeProxyIPVSConfiguration"))...) @@ -69,16 +76,26 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList { allErrs = append(allErrs, validateHostPort(config.MetricsBindAddress, newPath.Child("MetricsBindAddress"))...) if config.ClusterCIDR != "" { - if config.FeatureGates[string(kubefeatures.IPv6DualStack)] { - cidrs := strings.Split(config.ClusterCIDR, ",") - for _, cidr := range cidrs { - if _, _, err := net.ParseCIDR(cidr); err != nil { - allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), cidr, "must be a valid CIDR block (e.g. 10.100.0.0/16 or FD02::0:0:0/96)")) - } + cidrs := strings.Split(config.ClusterCIDR, ",") + dualStackEnabled := effectiveFeatures.Enabled(kubefeatures.IPv6DualStack) + + switch { + // if DualStack, only one CIDR or two CIDRs with one of each IP family are valid + case dualStackEnabled && len(cidrs) > 2: + allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "only one CIDR allowed or a valid DualStack CIDR (e.g. 10.100.0.0/16,fde4:8dba:82e1::/48)")) + // if DualStack and two CIDRs, validate that there is at least one of each IP family + case dualStackEnabled && len(cidrs) == 2: + isDual, err := netutils.IsDualStackCIDRStrings(cidrs) + if err != nil || !isDual { + allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid DualStack CIDR (e.g. 10.100.0.0/16,fde4:8dba:82e1::/48)")) } - } else { + // if not DualStack, only one CIDR is allowed + case !dualStackEnabled && len(cidrs) > 1: + allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "only one CIDR allowed (e.g. 10.100.0.0/16 or fde4:8dba:82e1::/48)")) + // if we get here, len(cidrs) == 1 and we need to validate it + default: if _, _, err := net.ParseCIDR(config.ClusterCIDR); err != nil { - allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid CIDR block (e.g. 
10.100.0.0/16 or fde4:8dba:82e1::/48)")) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index 3d6ac101e5..1d082acb47 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -796,6 +796,16 @@ func (proxier *Proxier) syncProxyRules() { klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() + localAddrs, err := utilproxy.GetLocalAddrs() + if err != nil { + klog.Errorf("Failed to get local addresses during proxy sync: %v, assuming external IPs are not local", err) + } else if len(localAddrs) == 0 { + klog.Warning("No local addresses found, assuming all external IPs are not local") + } + + localAddrSet := utilnet.IPSet{} + localAddrSet.Insert(localAddrs...) + // We assume that if this was called, we really want to sync them, // even if nothing changed in the meantime. In other words, callers are // responsible for detecting no-op changes and not calling this function. @@ -848,7 +858,7 @@ func (proxier *Proxier) syncProxyRules() { // This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore existingFilterChains := make(map[utiliptables.Chain][]byte) proxier.existingFilterChainsData.Reset() - err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.existingFilterChainsData) + err = proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.existingFilterChainsData) if err != nil { // if we failed to get any rules klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output @@ -1030,9 +1040,7 @@ func (proxier *Proxier) syncProxyRules() { // If the "external" IP happens to be an IP that is local to this // machine, hold the local port open so no other process can open it // (because the socket might open but it would never work). - if local, err := utilproxy.IsLocalIP(externalIP); err != nil { - klog.Errorf("can't determine if IP is local, assuming not: %v", err) - } else if local && (svcInfo.Protocol() != v1.ProtocolSCTP) { + if localAddrSet.Len() > 0 && (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(net.ParseIP(externalIP)) { lp := utilproxy.LocalPort{ Description: "externalIP for " + svcNameString, IP: externalIP, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go index 83c9a01b3b..473294e03c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go @@ -990,33 +990,6 @@ func (proxier *Proxier) OnNodeSynced() { // EntryInvalidErr indicates if an ipset entry is invalid or not const EntryInvalidErr = "error adding entry %s to ipset %s" -func getLocalAddrs() ([]net.IP, error) { - var localAddrs []net.IP - - addrs, err := net.InterfaceAddrs() - if err != nil { - return nil, err - } - - for _, addr := range addrs { - ip, _, err := net.ParseCIDR(addr.String()) - if err != nil { - return nil, err - } - localAddrs = append(localAddrs, ip) - } - return localAddrs, nil -} - -func ipExists(ip net.IP, addrs []net.IP) bool { - for _, addr := range addrs { - if ip.Equal(addr) { - return true - } - } - return false -} - // This is where all of the ipvs calls happen. 
// assumes proxier.mu is held func (proxier *Proxier) syncProxyRules() { @@ -1036,11 +1009,16 @@ func (proxier *Proxier) syncProxyRules() { klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() - localAddrs, err := getLocalAddrs() + localAddrs, err := utilproxy.GetLocalAddrs() if err != nil { - klog.Errorf("Failed to get local addresses during proxy sync: %v", err) + klog.Errorf("Failed to get local addresses during proxy sync: %v, assuming external IPs are not local", err) + } else if len(localAddrs) == 0 { + klog.Warning("No local addresses found, assuming all external IPs are not local") } + localAddrSet := utilnet.IPSet{} + localAddrSet.Insert(localAddrs...) + // We assume that if this was called, we really want to sync them, // even if nothing changed in the meantime. In other words, callers are // responsible for detecting no-op changes and not calling this function. @@ -1222,9 +1200,10 @@ func (proxier *Proxier) syncProxyRules() { // Capture externalIPs. for _, externalIP := range svcInfo.ExternalIPStrings() { - if len(localAddrs) == 0 { - klog.Errorf("couldn't find any local IPs, assuming %s is not local", externalIP) - } else if (svcInfo.Protocol() != v1.ProtocolSCTP) && ipExists(net.ParseIP(externalIP), localAddrs) { + // If the "external" IP happens to be an IP that is local to this + // machine, hold the local port open so no other process can open it + // (because the socket might open but it would never work). + if localAddrSet.Len() > 0 && (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(net.ParseIP(externalIP)) { // We do not start listening on SCTP ports, according to our agreement in the SCTP support KEP lp := utilproxy.LocalPort{ Description: "externalIP for " + svcNameString, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD index 9c76a02c01..79348fdb79 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD @@ -36,6 +36,7 @@ go_library( "//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/net:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//vendor/golang.org/x/sys/unix:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go index 7a34529d6f..49bd24f705 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go @@ -41,6 +41,7 @@ import ( "k8s.io/kubernetes/pkg/util/conntrack" "k8s.io/kubernetes/pkg/util/iptables" utilexec "k8s.io/utils/exec" + netutils "k8s.io/utils/net" ) type portal struct { @@ -127,6 +128,7 @@ type Proxier struct { listenIP net.IP iptables iptables.Interface hostIP net.IP + localAddrs netutils.IPSet proxyPorts PortAllocator makeProxySocket ProxySocketFunc exec utilexec.Interface @@ -371,6 +373,17 @@ func (proxier *Proxier) syncProxyRules() { proxier.unmergeService(change.previous, existingPorts) } + localAddrs, err := utilproxy.GetLocalAddrs() + if err != nil { + klog.Errorf("Failed to get local addresses during proxy sync: %s, assuming IPs are not local", err) + } else if len(localAddrs) == 0 { + klog.Warning("No local 
addresses were found, assuming all external IPs are not local") + } + + localAddrSet := netutils.IPSet{} + localAddrSet.Insert(localAddrs...) + proxier.localAddrs = localAddrSet + proxier.ensurePortals() proxier.cleanupStaleStickySessions() } @@ -725,9 +738,7 @@ func (proxier *Proxier) openPortal(service proxy.ServicePortName, info *ServiceI } func (proxier *Proxier) openOnePortal(portal portal, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error { - if local, err := utilproxy.IsLocalIP(portal.ip.String()); err != nil { - return fmt.Errorf("can't determine if IP %s is local, assuming not: %v", portal.ip, err) - } else if local { + if proxier.localAddrs.Len() > 0 && proxier.localAddrs.Has(portal.ip) { err := proxier.claimNodePort(portal.ip, portal.port, protocol, name) if err != nil { return err @@ -903,10 +914,7 @@ func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *Service func (proxier *Proxier) closeOnePortal(portal portal, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error { el := []error{} - - if local, err := utilproxy.IsLocalIP(portal.ip.String()); err != nil { - el = append(el, fmt.Errorf("can't determine if IP %s is local, assuming not: %v", portal.ip, err)) - } else if local { + if proxier.localAddrs.Len() > 0 && proxier.localAddrs.Has(portal.ip) { if err := proxier.releaseNodePort(portal.ip, portal.port, protocol, name); err != nil { el = append(el, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go index fb3fe87001..b4f1b4304c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go @@ -123,23 +123,25 @@ func IsProxyableHostname(ctx context.Context, resolv Resolver, hostname string) return nil } -// IsLocalIP checks if a given IP address is bound to an interface -// on the local system -func IsLocalIP(ip string) (bool, error) { +// GetLocalAddrs returns a list of all network addresses on the local system +func GetLocalAddrs() ([]net.IP, error) { + var localAddrs []net.IP + addrs, err := net.InterfaceAddrs() if err != nil { - return false, err + return nil, err } - for i := range addrs { - intf, _, err := net.ParseCIDR(addrs[i].String()) + + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) if err != nil { - return false, err - } - if net.ParseIP(ip).Equal(intf) { - return true, nil + return nil, err } + + localAddrs = append(localAddrs, ip) } - return false, nil + + return localAddrs, nil } // ShouldSkipService checks if a given service should skip proxying diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hnsV1.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hnsV1.go index ed3545ea30..3787f4514e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hnsV1.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hnsV1.go @@ -75,6 +75,9 @@ func (hns hnsV1) getEndpointByIpAddress(ip string, networkName string) (*endpoin } endpoints, err := hcsshim.HNSListEndpointRequest() + if err != nil { + return nil, fmt.Errorf("Failed to list endpoints: %v", err) + } for _, endpoint := range endpoints { equal := false if endpoint.IPAddress != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hnsV2.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hnsV2.go index fbfbecf2c4..4898c2474f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hnsV2.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hnsV2.go @@ -83,6 +83,9 @@ func (hns hnsV2) getEndpointByIpAddress(ip string, networkName string) (*endpoin } endpoints, err := hcn.ListEndpoints() + if err != nil { + return nil, fmt.Errorf("Failed to list endpoints: %v", err) + } for _, endpoint := range endpoints { equal := false if endpoint.IpConfigurations != nil && len(endpoint.IpConfigurations) > 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD index 9c82fa07dd..c34ae8b92c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD @@ -30,7 +30,6 @@ go_library( "//pkg/scheduler/framework/plugins/volumebinding:go_default_library", "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library", "//pkg/scheduler/framework/plugins/volumezone:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], @@ -60,7 +59,6 @@ go_test( "//pkg/scheduler/framework/plugins/volumebinding:go_default_library", "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library", "//pkg/scheduler/framework/plugins/volumezone:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//vendor/github.com/google/go-cmp/cmp:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/registry.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/registry.go index 093a60c519..a992d63ea2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/registry.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/registry.go @@ -17,11 +17,9 @@ limitations under the License. package algorithmprovider import ( - "fmt" "sort" "strings" - "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog" "k8s.io/kubernetes/pkg/features" @@ -48,21 +46,15 @@ import ( // ClusterAutoscalerProvider defines the default autoscaler provider const ClusterAutoscalerProvider = "ClusterAutoscalerProvider" -// Config the configuration of an algorithm provider. -type Config struct { - FrameworkPlugins *schedulerapi.Plugins - FrameworkPluginConfig []schedulerapi.PluginConfig -} - // Registry is a collection of all available algorithm providers. -type Registry map[string]*Config +type Registry map[string]*schedulerapi.Plugins // NewRegistry returns an algorithm provider registry instance. 
-func NewRegistry(hardPodAffinityWeight int64) Registry { - defaultConfig := getDefaultConfig(hardPodAffinityWeight) +func NewRegistry() Registry { + defaultConfig := getDefaultConfig() applyFeatureGates(defaultConfig) - caConfig := getClusterAutoscalerConfig(hardPodAffinityWeight) + caConfig := getClusterAutoscalerConfig() applyFeatureGates(caConfig) return Registry{ @@ -73,7 +65,7 @@ func NewRegistry(hardPodAffinityWeight int64) Registry { // ListAlgorithmProviders lists registered algorithm providers. func ListAlgorithmProviders() string { - r := NewRegistry(1) + r := NewRegistry() var providers []string for k := range r { providers = append(providers, k) @@ -82,109 +74,94 @@ func ListAlgorithmProviders() string { return strings.Join(providers, " | ") } -func getDefaultConfig(hardPodAffinityWeight int64) *Config { - return &Config{ - FrameworkPlugins: &schedulerapi.Plugins{ - QueueSort: &schedulerapi.PluginSet{ - Enabled: []schedulerapi.Plugin{ - {Name: queuesort.Name}, - }, - }, - PreFilter: &schedulerapi.PluginSet{ - Enabled: []schedulerapi.Plugin{ - {Name: noderesources.FitName}, - {Name: nodeports.Name}, - {Name: interpodaffinity.Name}, - }, - }, - Filter: &schedulerapi.PluginSet{ - Enabled: []schedulerapi.Plugin{ - {Name: nodeunschedulable.Name}, - {Name: noderesources.FitName}, - {Name: nodename.Name}, - {Name: nodeports.Name}, - {Name: nodeaffinity.Name}, - {Name: volumerestrictions.Name}, - {Name: tainttoleration.Name}, - {Name: nodevolumelimits.EBSName}, - {Name: nodevolumelimits.GCEPDName}, - {Name: nodevolumelimits.CSIName}, - {Name: nodevolumelimits.AzureDiskName}, - {Name: volumebinding.Name}, - {Name: volumezone.Name}, - {Name: interpodaffinity.Name}, - }, - }, - PostFilter: &schedulerapi.PluginSet{ - Enabled: []schedulerapi.Plugin{ - {Name: interpodaffinity.Name}, - {Name: defaultpodtopologyspread.Name}, - {Name: tainttoleration.Name}, - }, - }, - Score: &schedulerapi.PluginSet{ - Enabled: []schedulerapi.Plugin{ - {Name: noderesources.BalancedAllocationName, Weight: 1}, - {Name: imagelocality.Name, Weight: 1}, - {Name: interpodaffinity.Name, Weight: 1}, - {Name: noderesources.LeastAllocatedName, Weight: 1}, - {Name: nodeaffinity.Name, Weight: 1}, - {Name: nodepreferavoidpods.Name, Weight: 10000}, - {Name: defaultpodtopologyspread.Name, Weight: 1}, - {Name: tainttoleration.Name, Weight: 1}, - }, - }, - Bind: &schedulerapi.PluginSet{ - Enabled: []schedulerapi.Plugin{ - {Name: defaultbinder.Name}, - }, +func getDefaultConfig() *schedulerapi.Plugins { + return &schedulerapi.Plugins{ + QueueSort: &schedulerapi.PluginSet{ + Enabled: []schedulerapi.Plugin{ + {Name: queuesort.Name}, }, }, - FrameworkPluginConfig: []schedulerapi.PluginConfig{ - { - Name: interpodaffinity.Name, - Args: runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, hardPodAffinityWeight))}, + PreFilter: &schedulerapi.PluginSet{ + Enabled: []schedulerapi.Plugin{ + {Name: noderesources.FitName}, + {Name: nodeports.Name}, + {Name: interpodaffinity.Name}, + }, + }, + Filter: &schedulerapi.PluginSet{ + Enabled: []schedulerapi.Plugin{ + {Name: nodeunschedulable.Name}, + {Name: noderesources.FitName}, + {Name: nodename.Name}, + {Name: nodeports.Name}, + {Name: nodeaffinity.Name}, + {Name: volumerestrictions.Name}, + {Name: tainttoleration.Name}, + {Name: nodevolumelimits.EBSName}, + {Name: nodevolumelimits.GCEPDName}, + {Name: nodevolumelimits.CSIName}, + {Name: nodevolumelimits.AzureDiskName}, + {Name: volumebinding.Name}, + {Name: volumezone.Name}, + {Name: interpodaffinity.Name}, + }, + }, + 
PreScore: &schedulerapi.PluginSet{ + Enabled: []schedulerapi.Plugin{ + {Name: interpodaffinity.Name}, + {Name: defaultpodtopologyspread.Name}, + {Name: tainttoleration.Name}, + }, + }, + Score: &schedulerapi.PluginSet{ + Enabled: []schedulerapi.Plugin{ + {Name: noderesources.BalancedAllocationName, Weight: 1}, + {Name: imagelocality.Name, Weight: 1}, + {Name: interpodaffinity.Name, Weight: 1}, + {Name: noderesources.LeastAllocatedName, Weight: 1}, + {Name: nodeaffinity.Name, Weight: 1}, + {Name: nodepreferavoidpods.Name, Weight: 10000}, + {Name: defaultpodtopologyspread.Name, Weight: 1}, + {Name: tainttoleration.Name, Weight: 1}, + }, + }, + Bind: &schedulerapi.PluginSet{ + Enabled: []schedulerapi.Plugin{ + {Name: defaultbinder.Name}, }, }, } } -func getClusterAutoscalerConfig(hardPodAffinityWeight int64) *Config { - defaultConfig := getDefaultConfig(hardPodAffinityWeight) - caConfig := Config{ - FrameworkPlugins: &schedulerapi.Plugins{}, - } - defaultConfig.FrameworkPlugins.DeepCopyInto(caConfig.FrameworkPlugins) - caConfig.FrameworkPluginConfig = append([]schedulerapi.PluginConfig(nil), defaultConfig.FrameworkPluginConfig...) - +func getClusterAutoscalerConfig() *schedulerapi.Plugins { + caConfig := getDefaultConfig() // Replace least with most requested. - for i := range caConfig.FrameworkPlugins.Score.Enabled { - if caConfig.FrameworkPlugins.Score.Enabled[i].Name == noderesources.LeastAllocatedName { - caConfig.FrameworkPlugins.Score.Enabled[i].Name = noderesources.MostAllocatedName + for i := range caConfig.Score.Enabled { + if caConfig.Score.Enabled[i].Name == noderesources.LeastAllocatedName { + caConfig.Score.Enabled[i].Name = noderesources.MostAllocatedName } } - - return &caConfig + return caConfig } -func applyFeatureGates(config *Config) { +func applyFeatureGates(config *schedulerapi.Plugins) { // Only add EvenPodsSpread if the feature is enabled. 
if utilfeature.DefaultFeatureGate.Enabled(features.EvenPodsSpread) { klog.Infof("Registering EvenPodsSpread predicate and priority function") f := schedulerapi.Plugin{Name: podtopologyspread.Name} - config.FrameworkPlugins.PreFilter.Enabled = append(config.FrameworkPlugins.PreFilter.Enabled, f) - config.FrameworkPlugins.Filter.Enabled = append(config.FrameworkPlugins.Filter.Enabled, f) - config.FrameworkPlugins.PostFilter.Enabled = append(config.FrameworkPlugins.PostFilter.Enabled, f) + config.PreFilter.Enabled = append(config.PreFilter.Enabled, f) + config.Filter.Enabled = append(config.Filter.Enabled, f) + config.PreScore.Enabled = append(config.PreScore.Enabled, f) s := schedulerapi.Plugin{Name: podtopologyspread.Name, Weight: 1} - config.FrameworkPlugins.Score.Enabled = append(config.FrameworkPlugins.Score.Enabled, s) + config.Score.Enabled = append(config.Score.Enabled, s) } // Prioritizes nodes that satisfy pod's resource limits if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) { klog.Infof("Registering resourcelimits priority function") s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName} - config.FrameworkPlugins.PostFilter.Enabled = append(config.FrameworkPlugins.PostFilter.Enabled, s) + config.PreScore.Enabled = append(config.PreScore.Enabled, s) s = schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1} - config.FrameworkPlugins.Score.Enabled = append(config.FrameworkPlugins.Score.Enabled, s) + config.Score.Enabled = append(config.Score.Enabled, s) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD index 501c7f4281..2c3fec71ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/BUILD @@ -35,6 +35,7 @@ filegroup( "//pkg/scheduler/apis/config/testing:all-srcs", "//pkg/scheduler/apis/config/v1:all-srcs", "//pkg/scheduler/apis/config/v1alpha1:all-srcs", + "//pkg/scheduler/apis/config/v1alpha2:all-srcs", "//pkg/scheduler/apis/config/validation:all-srcs", ], tags = ["automanaged"], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go index 7942f8469b..4b6ceab8c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go @@ -168,8 +168,8 @@ type Plugins struct { // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. Filter *PluginSet - // PostFilter is a list of plugins that are invoked after filtering out infeasible nodes. - PostFilter *PluginSet + // PreScore is a list of plugins that are invoked before scoring. + PreScore *PluginSet // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. 
Score *PluginSet @@ -262,7 +262,7 @@ func (p *Plugins) Append(src *Plugins) { p.QueueSort = appendPluginSet(p.QueueSort, src.QueueSort) p.PreFilter = appendPluginSet(p.PreFilter, src.PreFilter) p.Filter = appendPluginSet(p.Filter, src.Filter) - p.PostFilter = appendPluginSet(p.PostFilter, src.PostFilter) + p.PreScore = appendPluginSet(p.PreScore, src.PreScore) p.Score = appendPluginSet(p.Score, src.Score) p.Reserve = appendPluginSet(p.Reserve, src.Reserve) p.Permit = appendPluginSet(p.Permit, src.Permit) @@ -281,7 +281,7 @@ func (p *Plugins) Apply(customPlugins *Plugins) { p.QueueSort = mergePluginSets(p.QueueSort, customPlugins.QueueSort) p.PreFilter = mergePluginSets(p.PreFilter, customPlugins.PreFilter) p.Filter = mergePluginSets(p.Filter, customPlugins.Filter) - p.PostFilter = mergePluginSets(p.PostFilter, customPlugins.PostFilter) + p.PreScore = mergePluginSets(p.PreScore, customPlugins.PreScore) p.Score = mergePluginSets(p.Score, customPlugins.Score) p.Reserve = mergePluginSets(p.Reserve, customPlugins.Reserve) p.Permit = mergePluginSets(p.Permit, customPlugins.Permit) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go index 20b0f03e6e..ce121d258c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go @@ -269,8 +269,8 @@ func (in *Plugins) DeepCopyInto(out *Plugins) { *out = new(PluginSet) (*in).DeepCopyInto(*out) } - if in.PostFilter != nil { - in, out := &in.PostFilter, &out.PostFilter + if in.PreScore != nil { + in, out := &in.PreScore, &out.PreScore *out = new(PluginSet) (*in).DeepCopyInto(*out) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go index 0e6ca52cce..dd78ae051b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go @@ -43,8 +43,8 @@ var _ framework.ScorePlugin = &DefaultPodTopologySpread{} const ( // Name is the name of the plugin used in the plugin registry and configurations. Name = "DefaultPodTopologySpread" - // postFilterStateKey is the key in CycleState to DefaultPodTopologySpread pre-computed data for Scoring. - postFilterStateKey = "PostFilter" + Name + // preScoreStateKey is the key in CycleState to DefaultPodTopologySpread pre-computed data for Scoring. + preScoreStateKey = "PreScore" + Name // When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading // TODO: Any way to justify this weighting? @@ -56,29 +56,38 @@ func (pl *DefaultPodTopologySpread) Name() string { return Name } -// postFilterState computed at PostFilter and used at Score. -type postFilterState struct { +// preScoreState computed at PreScore and used at Score. +type preScoreState struct { selector labels.Selector } // Clone implements the mandatory Clone interface. We don't really copy the data since // there is no need for that. 
-func (s *postFilterState) Clone() framework.StateData { +func (s *preScoreState) Clone() framework.StateData { return s } +// skipDefaultPodTopologySpread returns true if the pod's TopologySpreadConstraints are specified. +func skipDefaultPodTopologySpread(pod *v1.Pod) bool { + return len(pod.Spec.TopologySpreadConstraints) != 0 +} + // Score invoked at the Score extension point. // The "score" returned in this function is the matching number of pods on the `nodeName`, // it is normalized later. func (pl *DefaultPodTopologySpread) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { - c, err := state.Read(postFilterStateKey) - if err != nil { - return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error reading %q from cycleState: %v", postFilterStateKey, err)) + if skipDefaultPodTopologySpread(pod) { + return 0, nil } - s, ok := c.(*postFilterState) + c, err := state.Read(preScoreStateKey) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error reading %q from cycleState: %v", preScoreStateKey, err)) + } + + s, ok := c.(*preScoreState) if !ok { - return 0, framework.NewStatus(framework.Error, fmt.Sprintf("%+v convert to tainttoleration.postFilterState error", c)) + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("%+v convert to tainttoleration.preScoreState error", c)) } nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) @@ -96,6 +105,10 @@ func (pl *DefaultPodTopologySpread) Score(ctx context.Context, state *framework. // where zone information is included on the nodes, it favors nodes // in zones with fewer existing matching pods. func (pl *DefaultPodTopologySpread) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { + if skipDefaultPodTopologySpread(pod) { + return nil + } + countsByZone := make(map[string]int64, 10) maxCountByZone := int64(0) maxCountByNodeName := int64(0) @@ -164,8 +177,8 @@ func (pl *DefaultPodTopologySpread) ScoreExtensions() framework.ScoreExtensions return pl } -// PostFilter builds and writes cycle state used by Score and NormalizeScore. -func (pl *DefaultPodTopologySpread) PostFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node, _ framework.NodeToStatusMap) *framework.Status { +// PreScore builds and writes cycle state used by Score and NormalizeScore. 
+func (pl *DefaultPodTopologySpread) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node, _ framework.NodeToStatusMap) *framework.Status { var selector labels.Selector informerFactory := pl.handle.SharedInformerFactory() selector = getSelector( @@ -175,10 +188,10 @@ func (pl *DefaultPodTopologySpread) PostFilter(ctx context.Context, cycleState * informerFactory.Apps().V1().ReplicaSets().Lister(), informerFactory.Apps().V1().StatefulSets().Lister(), ) - state := &postFilterState{ + state := &preScoreState{ selector: selector, } - cycleState.Write(postFilterStateKey, state) + cycleState.Write(preScoreStateKey, state) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go index 0a6ffe5749..af03526062 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go @@ -388,10 +388,8 @@ func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { c, err := cycleState.Read(preFilterStateKey) if err != nil { - // The preFilterState wasn't pre-computed in prefilter. We ignore the error for now since - // Filter is able to handle that by computing it again. - klog.V(5).Infof("Error reading %q from cycleState: %v", preFilterStateKey, err) - return nil, nil + // preFilterState doesn't exist, likely PreFilter wasn't invoked. + return nil, fmt.Errorf("error reading %q from cycleState: %v", preFilterStateKey, err) } s, ok := c.(*preFilterState) @@ -405,20 +403,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error // terms indicated by the existing pods. func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state *preFilterState, nodeInfo *nodeinfo.NodeInfo) (bool, error) { node := nodeInfo.Node() - var topologyMap topologyToMatchedTermCount - if state != nil { - topologyMap = state.topologyToMatchedExistingAntiAffinityTerms - } else { - // Filter out pods whose nodeName is equal to nodeInfo.node.Name, but are not - // present in nodeInfo. Pods on other nodes pass the filter. - filteredPods, err := pl.sharedLister.Pods().FilteredList(nodeInfo.Filter, labels.Everything()) - if err != nil { - return false, fmt.Errorf("Failed to get all pods: %v", err) - } - if topologyMap, err = pl.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil { - return false, fmt.Errorf("Failed to get all terms that match pod: %v", err) - } - } + topologyMap := state.topologyToMatchedExistingAntiAffinityTerms // Iterate over topology pairs to get any of the pods being affected by // the scheduled pod anti-affinity terms @@ -462,37 +447,6 @@ func nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs topologyToMatchedTerm return false } -// podMatchesPodAffinityTerms checks if the "targetPod" matches the given "terms" -// of the "pod" on the given "nodeInfo".Node(). It returns three values: 1) whether -// targetPod matches all the terms and their topologies, 2) whether targetPod -// matches all the terms label selector and namespaces 3) any error. 
-func (pl *InterPodAffinity) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *nodeinfo.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) { - if len(terms) == 0 { - return false, false, fmt.Errorf("terms array is empty") - } - props, err := getAffinityTerms(pod, terms) - if err != nil { - return false, false, err - } - if !podMatchesAllAffinityTerms(targetPod, props) { - return false, false, nil - } - // Namespace and selector of the terms have matched. Now we check topology of the terms. - targetPodNodeInfo, err := pl.sharedLister.NodeInfos().Get(targetPod.Spec.NodeName) - if err != nil { - return false, false, err - } - for _, term := range terms { - if len(term.TopologyKey) == 0 { - return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") - } - if !schedutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo.Node(), term.TopologyKey) { - return false, true, nil - } - } - return true, true, nil -} - // getMatchingAntiAffinityTopologyPairs calculates the following for "existingPod" on given node: // (1) Whether it has PodAntiAffinity // (2) Whether ANY AffinityTerm matches the incoming pod @@ -519,24 +473,6 @@ func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.P return topologyMap, nil } -func (pl *InterPodAffinity) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.Pod, existingPods []*v1.Pod) (topologyToMatchedTermCount, error) { - topologyMaps := make(topologyToMatchedTermCount) - - for _, existingPod := range existingPods { - existingPodNodeInfo, err := pl.sharedLister.NodeInfos().Get(existingPod.Spec.NodeName) - if err != nil { - klog.Errorf("Pod %s has NodeName %q but node is not found", existingPod.Name, existingPod.Spec.NodeName) - continue - } - existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNodeInfo.Node()) - if err != nil { - return nil, err - } - topologyMaps.append(existingPodTopologyMaps) - } - return topologyMaps, nil -} - // satisfiesPodsAffinityAntiAffinity checks if scheduling the pod onto this node would break any term of this pod. // This function returns two boolean flags. The first boolean flag indicates whether the pod matches affinity rules // or not. The second boolean flag indicates if the pod matches anti-affinity rules. @@ -547,79 +483,31 @@ func (pl *InterPodAffinity) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, if node == nil { return false, false, fmt.Errorf("node not found") } - if state != nil { - // Check all affinity terms. - topologyToMatchedAffinityTerms := state.topologyToMatchedAffinityTerms - if affinityTerms := schedutil.GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 { - matchExists := nodeMatchesAllTopologyTerms(pod, topologyToMatchedAffinityTerms, nodeInfo, affinityTerms) - if !matchExists { - // This pod may the first pod in a series that have affinity to themselves. In order - // to not leave such pods in pending state forever, we check that if no other pod - // in the cluster matches the namespace and selector of this pod and the pod matches - // its own terms, then we allow the pod to pass the affinity check. - if len(topologyToMatchedAffinityTerms) != 0 || !targetPodMatchesAffinityOfPod(pod, pod) { - return false, false, nil - } - } - } - // Check all anti-affinity terms. 
- topologyToMatchedAntiAffinityTerms := state.topologyToMatchedAntiAffinityTerms - if antiAffinityTerms := schedutil.GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 { - matchExists := nodeMatchesAnyTopologyTerm(pod, topologyToMatchedAntiAffinityTerms, nodeInfo, antiAffinityTerms) - if matchExists { - return true, false, nil - } - } - } else { // We don't have precomputed preFilterState. We have to follow a slow path to check affinity terms. - filteredPods, err := pl.sharedLister.Pods().FilteredList(nodeInfo.Filter, labels.Everything()) - if err != nil { - return false, false, err - } - - affinityTerms := schedutil.GetPodAffinityTerms(affinity.PodAffinity) - antiAffinityTerms := schedutil.GetPodAntiAffinityTerms(affinity.PodAntiAffinity) - matchFound, termsSelectorMatchFound := false, false - for _, targetPod := range filteredPods { - // Check all affinity terms. - if !matchFound && len(affinityTerms) > 0 { - affTermsMatch, termsSelectorMatch, err := pl.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, affinityTerms) - if err != nil { - return false, false, err - } - if termsSelectorMatch { - termsSelectorMatchFound = true - } - if affTermsMatch { - matchFound = true - } - } - - // Check all anti-affinity terms. - if len(antiAffinityTerms) > 0 { - antiAffTermsMatch, _, err := pl.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, antiAffinityTerms) - if err != nil || antiAffTermsMatch { - return true, false, err - } - } - } - - if !matchFound && len(affinityTerms) > 0 { - // We have not been able to find any matches for the pod's affinity terms. - // This pod may be the first pod in a series that have affinity to themselves. In order + // Check all affinity terms. + topologyToMatchedAffinityTerms := state.topologyToMatchedAffinityTerms + if affinityTerms := schedutil.GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 { + matchExists := nodeMatchesAllTopologyTerms(pod, topologyToMatchedAffinityTerms, nodeInfo, affinityTerms) + if !matchExists { + // This pod may be the first pod in a series that have affinity to themselves. In order // to not leave such pods in pending state forever, we check that if no other pod // in the cluster matches the namespace and selector of this pod and the pod matches // its own terms, then we allow the pod to pass the affinity check. - if termsSelectorMatchFound { - return false, false, nil - } - // Check if pod matches its own affinity terms (namespace and label selector). - if !targetPodMatchesAffinityOfPod(pod, pod) { + if len(topologyToMatchedAffinityTerms) != 0 || !targetPodMatchesAffinityOfPod(pod, pod) { return false, false, nil } } } + // Check all anti-affinity terms.
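// A sketch of the self-affinity escape hatch kept above, collapsing the per-node
// topology check into one boolean; names are hypothetical. A pod whose affinity
// terms match nothing in the cluster may still pass if it matches its own terms,
// so the first pod of a self-affine group is not left pending forever.
package main

import "fmt"

func passesAffinityCheck(anyMatchedTopologyTerms, podMatchesOwnTerms bool) bool {
	if anyMatchedTopologyTerms {
		return true // existing pods already satisfy the terms here
	}
	return podMatchesOwnTerms // otherwise, only a group-seeding pod passes
}

func main() {
	fmt.Println(passesAffinityCheck(false, true))  // true: first pod of the series
	fmt.Println(passesAffinityCheck(false, false)) // false: affinity genuinely unsatisfied
}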
+ topologyToMatchedAntiAffinityTerms := state.topologyToMatchedAntiAffinityTerms + if antiAffinityTerms := schedutil.GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 { + matchExists := nodeMatchesAnyTopologyTerm(pod, topologyToMatchedAntiAffinityTerms, nodeInfo, antiAffinityTerms) + if matchExists { + return true, false, nil + } + } + return true, true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go index 9be62fb899..d31729e2f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go @@ -25,17 +25,23 @@ import ( schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" ) -// Name is the name of the plugin used in the plugin registry and configurations. -const Name = "InterPodAffinity" +const ( + // Name is the name of the plugin used in the plugin registry and configurations. + Name = "InterPodAffinity" + + defaultHardPodAffinityWeight int32 = 1 + minHardPodAffinityWeight int32 = 0 + maxHardPodAffinityWeight int32 = 100 +) // Args holds the args that are used to configure the plugin. type Args struct { - HardPodAffinityWeight int32 `json:"hardPodAffinityWeight,omitempty"` + HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` } var _ framework.PreFilterPlugin = &InterPodAffinity{} var _ framework.FilterPlugin = &InterPodAffinity{} -var _ framework.PostFilterPlugin = &InterPodAffinity{} +var _ framework.PreScorePlugin = &InterPodAffinity{} var _ framework.ScorePlugin = &InterPodAffinity{} // InterPodAffinity is a plugin that checks inter pod affinity @@ -55,14 +61,31 @@ func New(plArgs *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin if h.SnapshotSharedLister() == nil { return nil, fmt.Errorf("SnapshotSharedlister is nil") } - args := &Args{} if err := framework.DecodeInto(plArgs, args); err != nil { return nil, err } - - return &InterPodAffinity{ + if err := validateArgs(args); err != nil { + return nil, err + } + pl := &InterPodAffinity{ sharedLister: h.SnapshotSharedLister(), - hardPodAffinityWeight: args.HardPodAffinityWeight, - }, nil + hardPodAffinityWeight: defaultHardPodAffinityWeight, + } + if args.HardPodAffinityWeight != nil { + pl.hardPodAffinityWeight = *args.HardPodAffinityWeight + } + return pl, nil +} + +func validateArgs(args *Args) error { + if args.HardPodAffinityWeight == nil { + return nil + } + + weight := *args.HardPodAffinityWeight + if weight < minHardPodAffinityWeight || weight > maxHardPodAffinityWeight { + return fmt.Errorf("invalid args.hardPodAffinityWeight: %d, must be in the range %d-%d", weight, minHardPodAffinityWeight, maxHardPodAffinityWeight) + } + return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go index ac0630e0f4..34897d669a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go @@ -29,11 +29,11 @@ import ( schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) -// postFilterStateKey is the key in 
CycleState to InterPodAffinity pre-computed data for Scoring. -const postFilterStateKey = "PostFilter" + Name +// preScoreStateKey is the key in CycleState to InterPodAffinity pre-computed data for Scoring. +const preScoreStateKey = "PreScore" + Name -// postFilterState computed at PostFilter and used at Score. -type postFilterState struct { +// preScoreState computed at PreScore and used at Score. +type preScoreState struct { topologyScore map[string]map[string]int64 affinityTerms []*weightedAffinityTerm antiAffinityTerms []*weightedAffinityTerm @@ -41,7 +41,7 @@ type postFilterState struct { // Clone implements the mandatory Clone interface. We don't really copy the data since // there is no need for that. -func (s *postFilterState) Clone() framework.StateData { +func (s *preScoreState) Clone() framework.StateData { return s } @@ -77,7 +77,7 @@ func getWeightedAffinityTerms(pod *v1.Pod, v1Terms []v1.WeightedPodAffinityTerm) } func (pl *InterPodAffinity) processTerm( - state *postFilterState, + state *preScoreState, term *weightedAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, @@ -100,14 +100,14 @@ func (pl *InterPodAffinity) processTerm( return } -func (pl *InterPodAffinity) processTerms(state *postFilterState, terms []*weightedAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, multiplier int) error { +func (pl *InterPodAffinity) processTerms(state *preScoreState, terms []*weightedAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, multiplier int) error { for _, term := range terms { pl.processTerm(state, term, podToCheck, fixedNode, multiplier) } return nil } -func (pl *InterPodAffinity) processExistingPod(state *postFilterState, existingPod *v1.Pod, existingPodNodeInfo *nodeinfo.NodeInfo, incomingPod *v1.Pod) error { +func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod *v1.Pod, existingPodNodeInfo *nodeinfo.NodeInfo, incomingPod *v1.Pod) error { existingPodAffinity := existingPod.Spec.Affinity existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil @@ -166,8 +166,8 @@ func (pl *InterPodAffinity) processExistingPod(state *postFilterState, existingP return nil } -// PostFilter builds and writes cycle state used by Score and NormalizeScore. -func (pl *InterPodAffinity) PostFilter( +// PreScore builds and writes cycle state used by Score and NormalizeScore. 
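// A sketch of the pointer-based default introduced for HardPodAffinityWeight above:
// nil means "use the default", while an explicit value is range-checked. The helper
// name resolveWeight is illustrative.
package main

import "fmt"

const (
	defaultHardPodAffinityWeight int32 = 1
	minHardPodAffinityWeight     int32 = 0
	maxHardPodAffinityWeight     int32 = 100
)

func resolveWeight(w *int32) (int32, error) {
	if w == nil {
		return defaultHardPodAffinityWeight, nil // field omitted in the plugin config
	}
	if *w < minHardPodAffinityWeight || *w > maxHardPodAffinityWeight {
		return 0, fmt.Errorf("invalid hardPodAffinityWeight: %d, must be in the range %d-%d",
			*w, minHardPodAffinityWeight, maxHardPodAffinityWeight)
	}
	return *w, nil
}

func main() {
	fmt.Println(resolveWeight(nil)) // 1 <nil>
	w := int32(101)
	fmt.Println(resolveWeight(&w)) // 0 invalid hardPodAffinityWeight: 101, ...
}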
+func (pl *InterPodAffinity) PreScore( pCtx context.Context, cycleState *framework.CycleState, pod *v1.Pod, @@ -215,7 +215,7 @@ func (pl *InterPodAffinity) PostFilter( } } - state := &postFilterState{ + state := &preScoreState{ topologyScore: make(map[string]map[string]int64), affinityTerms: affinityTerms, antiAffinityTerms: antiAffinityTerms, @@ -248,19 +248,19 @@ func (pl *InterPodAffinity) PostFilter( return framework.NewStatus(framework.Error, err.Error()) } - cycleState.Write(postFilterStateKey, state) + cycleState.Write(preScoreStateKey, state) return nil } -func getPostFilterState(cycleState *framework.CycleState) (*postFilterState, error) { - c, err := cycleState.Read(postFilterStateKey) +func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) { + c, err := cycleState.Read(preScoreStateKey) if err != nil { - return nil, fmt.Errorf("Error reading %q from cycleState: %v", postFilterStateKey, err) + return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err) } - s, ok := c.(*postFilterState) + s, ok := c.(*preScoreState) if !ok { - return nil, fmt.Errorf("%+v convert to interpodaffinity.postFilterState error", c) + return nil, fmt.Errorf("%+v convert to interpodaffinity.preScoreState error", c) } return s, nil } @@ -275,7 +275,7 @@ func (pl *InterPodAffinity) Score(ctx context.Context, cycleState *framework.Cyc } node := nodeInfo.Node() - s, err := getPostFilterState(cycleState) + s, err := getPreScoreState(cycleState) if err != nil { return 0, framework.NewStatus(framework.Error, err.Error()) } @@ -293,7 +293,7 @@ func (pl *InterPodAffinity) Score(ctx context.Context, cycleState *framework.Cyc // The basic rule is: the bigger the score(matching number of pods) is, the smaller the // final normalized score will be. 
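// A worked sketch of the inversion rule stated above ("the bigger the matching
// count, the smaller the final score"), assuming a simple max-based normalization;
// the vendored NormalizeScore also handles weights and negative anti-affinity sums.
package main

import "fmt"

const maxNodeScore int64 = 100

func normalize(matchCounts []int64) []int64 {
	var max int64
	for _, c := range matchCounts {
		if c > max {
			max = c
		}
	}
	scores := make([]int64, len(matchCounts))
	if max == 0 {
		return scores // no matches anywhere; every node scores equally
	}
	for i, c := range matchCounts {
		scores[i] = maxNodeScore * (max - c) / max // more matching pods -> lower score
	}
	return scores
}

func main() {
	fmt.Println(normalize([]int64{0, 2, 4})) // [100 50 0]
}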
func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { - s, err := getPostFilterState(cycleState) + s, err := getPreScoreState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/legacy_registry.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/legacy_registry.go index 68d5c7bc2d..84f94bf1ef 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/legacy_registry.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/legacy_registry.go @@ -330,12 +330,12 @@ func NewLegacyRegistry() *LegacyRegistry { registry.registerPriorityConfigProducer(SelectorSpreadPriority, func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { plugins.Score = appendToPluginSet(plugins.Score, defaultpodtopologyspread.Name, &args.Weight) - plugins.PostFilter = appendToPluginSet(plugins.PostFilter, defaultpodtopologyspread.Name, nil) + plugins.PreScore = appendToPluginSet(plugins.PreScore, defaultpodtopologyspread.Name, nil) return }) registry.registerPriorityConfigProducer(TaintTolerationPriority, func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { - plugins.PostFilter = appendToPluginSet(plugins.PostFilter, tainttoleration.Name, nil) + plugins.PreScore = appendToPluginSet(plugins.PreScore, tainttoleration.Name, nil) plugins.Score = appendToPluginSet(plugins.Score, tainttoleration.Name, &args.Weight) return }) @@ -351,7 +351,7 @@ func NewLegacyRegistry() *LegacyRegistry { }) registry.registerPriorityConfigProducer(InterPodAffinityPriority, func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { - plugins.PostFilter = appendToPluginSet(plugins.PostFilter, interpodaffinity.Name, nil) + plugins.PreScore = appendToPluginSet(plugins.PreScore, interpodaffinity.Name, nil) plugins.Score = appendToPluginSet(plugins.Score, interpodaffinity.Name, &args.Weight) pluginConfig = append(pluginConfig, makePluginConfig(interpodaffinity.Name, args.InterPodAffinityArgs)) return @@ -420,7 +420,7 @@ func NewLegacyRegistry() *LegacyRegistry { registry.registerPriorityConfigProducer(EvenPodsSpreadPriority, func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { - plugins.PostFilter = appendToPluginSet(plugins.PostFilter, podtopologyspread.Name, nil) + plugins.PreScore = appendToPluginSet(plugins.PreScore, podtopologyspread.Name, nil) plugins.Score = appendToPluginSet(plugins.Score, podtopologyspread.Name, &args.Weight) return }) @@ -433,7 +433,7 @@ func NewLegacyRegistry() *LegacyRegistry { registry.registerPriorityConfigProducer(ResourceLimitsPriority, func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { - plugins.PostFilter = appendToPluginSet(plugins.PostFilter, noderesources.ResourceLimitsName, nil) + plugins.PreScore = appendToPluginSet(plugins.PreScore, noderesources.ResourceLimitsName, nil) plugins.Score = appendToPluginSet(plugins.Score, noderesources.ResourceLimitsName, &args.Weight) return }) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go index 
df667c3a9c..db20a3ca4e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go @@ -86,9 +86,8 @@ func (pl *NodePorts) PreFilterExtensions() framework.PreFilterExtensions { func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error) { c, err := cycleState.Read(preFilterStateKey) if err != nil { - // preFilterState state doesn't exist. We ignore the error for now since - // Filter is able to handle that by computing it again. - return nil, nil + // preFilterState doesn't exist, likely PreFilter wasn't invoked. + return nil, fmt.Errorf("error reading %q from cycleState: %v", preFilterStateKey, err) } s, ok := c.(preFilterState) @@ -105,12 +104,7 @@ func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleStat return framework.NewStatus(framework.Error, err.Error()) } - var fits bool - if wantPorts != nil { - fits = fitsPorts(wantPorts, nodeInfo) - } else { - fits = Fits(pod, nodeInfo) - } + fits := fitsPorts(wantPorts, nodeInfo) if !fits { return framework.NewStatus(framework.Unschedulable, ErrReason) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go index 7f2b3f777b..683d5105bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" @@ -52,7 +51,7 @@ type Fit struct { type FitArgs struct { // IgnoredResources is the list of resources that NodeResources fit filter // should ignore. - IgnoredResources []string `json:"IgnoredResources,omitempty"` + IgnoredResources []string `json:"ignoredResources,omitempty"` } // preFilterState computed at PreFilter and used at Filter. @@ -130,10 +129,8 @@ func (f *Fit) PreFilterExtensions() framework.PreFilterExtensions { func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { c, err := cycleState.Read(preFilterStateKey) if err != nil { - // The preFilterState doesn't exist. We ignore the error for now since - // Filter is able to handle that by computing it again. - klog.V(5).Infof("Error reading %q from cycleState: %v", preFilterStateKey, err) - return nil, nil + // preFilterState doesn't exist, likely PreFilter wasn't invoked. + return nil, fmt.Errorf("error reading %q from cycleState: %v", preFilterStateKey, err) } s, ok := c.(*preFilterState) @@ -152,35 +149,28 @@ func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod return framework.NewStatus(framework.Error, err.Error()) } - var insufficientResources []InsufficientResource - if s != nil { - insufficientResources = fitsRequest(s, nodeInfo, f.ignoredResources) - } else { - insufficientResources = Fits(pod, nodeInfo, f.ignoredResources) - } - - // We will keep all failure reasons. 
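// A sketch of the precomputed-reason change made to InsufficientResource below:
// the failure message is stored once per resource kind instead of being rebuilt
// with fmt.Sprintf on every Filter call, which the patch notes is expensive for
// cluster-autoscaler simulations. Names here are illustrative.
package main

import "fmt"

type insufficientResource struct {
	Name      string
	Reason    string // precomputed, so the hot filtering path never formats strings
	Requested int64
	Used      int64
	Capacity  int64
}

func checkCPU(requested, used, capacity int64) []insufficientResource {
	var out []insufficientResource
	if capacity < requested+used {
		out = append(out, insufficientResource{"cpu", "Insufficient cpu", requested, used, capacity})
	}
	return out
}

func main() {
	for _, r := range checkCPU(500, 800, 1000) {
		fmt.Println(r.Reason) // reused verbatim as the Unschedulable failure reason
	}
}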
- var failureReasons []string - for _, r := range insufficientResources { - failureReasons = append(failureReasons, getErrReason(r.ResourceName)) - } + insufficientResources := fitsRequest(s, nodeInfo, f.ignoredResources) if len(insufficientResources) != 0 { + // We will keep all failure reasons. + failureReasons := make([]string, 0, len(insufficientResources)) + for _, r := range insufficientResources { + failureReasons = append(failureReasons, r.Reason) + } return framework.NewStatus(framework.Unschedulable, failureReasons...) } return nil } -func getErrReason(rn v1.ResourceName) string { - return fmt.Sprintf("Insufficient %v", rn) -} - // InsufficientResource describes what kind of resource limit is hit and caused the pod to not fit the node. type InsufficientResource struct { ResourceName v1.ResourceName - Requested int64 - Used int64 - Capacity int64 + // We explicitly have a parameter for reason to avoid formatting a message on the fly + // for common resources, which is expensive for cluster autoscaler simulations. + Reason string + Requested int64 + Used int64 + Capacity int64 } // Fits checks if node have enough resources to host the pod. @@ -189,12 +179,13 @@ func Fits(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, ignoredExtendedReso } func fitsRequest(podRequest *preFilterState, nodeInfo *schedulernodeinfo.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource { - var insufficientResources []InsufficientResource + insufficientResources := make([]InsufficientResource, 0, 4) allowedPodNumber := nodeInfo.AllowedPodNumber() if len(nodeInfo.Pods())+1 > allowedPodNumber { insufficientResources = append(insufficientResources, InsufficientResource{ v1.ResourcePods, + "Too many pods", 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber), @@ -216,6 +207,7 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *schedulernodeinfo.NodeInf if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU { insufficientResources = append(insufficientResources, InsufficientResource{ v1.ResourceCPU, + "Insufficient cpu", podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU, @@ -224,6 +216,7 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *schedulernodeinfo.NodeInf if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory { insufficientResources = append(insufficientResources, InsufficientResource{ v1.ResourceMemory, + "Insufficient memory", podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory, @@ -232,6 +225,7 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *schedulernodeinfo.NodeInf if allocatable.EphemeralStorage < podRequest.EphemeralStorage+nodeInfo.RequestedResource().EphemeralStorage { insufficientResources = append(insufficientResources, InsufficientResource{ v1.ResourceEphemeralStorage, + "Insufficient ephemeral-storage", podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage, @@ -249,6 +243,7 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *schedulernodeinfo.NodeInf if allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] { insufficientResources = append(insufficientResources, InsufficientResource{ rName, + fmt.Sprintf("Insufficient %v", rName), podRequest.ScalarResources[rName], nodeInfo.RequestedResource().ScalarResources[rName], allocatable.ScalarResources[rName], diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_limits.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_limits.go index 444e3a8607..abe8969700 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_limits.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_limits.go @@ -32,25 +32,25 @@ type ResourceLimits struct { handle framework.FrameworkHandle } -var _ = framework.PostFilterPlugin(&ResourceLimits{}) +var _ = framework.PreScorePlugin(&ResourceLimits{}) var _ = framework.ScorePlugin(&ResourceLimits{}) const ( // ResourceLimitsName is the name of the plugin used in the plugin registry and configurations. ResourceLimitsName = "NodeResourceLimits" - // postFilterStateKey is the key in CycleState to NodeResourceLimits pre-computed data. + // preScoreStateKey is the key in CycleState to NodeResourceLimits pre-computed data. // Using the name of the plugin will likely help us avoid collisions with other plugins. - postFilterStateKey = "PostFilter" + ResourceLimitsName + preScoreStateKey = "PreScore" + ResourceLimitsName ) -// postFilterState computed at PostFilter and used at Score. -type postFilterState struct { +// preScoreState computed at PreScore and used at Score. +type preScoreState struct { podResourceRequest *schedulernodeinfo.Resource } -// Clone the postFilter state. -func (s *postFilterState) Clone() framework.StateData { +// Clone the preScore state. +func (s *preScoreState) Clone() framework.StateData { return s } @@ -59,8 +59,8 @@ func (rl *ResourceLimits) Name() string { return ResourceLimitsName } -// PostFilter builds and writes cycle state used by Score and NormalizeScore. -func (rl *ResourceLimits) PostFilter( +// PreScore builds and writes cycle state used by Score and NormalizeScore. 
+func (rl *ResourceLimits) PreScore( pCtx context.Context, cycleState *framework.CycleState, pod *v1.Pod, @@ -75,22 +75,22 @@ func (rl *ResourceLimits) PostFilter( if rl.handle.SnapshotSharedLister() == nil { return framework.NewStatus(framework.Error, fmt.Sprintf("empty shared lister")) } - s := &postFilterState{ + s := &preScoreState{ podResourceRequest: getResourceLimits(pod), } - cycleState.Write(postFilterStateKey, s) + cycleState.Write(preScoreStateKey, s) return nil } func getPodResource(cycleState *framework.CycleState) (*schedulernodeinfo.Resource, error) { - c, err := cycleState.Read(postFilterStateKey) + c, err := cycleState.Read(preScoreStateKey) if err != nil { - return nil, fmt.Errorf("Error reading %q from cycleState: %v", postFilterStateKey, err) + return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err) } - s, ok := c.(*postFilterState) + s, ok := c.(*preScoreState) if !ok { - return nil, fmt.Errorf("%+v convert to ResourceLimits.postFilterState error", c) + return nil, fmt.Errorf("%+v convert to ResourceLimits.preScoreState error", c) } return s.podResourceRequest, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/BUILD index bf67b3c4bf..3a3a789b95 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/BUILD @@ -35,6 +35,7 @@ go_test( deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go index 4c59eb1fc5..87d4d9603f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go @@ -194,10 +194,8 @@ func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framewor func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { c, err := cycleState.Read(preFilterStateKey) if err != nil { - // The preFilterState wasn't pre-computed in prefilter. We ignore the error for now since - // we are able to handle that by computing it again (e.g. in Filter()). - klog.V(5).Infof("Error reading %q from cycleState: %v", preFilterStateKey, err) - return nil, nil + // preFilterState doesn't exist, likely PreFilter wasn't invoked. + return nil, fmt.Errorf("error reading %q from cycleState: %v", preFilterStateKey, err) } s, ok := c.(*preFilterState) @@ -255,7 +253,8 @@ func calPreFilterState(pod *v1.Pod, allNodes []*schedulernodeinfo.NodeInfo) (*pr matchTotal := int32(0) // nodeInfo.Pods() can be empty; or all pods don't fit for _, existingPod := range nodeInfo.Pods() { - if existingPod.Namespace != pod.Namespace { + // Bypass terminating Pod (see #87621). 
+ if existingPod.DeletionTimestamp != nil || existingPod.Namespace != pod.Namespace { continue } if constraint.selector.Matches(labels.Set(existingPod.Labels)) { @@ -291,11 +290,6 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C if err != nil { return framework.NewStatus(framework.Error, err.Error()) } - // nil preFilterState is illegal. - if s == nil { - // TODO(autoscaler): get it implemented. - return framework.NewStatus(framework.Error, "preFilterState not pre-computed for PodTopologySpread Plugin") - } // However, "empty" preFilterState is legit which tolerates every toSchedule Pod. if len(s.tpPairToMatchNum) == 0 || len(s.constraints) == 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go index e8140526b4..85338fdc5d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go @@ -36,7 +36,7 @@ type PodTopologySpread struct { var _ framework.PreFilterPlugin = &PodTopologySpread{} var _ framework.FilterPlugin = &PodTopologySpread{} -var _ framework.PostFilterPlugin = &PodTopologySpread{} +var _ framework.PreScorePlugin = &PodTopologySpread{} var _ framework.ScorePlugin = &PodTopologySpread{} const ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go index 25b7c72e5e..c87de4a384 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go @@ -31,10 +31,10 @@ import ( framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) -const postFilterStateKey = "PostFilter" + Name +const preScoreStateKey = "PreScore" + Name -// postFilterState computed at PostFilter and used at Score. -type postFilterState struct { +// preScoreState computed at PreScore and used at Score. +type preScoreState struct { constraints []topologySpreadConstraint // nodeNameSet is a string set holding all node names which have all constraints[*].topologyKey present. nodeNameSet sets.String @@ -44,7 +44,7 @@ type postFilterState struct { // Clone implements the mandatory Clone interface. We don't really copy the data since // there is no need for that. -func (s *postFilterState) Clone() framework.StateData { +func (s *preScoreState) Clone() framework.StateData { return s } @@ -52,7 +52,7 @@ func (s *postFilterState) Clone() framework.StateData { // and initialize two maps: // 1) s.topologyPairToPodCounts: keyed with both eligible topology pair and node names. // 2) s.nodeNameSet: keyed with node name, and valued with a *int64 pointer for eligible node only. 
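// A sketch of the terminating-Pod bypass added above (see #87621) and mirrored in
// the scoring path below: pods that are being deleted, or that live in another
// namespace, no longer count toward topology-spread matches. Illustrative only.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func countsTowardSpreading(existing *v1.Pod, incomingNamespace string) bool {
	// Bypass terminating pods and pods from other namespaces.
	return existing.DeletionTimestamp == nil && existing.Namespace == incomingNamespace
}

func main() {
	p := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default"}}
	fmt.Println(countsTowardSpreading(p, "default")) // true

	now := metav1.Now()
	p.DeletionTimestamp = &now
	fmt.Println(countsTowardSpreading(p, "default")) // false: pod is terminating
}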
-func (s *postFilterState) initialize(pod *v1.Pod, filteredNodes []*v1.Node) error { +func (s *preScoreState) initialize(pod *v1.Pod, filteredNodes []*v1.Node) error { constraints, err := filterTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, v1.ScheduleAnyway) if err != nil { return err @@ -78,8 +78,8 @@ func (s *postFilterState) initialize(pod *v1.Pod, filteredNodes []*v1.Node) erro return nil } -// PostFilter builds and writes cycle state used by Score and NormalizeScore. -func (pl *PodTopologySpread) PostFilter( +// PreScore builds and writes cycle state used by Score and NormalizeScore. +func (pl *PodTopologySpread) PreScore( ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, @@ -96,18 +96,18 @@ func (pl *PodTopologySpread) PostFilter( return nil } - state := &postFilterState{ + state := &preScoreState{ nodeNameSet: sets.String{}, topologyPairToPodCounts: make(map[topologyPair]*int64), } err = state.initialize(pod, filteredNodes) if err != nil { - return framework.NewStatus(framework.Error, fmt.Sprintf("error when calculating postFilterState: %v", err)) + return framework.NewStatus(framework.Error, fmt.Sprintf("error when calculating preScoreState: %v", err)) } // return if incoming pod doesn't have soft topology spread constraints. if state.constraints == nil { - cycleState.Write(postFilterStateKey, state) + cycleState.Write(preScoreStateKey, state) return nil } @@ -135,6 +135,10 @@ func (pl *PodTopologySpread) PostFilter( // indicates how many pods (on current node) match the . matchSum := int64(0) for _, existingPod := range nodeInfo.Pods() { + // Bypass terminating Pod (see #87621). + if existingPod.DeletionTimestamp != nil || existingPod.Namespace != pod.Namespace { + continue + } if c.selector.Matches(labels.Set(existingPod.Labels)) { matchSum++ } @@ -144,7 +148,7 @@ func (pl *PodTopologySpread) PostFilter( } workqueue.ParallelizeUntil(ctx, 16, len(allNodes), processAllNode) - cycleState.Write(postFilterStateKey, state) + cycleState.Write(preScoreStateKey, state) return nil } @@ -158,7 +162,7 @@ func (pl *PodTopologySpread) Score(ctx context.Context, cycleState *framework.Cy } node := nodeInfo.Node() - s, err := getPostFilterState(cycleState) + s, err := getPreScoreState(cycleState) if err != nil { return 0, framework.NewStatus(framework.Error, err.Error()) } @@ -183,7 +187,7 @@ func (pl *PodTopologySpread) Score(ctx context.Context, cycleState *framework.Cy // NormalizeScore invoked after scoring all nodes. 
func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { - s, err := getPostFilterState(cycleState) + s, err := getPreScoreState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) } @@ -242,15 +246,15 @@ func (pl *PodTopologySpread) ScoreExtensions() framework.ScoreExtensions { return pl } -func getPostFilterState(cycleState *framework.CycleState) (*postFilterState, error) { - c, err := cycleState.Read(postFilterStateKey) +func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) { + c, err := cycleState.Read(preScoreStateKey) if err != nil { - return nil, fmt.Errorf("error reading %q from cycleState: %v", postFilterStateKey, err) + return nil, fmt.Errorf("error reading %q from cycleState: %v", preScoreStateKey, err) } - s, ok := c.(*postFilterState) + s, ok := c.(*preScoreState) if !ok { - return nil, fmt.Errorf("%+v convert to podtopologyspread.postFilterState error", c) + return nil, fmt.Errorf("%+v convert to podtopologyspread.preScoreState error", c) } return s, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/BUILD index 406ac17983..31e0779f98 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/BUILD @@ -13,7 +13,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go index 5ee3b8f26a..1edc43bf3f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/klog" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/nodeinfo" @@ -154,7 +153,7 @@ func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.Cyc // If addedPod is in the same namespace as the pod, update the list // of matching pods if applicable. - if s == nil || podToAdd.Namespace != podToSchedule.Namespace { + if podToAdd.Namespace != podToSchedule.Namespace { return nil } @@ -173,8 +172,7 @@ func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework. return framework.NewStatus(framework.Error, err.Error()) } - if s == nil || - len(s.matchingPodList) == 0 || + if len(s.matchingPodList) == 0 || podToRemove.Namespace != s.matchingPodList[0].Namespace { return nil } @@ -192,10 +190,8 @@ func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework. 
func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { c, err := cycleState.Read(preFilterStateKey) if err != nil { - // The metadata wasn't pre-computed in prefilter. We ignore the error for now since - // Filter is able to handle that by computing it again. - klog.V(5).Infof(fmt.Sprintf("reading %q from cycleState: %v", preFilterStateKey, err)) - return nil, nil + // preFilterState doesn't exist, likely PreFilter wasn't invoked. + return nil, fmt.Errorf("error reading %q from cycleState: %v", preFilterStateKey, err) } if c == nil { @@ -247,14 +243,6 @@ func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.Cyc if err != nil { return framework.NewStatus(framework.Error, err.Error()) } - if s == nil { - // Make the filter resilient in case preFilterState is missing. - s, err = pl.createPreFilterState(pod) - if err != nil { - return framework.NewStatus(framework.Error, fmt.Sprintf("could not create preFilterState: %v", err)) - - } - } pods, services := s.matchingPodList, s.matchingPodServices filteredPods := nodeInfo.FilterOutPods(pods) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go index 93164dd895..8d344912e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go @@ -34,14 +34,14 @@ type TaintToleration struct { } var _ framework.FilterPlugin = &TaintToleration{} -var _ framework.PostFilterPlugin = &TaintToleration{} +var _ framework.PreScorePlugin = &TaintToleration{} var _ framework.ScorePlugin = &TaintToleration{} const ( // Name is the name of the plugin used in the plugin registry and configurations. Name = "TaintToleration" - // postFilterStateKey is the key in CycleState to TaintToleration pre-computed data for Scoring. - postFilterStateKey = "PostFilter" + Name + // preScoreStateKey is the key in CycleState to TaintToleration pre-computed data for Scoring. + preScoreStateKey = "PreScore" + Name // ErrReasonNotMatch is the Filter reason status when not matching. ErrReasonNotMatch = "node(s) had taints that the pod didn't tolerate" ) @@ -77,14 +77,14 @@ func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleSta return framework.NewStatus(framework.UnschedulableAndUnresolvable, errReason) } -// postFilterState computed at PostFilter and used at Score. -type postFilterState struct { +// preScoreState computed at PreScore and used at Score. +type preScoreState struct { tolerationsPreferNoSchedule []v1.Toleration } // Clone implements the mandatory Clone interface. We don't really copy the data since // there is no need for that. -func (s *postFilterState) Clone() framework.StateData { +func (s *preScoreState) Clone() framework.StateData { return s } @@ -99,28 +99,28 @@ func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationLi return } -// PostFilter builds and writes cycle state used by Score and NormalizeScore. -func (pl *TaintToleration) PostFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node, _ framework.NodeToStatusMap) *framework.Status { +// PreScore builds and writes cycle state used by Score and NormalizeScore. 
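// A sketch of what getAllTolerationPreferNoSchedule (referenced above) feeds into
// the PreScore below; assuming, as its name suggests, that only tolerations with
// an empty effect (which matches every effect) or an explicit PreferNoSchedule
// effect matter for scoring. Hypothetical filter, not the vendored helper.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func tolerationsPreferNoSchedule(tolerations []v1.Toleration) []v1.Toleration {
	var out []v1.Toleration
	for _, t := range tolerations {
		if len(t.Effect) == 0 || t.Effect == v1.TaintEffectPreferNoSchedule {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	ts := []v1.Toleration{
		{Key: "a", Effect: v1.TaintEffectNoSchedule},       // ignored for scoring
		{Key: "b", Effect: v1.TaintEffectPreferNoSchedule}, // kept
		{Key: "c"},                                         // empty effect: kept
	}
	fmt.Println(len(tolerationsPreferNoSchedule(ts))) // 2
}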
+func (pl *TaintToleration) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node, _ framework.NodeToStatusMap) *framework.Status { if len(nodes) == 0 { return nil } tolerationsPreferNoSchedule := getAllTolerationPreferNoSchedule(pod.Spec.Tolerations) - state := &postFilterState{ + state := &preScoreState{ tolerationsPreferNoSchedule: tolerationsPreferNoSchedule, } - cycleState.Write(postFilterStateKey, state) + cycleState.Write(preScoreStateKey, state) return nil } -func getPostFilterState(cycleState *framework.CycleState) (*postFilterState, error) { - c, err := cycleState.Read(postFilterStateKey) +func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) { + c, err := cycleState.Read(preScoreStateKey) if err != nil { - return nil, fmt.Errorf("Error reading %q from cycleState: %v", postFilterStateKey, err) + return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err) } - s, ok := c.(*postFilterState) + s, ok := c.(*preScoreState) if !ok { - return nil, fmt.Errorf("%+v convert to tainttoleration.postFilterState error", c) + return nil, fmt.Errorf("%+v convert to tainttoleration.preScoreState error", c) } return s, nil } @@ -148,7 +148,7 @@ func (pl *TaintToleration) Score(ctx context.Context, state *framework.CycleStat } node := nodeInfo.Node() - s, err := getPostFilterState(state) + s, err := getPreScoreState(state) if err != nil { return 0, framework.NewStatus(framework.Error, err.Error()) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/framework.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/framework.go index 8c1f5c827b..937369605f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/framework.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/framework.go @@ -46,7 +46,7 @@ const ( preFilter = "PreFilter" preFilterExtensionAddPod = "PreFilterExtensionAddPod" preFilterExtensionRemovePod = "PreFilterExtensionRemovePod" - postFilter = "PostFilter" + preScore = "PreScore" score = "Score" scoreExtensionNormalize = "ScoreExtensionNormalize" preBind = "PreBind" @@ -67,7 +67,7 @@ type framework struct { queueSortPlugins []QueueSortPlugin preFilterPlugins []PreFilterPlugin filterPlugins []FilterPlugin - postFilterPlugins []PostFilterPlugin + preScorePlugins []PreScorePlugin scorePlugins []ScorePlugin reservePlugins []ReservePlugin preBindPlugins []PreBindPlugin @@ -103,7 +103,7 @@ func (f *framework) getExtensionPoints(plugins *config.Plugins) []extensionPoint {plugins.PreFilter, &f.preFilterPlugins}, {plugins.Filter, &f.filterPlugins}, {plugins.Reserve, &f.reservePlugins}, - {plugins.PostFilter, &f.postFilterPlugins}, + {plugins.PreScore, &f.preScorePlugins}, {plugins.Score, &f.scorePlugins}, {plugins.PreBind, &f.preBindPlugins}, {plugins.Bind, &f.bindPlugins}, @@ -461,10 +461,10 @@ func (f *framework) runFilterPlugin(ctx context.Context, pl FilterPlugin, state return status } -// RunPostFilterPlugins runs the set of configured post-filter plugins. If any -// of these plugins returns any status other than "Success", the given node is +// RunPreScorePlugins runs the set of configured pre-score plugins. If any +// of these plugins returns any status other than "Success", the given pod is // rejected. The filteredNodeStatuses is the set of filtered nodes and their statuses. 
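// A minimal sketch of the run-until-failure loop used by RunPreScorePlugins below,
// with hypothetical stand-ins: a nil status means success, and the first failing
// plugin causes the pod to be rejected.
package main

import "fmt"

type status struct{ msg string }

func runPreScorePlugins(plugins []func() *status) *status {
	for _, pl := range plugins {
		if st := pl(); st != nil {
			return st // abort on the first non-success status
		}
	}
	return nil
}

func main() {
	ok := func() *status { return nil }
	fail := func() *status { return &status{msg: "prescore plugin failed"} }

	fmt.Println(runPreScorePlugins([]func() *status{ok, ok}) == nil) // true
	fmt.Println(runPreScorePlugins([]func() *status{ok, fail}).msg)  // prescore plugin failed
}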
-func (f *framework) RunPostFilterPlugins( +func (f *framework) RunPreScorePlugins( ctx context.Context, state *CycleState, pod *v1.Pod, @@ -473,12 +473,12 @@ func (f *framework) RunPostFilterPlugins( ) (status *Status) { startTime := time.Now() defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(postFilter, status.Code().String()).Observe(metrics.SinceInSeconds(startTime)) + metrics.FrameworkExtensionPointDuration.WithLabelValues(preScore, status.Code().String()).Observe(metrics.SinceInSeconds(startTime)) }() - for _, pl := range f.postFilterPlugins { - status = f.runPostFilterPlugin(ctx, pl, state, pod, nodes, filteredNodesStatuses) + for _, pl := range f.preScorePlugins { + status = f.runPreScorePlugin(ctx, pl, state, pod, nodes, filteredNodesStatuses) if !status.IsSuccess() { - msg := fmt.Sprintf("error while running %q postfilter plugin for pod %q: %v", pl.Name(), pod.Name, status.Message()) + msg := fmt.Sprintf("error while running %q prescore plugin for pod %q: %v", pl.Name(), pod.Name, status.Message()) klog.Error(msg) return NewStatus(Error, msg) } @@ -487,13 +487,13 @@ func (f *framework) RunPostFilterPlugins( return nil } -func (f *framework) runPostFilterPlugin(ctx context.Context, pl PostFilterPlugin, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status { +func (f *framework) runPreScorePlugin(ctx context.Context, pl PreScorePlugin, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status { if !state.ShouldRecordPluginMetrics() { - return pl.PostFilter(ctx, state, pod, nodes, filteredNodesStatuses) + return pl.PreScore(ctx, state, pod, nodes, filteredNodesStatuses) } startTime := time.Now() - status := pl.PostFilter(ctx, state, pod, nodes, filteredNodesStatuses) - f.metricsRecorder.observePluginDurationAsync(postFilter, pl.Name(), status, metrics.SinceInSeconds(startTime)) + status := pl.PreScore(ctx, state, pod, nodes, filteredNodesStatuses) + f.metricsRecorder.observePluginDurationAsync(preScore, pl.Name(), status, metrics.SinceInSeconds(startTime)) return status } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/interface.go index 1c7fb52343..d606033fda 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/interface.go @@ -202,11 +202,9 @@ type WaitingPod interface { // Allow declares the waiting pod is allowed to be scheduled by plugin pluginName. // If this is the last remaining plugin to allow, then a success signal is delivered // to unblock the pod. - // Returns true if the allow signal was successfully dealt with, false otherwise. - Allow(pluginName string) bool - // Reject declares the waiting pod unschedulable. Returns true if the reject signal - // was successfully delivered, false otherwise. - Reject(msg string) bool + Allow(pluginName string) + // Reject declares the waiting pod unschedulable. + Reject(msg string) } // Plugin is the parent type for all the scheduling framework plugins. @@ -304,17 +302,17 @@ type FilterPlugin interface { Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status } -// PostFilterPlugin is an interface for Post-filter plugin. Post-filter is an +// PreScorePlugin is an interface for Pre-score plugin. 
Pre-score is an // informational extension point. Plugins will be called with a list of nodes // that passed the filtering phase. A plugin may use this data to update internal // state or to generate logs/metrics. -type PostFilterPlugin interface { +type PreScorePlugin interface { Plugin - // PostFilter is called by the scheduling framework after a list of nodes - // passed the filtering phase. All postfilter plugins must return success or + // PreScore is called by the scheduling framework after a list of nodes + // passed the filtering phase. All prescore plugins must return success or // the pod will be rejected. The filteredNodesStatuses is the set of filtered nodes // and their filter status. - PostFilter(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status + PreScore(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status } // ScoreExtensions is an interface for Score extended functionality. @@ -440,10 +438,10 @@ type Framework interface { // status other than Success. RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status - // RunPostFilterPlugins runs the set of configured post-filter plugins. If any - // of these plugins returns any status other than "Success", the given node is + // RunPreScorePlugins runs the set of configured pre-score plugins. If any + // of these plugins returns any status other than "Success", the given pod is // rejected. The filteredNodeStatuses is the set of filtered nodes and their statuses. - RunPostFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status + RunPreScorePlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node, filteredNodesStatuses NodeToStatusMap) *Status // RunScorePlugins runs the set of configured scoring plugins. It returns a map that // stores for each scoring plugin name the corresponding NodeScoreList(s). diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go index df7dbe6eee..3eaa0382dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/waiting_pods_map.go @@ -121,8 +121,7 @@ func (w *waitingPod) GetPendingPlugins() []string { // Allow declares the waiting pod is allowed to be scheduled by plugin pluginName. // If this is the last remaining plugin to allow, then a success signal is delivered // to unblock the pod. -// Returns true if the allow signal was successfully dealt with, false otherwise. -func (w *waitingPod) Allow(pluginName string) bool { +func (w *waitingPod) Allow(pluginName string) { w.mu.Lock() defer w.mu.Unlock() if timer, exist := w.pendingPlugins[pluginName]; exist { @@ -132,30 +131,29 @@ func (w *waitingPod) Allow(pluginName string) bool { // Only signal success status after all plugins have allowed if len(w.pendingPlugins) != 0 { - return true + return } + // The select clause works as a non-blocking send. + // If there is no receiver, it's a no-op (default case). select { case w.s <- NewStatus(Success, ""): - return true default: - return false } } -// Reject declares the waiting pod unschedulable. 
Returns true if the reject signal -was successfully delivered, false otherwise. -func (w *waitingPod) Reject(msg string) bool { +// Reject declares the waiting pod unschedulable. +func (w *waitingPod) Reject(msg string) { w.mu.RLock() defer w.mu.RUnlock() for _, timer := range w.pendingPlugins { timer.Stop() } + // The select clause works as a non-blocking send. + // If there is no receiver, it's a no-op (default case). select { case w.s <- NewStatus(Unschedulable, msg): - return true default: - return false } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go index 76968ef78e..095b991ce9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go @@ -56,10 +56,13 @@ var ( StabilityLevel: metrics.ALPHA, }, []string{"result"}) // PodScheduleSuccesses counts how many pods were scheduled. + // This metric will be initialized again in Register() to ensure the metric is not a no-op metric. PodScheduleSuccesses = scheduleAttempts.With(metrics.Labels{"result": "scheduled"}) // PodScheduleFailures counts how many pods could not be scheduled. + // This metric will be initialized again in Register() to ensure the metric is not a no-op metric. PodScheduleFailures = scheduleAttempts.With(metrics.Labels{"result": "unschedulable"}) // PodScheduleErrors counts how many pods could not be scheduled due to a scheduler error. + // This metric will be initialized again in Register() to ensure the metric is not a no-op metric. PodScheduleErrors = scheduleAttempts.With(metrics.Labels{"result": "error"}) DeprecatedSchedulingDuration = metrics.NewSummaryVec( &metrics.SummaryOpts{ @@ -262,6 +265,9 @@ func Register() { legacyregistry.MustRegister(metric) } volumeschedulingmetrics.RegisterVolumeSchedulingMetrics() + PodScheduleSuccesses = scheduleAttempts.With(metrics.Labels{"result": "scheduled"}) + PodScheduleFailures = scheduleAttempts.With(metrics.Labels{"result": "unschedulable"}) + PodScheduleErrors = scheduleAttempts.With(metrics.Labels{"result": "error"}) }) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go index 5ce7c1a202..f46b0c0746 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go @@ -33,23 +33,16 @@ func GetPodFullName(pod *v1.Pod) string { return pod.Name + "_" + pod.Namespace } -// GetPodStartTime returns start time of the given pod. +// GetPodStartTime returns the start time of the given pod or the current timestamp +// if it hasn't started yet. func GetPodStartTime(pod *v1.Pod) *metav1.Time { if pod.Status.StartTime != nil { return pod.Status.StartTime } - // Should not reach here as the start time of a running time should not be nil - // Return current timestamp as the default value. - // This will not affect the calculation of earliest timestamp of all the pods on one node, - // because current timestamp is always after the StartTime of any pod in good state. klog.Errorf("pod.Status.StartTime is nil for pod %s. Should not reach here.", pod.Name) + // Assumed pods and bound pods that haven't started don't have a StartTime yet.
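// A standalone sketch of the non-blocking send documented in Allow/Reject above:
// with select/default the sender never blocks when no receiver (or buffer slot)
// is ready, it simply drops the signal.
package main

import "fmt"

func signal(ch chan string, msg string) {
	select {
	case ch <- msg:
		fmt.Println("delivered:", msg)
	default:
		fmt.Println("dropped:", msg) // no receiver; a no-op instead of a deadlock
	}
}

func main() {
	ch := make(chan string, 1)
	signal(ch, "Success")       // buffer empty: delivered
	signal(ch, "Unschedulable") // buffer full: dropped, sender continues
}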
return &metav1.Time{Time: time.Now()} } -// lessFunc is a function that receives two items and returns true if the first -// item should be placed before the second one when the list is sorted. -type lessFunc = func(item1, item2 interface{}) bool - // GetEarliestPodStartTime returns the earliest start time of all pods that // have the highest priority among all victims. func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD index bd314ae6a2..d28601f8f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD @@ -12,6 +12,7 @@ go_library( "claims.go", "jwt.go", "legacy.go", + "openidmetadata.go", "util.go", ], importpath = "k8s.io/kubernetes/pkg/serviceaccount", @@ -19,6 +20,7 @@ go_library( "//pkg/apis/core:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -46,12 +48,14 @@ go_test( srcs = [ "claims_test.go", "jwt_test.go", + "openidmetadata_test.go", "util_test.go", ], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", "//pkg/controller/serviceaccount:go_default_library", + "//pkg/routes:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", @@ -60,6 +64,8 @@ go_test( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/keyutil:go_default_library", + "//vendor/github.com/emicklei/go-restful:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", "//vendor/gopkg.in/square/go-jose.v2:go_default_library", "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go index ba03a22233..24be9629d3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go @@ -114,6 +114,10 @@ func signerFromRSAPrivateKey(keyPair *rsa.PrivateKey) (jose.Signer, error) { return nil, fmt.Errorf("failed to derive keyID: %v", err) } + // IMPORTANT: If this function is updated to support additional key sizes, + // algorithmForPublicKey in serviceaccount/openidmetadata.go must also be + // updated to support the same key sizes. Today we only support RS256. + // Wrap the RSA keypair in a JOSE JWK with the designated key ID. 
privateJWK := &jose.JSONWebKey{ Algorithm: string(jose.RS256), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/openidmetadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/openidmetadata.go new file mode 100644 index 0000000000..56ec23d118 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/openidmetadata.go @@ -0,0 +1,295 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serviceaccount + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/json" + "fmt" + "net/url" + + jose "gopkg.in/square/go-jose.v2" + + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +const ( + // OpenIDConfigPath is the URL path at which the API server serves + // an OIDC Provider Configuration Information document, corresponding + // to the Kubernetes Service Account key issuer. + // https://openid.net/specs/openid-connect-discovery-1_0.html + OpenIDConfigPath = "/.well-known/openid-configuration" + + // JWKSPath is the URL path at which the API server serves a JWKS + // containing the public keys that may be used to sign Kubernetes + // Service Account keys. + JWKSPath = "/openid/v1/jwks" +) + +// OpenIDMetadata contains the pre-rendered responses for OIDC discovery endpoints. +type OpenIDMetadata struct { + ConfigJSON []byte + PublicKeysetJSON []byte +} + +// NewOpenIDMetadata returns the pre-rendered JSON responses for the OIDC discovery +// endpoints, or an error if they could not be constructed. Callers should note +// that this function may perform additional validation on inputs that is not +// backwards-compatible with all command-line validation. The recommendation is +// to log the error and skip installing the OIDC discovery endpoints. +func NewOpenIDMetadata(issuerURL, jwksURI, defaultExternalAddress string, pubKeys []interface{}) (*OpenIDMetadata, error) { + if issuerURL == "" { + return nil, fmt.Errorf("empty issuer URL") + } + if jwksURI == "" && defaultExternalAddress == "" { + return nil, fmt.Errorf("either the JWKS URI or the default external address, or both, must be set") + } + if len(pubKeys) == 0 { + return nil, fmt.Errorf("no keys provided for validating keyset") + } + + // Ensure the issuer URL meets the OIDC spec (this is the additional + // validation the doc comment warns about). + // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata + iss, err := url.Parse(issuerURL) + if err != nil { + return nil, err + } + if iss.Scheme != "https" { + return nil, fmt.Errorf("issuer URL must use https scheme, got: %s", issuerURL) + } + if iss.RawQuery != "" { + return nil, fmt.Errorf("issuer URL may not include a query, got: %s", issuerURL) + } + if iss.Fragment != "" { + return nil, fmt.Errorf("issuer URL may not include a fragment, got: %s", issuerURL) + } + + // Either use the provided JWKS URI or default to ExternalAddress plus + // the JWKS path. 
+ if jwksURI == "" { + const msg = "attempted to build jwks_uri from external " + + "address %s, but could not construct a valid URL. Error: %v" + + if defaultExternalAddress == "" { + return nil, fmt.Errorf(msg, defaultExternalAddress, + fmt.Errorf("empty address")) + } + + u := &url.URL{ + Scheme: "https", + Host: defaultExternalAddress, + Path: JWKSPath, + } + jwksURI = u.String() + + // TODO(mtaufen): I think we can probably expect ExternalAddress is + // at most just host + port and skip the sanity check, but want to be + // careful until that is confirmed. + + // Sanity check that the jwksURI we produced is the valid URL we expect. + // This is just in case ExternalAddress came in as something weird, + // like a scheme + host + port, instead of just host + port. + parsed, err := url.Parse(jwksURI) + if err != nil { + return nil, fmt.Errorf(msg, defaultExternalAddress, err) + } else if u.Scheme != parsed.Scheme || + u.Host != parsed.Host || + u.Path != parsed.Path { + return nil, fmt.Errorf(msg, defaultExternalAddress, + fmt.Errorf("got %v, expected %v", parsed, u)) + } + } else { + // Double-check that jwksURI is an https URL + if u, err := url.Parse(jwksURI); err != nil { + return nil, err + } else if u.Scheme != "https" { + return nil, fmt.Errorf("jwksURI requires https scheme, parsed as: %v", u.String()) + } + } + + configJSON, err := openIDConfigJSON(issuerURL, jwksURI, pubKeys) + if err != nil { + return nil, fmt.Errorf("could not marshal issuer discovery JSON, error: %v", err) + } + + keysetJSON, err := openIDKeysetJSON(pubKeys) + if err != nil { + return nil, fmt.Errorf("could not marshal issuer keys JSON, error: %v", err) + } + + return &OpenIDMetadata{ + ConfigJSON: configJSON, + PublicKeysetJSON: keysetJSON, + }, nil +} + +// openIDMetadata provides a minimal subset of OIDC provider metadata: +// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +type openIDMetadata struct { + Issuer string `json:"issuer"` // REQUIRED in OIDC; meaningful to relying parties. + // TODO(mtaufen): Since our goal is compatibility for relying parties that + // need to validate ID tokens, but do not need to initiate login flows, + // and since we aren't sure what to put in authorization_endpoint yet, + // we will omit this field until someone files a bug. + // AuthzEndpoint string `json:"authorization_endpoint"` // REQUIRED in OIDC; but useless to relying parties. + JWKSURI string `json:"jwks_uri"` // REQUIRED in OIDC; meaningful to relying parties. + ResponseTypes []string `json:"response_types_supported"` // REQUIRED in OIDC + SubjectTypes []string `json:"subject_types_supported"` // REQUIRED in OIDC + SigningAlgs []string `json:"id_token_signing_alg_values_supported"` // REQUIRED in OIDC +} + +// openIDConfigJSON returns the JSON OIDC Discovery Doc for the service +// account issuer. 
+func openIDConfigJSON(iss, jwksURI string, keys []interface{}) ([]byte, error) { + keyset, errs := publicJWKSFromKeys(keys) + if errs != nil { + return nil, errs + } + + metadata := openIDMetadata{ + Issuer: iss, + JWKSURI: jwksURI, + ResponseTypes: []string{"id_token"}, // Kubernetes only produces ID tokens + SubjectTypes: []string{"public"}, // https://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes + SigningAlgs: getAlgs(keyset), // REQUIRED by OIDC + } + + metadataJSON, err := json.Marshal(metadata) + if err != nil { + return nil, fmt.Errorf("failed to marshal service account issuer metadata: %v", err) + } + + return metadataJSON, nil +} + +// openIDKeysetJSON returns the JSON Web Key Set for the service account +// issuer's keys. +func openIDKeysetJSON(keys []interface{}) ([]byte, error) { + keyset, errs := publicJWKSFromKeys(keys) + if errs != nil { + return nil, errs + } + + keysetJSON, err := json.Marshal(keyset) + if err != nil { + return nil, fmt.Errorf("failed to marshal service account issuer JWKS: %v", err) + } + + return keysetJSON, nil +} + +func getAlgs(keys *jose.JSONWebKeySet) []string { + algs := sets.NewString() + for _, k := range keys.Keys { + algs.Insert(k.Algorithm) + } + // Note: List returns a sorted slice. + return algs.List() +} + +type publicKeyGetter interface { + Public() crypto.PublicKey +} + +// publicJWKSFromKeys constructs a JSONWebKeySet from a list of keys. The key +// set will only contain the public keys associated with the input keys. +func publicJWKSFromKeys(in []interface{}) (*jose.JSONWebKeySet, errors.Aggregate) { + // Decode keys into a JWKS. + var keys jose.JSONWebKeySet + var errs []error + for i, key := range in { + var pubkey *jose.JSONWebKey + var err error + + switch k := key.(type) { + case publicKeyGetter: + // This is a private key. Get its public key + pubkey, err = jwkFromPublicKey(k.Public()) + default: + pubkey, err = jwkFromPublicKey(k) + } + if err != nil { + errs = append(errs, fmt.Errorf("error constructing JWK for key #%d: %v", i, err)) + continue + } + + if !pubkey.Valid() { + errs = append(errs, fmt.Errorf("key #%d not valid", i)) + continue + } + keys.Keys = append(keys.Keys, *pubkey) + } + if len(errs) != 0 { + return nil, errors.NewAggregate(errs) + } + return &keys, nil +} + +func jwkFromPublicKey(publicKey crypto.PublicKey) (*jose.JSONWebKey, error) { + alg, err := algorithmFromPublicKey(publicKey) + if err != nil { + return nil, err + } + + keyID, err := keyIDFromPublicKey(publicKey) + if err != nil { + return nil, err + } + + jwk := &jose.JSONWebKey{ + Algorithm: string(alg), + Key: publicKey, + KeyID: keyID, + Use: "sig", + } + + if !jwk.IsPublic() { + return nil, fmt.Errorf("JWK was not a public key! JWK: %v", jwk) + } + + return jwk, nil +} + +func algorithmFromPublicKey(publicKey crypto.PublicKey) (jose.SignatureAlgorithm, error) { + switch pk := publicKey.(type) { + case *rsa.PublicKey: + // IMPORTANT: If this function is updated to support additional key sizes, + // signerFromRSAPrivateKey in serviceaccount/jwt.go must also be + // updated to support the same key sizes. Today we only support RS256. 
+ return jose.RS256, nil + case *ecdsa.PublicKey: + switch pk.Curve { + case elliptic.P256(): + return jose.ES256, nil + case elliptic.P384(): + return jose.ES384, nil + case elliptic.P521(): + return jose.ES512, nil + default: + return "", fmt.Errorf("unknown private key curve, must be 256, 384, or 521") + } + case jose.OpaqueSigner: + return jose.SignatureAlgorithm(pk.Public().Algorithm), nil + default: + return "", fmt.Errorf("unknown public key type, must be *rsa.PublicKey, *ecdsa.PublicKey, or jose.OpaqueSigner") + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go index 63e0e6fc3c..4830f52abf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "encoding/json" "fmt" "net" @@ -119,7 +120,7 @@ func GetNodeIP(client clientset.Interface, hostname string) net.IP { } err := wait.ExponentialBackoff(backoff, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(hostname, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), hostname, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed to retrieve node info: %v", err) return false, nil @@ -201,7 +202,7 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.N if err != nil { return nil } - _, err = c.CoreV1().Nodes().PatchStatus(string(node), patch) + _, err = c.CoreV1().Nodes().PatchStatus(context.TODO(), string(node), patch) return err } @@ -226,7 +227,7 @@ func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) erro return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } - if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) } return nil @@ -247,7 +248,7 @@ func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes)) - if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) } return nil @@ -260,7 +261,7 @@ func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode return nil, nil, err } - updatedNode, err := c.Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes, "status") + updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") if err != nil { return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go index 984f0ea673..d61cb52374 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go @@ -17,6 +17,7 @@ limitations 
under the License. package pod import ( + "context" "encoding/json" "fmt" @@ -34,7 +35,7 @@ func PatchPodStatus(c clientset.Interface, namespace, name string, uid types.UID return nil, nil, err } - updatedPod, err := c.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, patchBytes, "status") + updatedPod, err := c.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") if err != nil { return nil, nil, fmt.Errorf("failed to patch status %q for pod %q/%q: %v", patchBytes, namespace, name, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go index be53866ee0..93f5ee5a2e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go @@ -71,7 +71,7 @@ func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error { } // CreateVolume creates an AWS EBS volume. -// Returns: volumeID, volumeSizeGB, labels, error +// Returns: volumeID, volumeSizeGB, labels, fstype, error func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (aws.KubernetesVolumeID, int, map[string]string, string, error) { cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider()) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go index 4d7153b94f..fd16b98556 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go @@ -130,6 +130,7 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie availabilityZone string availabilityZones sets.String selectedAvailabilityZone string + writeAcceleratorEnabled string diskIopsReadWrite string diskMbpsReadWrite string @@ -178,6 +179,8 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie diskMbpsReadWrite = v case "diskencryptionsetid": diskEncryptionSetID = v + case azure.WriteAcceleratorEnabled: + writeAcceleratorEnabled = v default: return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k) } @@ -245,6 +248,9 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie if p.options.CloudTags != nil { tags = *(p.options.CloudTags) } + if strings.EqualFold(writeAcceleratorEnabled, "true") { + tags[azure.WriteAcceleratorEnabled] = "true" + } volumeOptions := &azure.ManagedDiskOptions{ DiskName: name, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go index 3407a20a04..eab56cb563 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go @@ -19,6 +19,7 @@ limitations under the License. 
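The dominant mechanical change in this sync is visible in the node.go and pod.go hunks above: every client-go verb now takes a context.Context plus a typed options struct. A minimal sketch of the new call shape, assuming client-go at this vendored level (the helper name and label key are illustrative, not part of the patch):

package example

import (
	"context"
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

// patchNodeLabel applies a strategic-merge patch with the new
// context-aware signature: ctx and metav1.PatchOptions are required.
func patchNodeLabel(c clientset.Interface, nodeName, key, value string) error {
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": map[string]string{key: value},
		},
	}
	patchBytes, err := json.Marshal(patch)
	if err != nil {
		return fmt.Errorf("failed to marshal patch: %v", err)
	}
	// Pre-1.18 form: Patch(name, pt, data, subresources...).
	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName,
		types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	return err
}

Passing context.TODO() keeps behavior identical while letting callers thread real contexts (deadlines, cancellation) through later.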
package azure_file import ( + "context" "fmt" "strings" @@ -53,7 +54,7 @@ func (s *azureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secret return "", "", fmt.Errorf("Cannot get kube client") } - keys, err := kubeClient.CoreV1().Secrets(nameSpace).Get(secretName, metav1.GetOptions{}) + keys, err := kubeClient.CoreV1().Secrets(nameSpace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return "", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName) } @@ -89,7 +90,7 @@ func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accoun }, Type: "Opaque", } - _, err := kubeClient.CoreV1().Secrets(nameSpace).Create(secret) + _, err := kubeClient.CoreV1().Secrets(nameSpace).Create(context.TODO(), secret, metav1.CreateOptions{}) if errors.IsAlreadyExists(err) { err = nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go index 4ba29214fd..6bb939048f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go @@ -17,6 +17,7 @@ limitations under the License. package cephfs import ( + "context" "fmt" "os" "os/exec" @@ -104,7 +105,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume. if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go index 60092dfc52..a129746a9f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go @@ -19,6 +19,7 @@ limitations under the License. 
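SetAzureCredentials above treats IsAlreadyExists as success when creating the credentials Secret, which makes the create idempotent under retries. A minimal sketch of that idiom with the updated Create signature (helper name is illustrative):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// ensureSecret creates the secret and treats "already exists" as
// success, mirroring the SetAzureCredentials pattern above.
func ensureSecret(c clientset.Interface, namespace string, secret *v1.Secret) error {
	_, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
	if errors.IsAlreadyExists(err) {
		return nil
	}
	return err
}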
package cinder import ( + "context" "errors" "fmt" "io/ioutil" @@ -149,7 +150,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { // TODO: caching, currently it is overkill because it calls this function // only when it creates dynamic PV zones := make(sets.String) - nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { klog.V(2).Infof("Error listing nodes") return zones, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go index a89c177e51..e3ba528a14 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go @@ -32,6 +32,7 @@ import ( storage "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" @@ -105,7 +106,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string }, } - _, err = c.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = c.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) alreadyExist := false if err != nil { if !apierrors.IsAlreadyExists(err) { @@ -153,7 +154,7 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) { klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) - attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err) @@ -197,7 +198,7 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No attachID := getAttachmentName(volumeHandle, driverName, string(nodeName)) klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID)) - attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { attached[spec] = false klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err)) @@ -387,7 +388,7 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error { attachID = getAttachmentName(volID, driverName, string(nodeName)) } - if err := c.k8s.StorageV1().VolumeAttachments().Delete(attachID, nil); err != nil { + if err := c.k8s.StorageV1().VolumeAttachments().Delete(context.TODO(), attachID, nil); err != nil { if apierrors.IsNotFound(err) { // object deleted or never existed, done klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID)) @@ -413,7 +414,7 @@ func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string, tim func (c *csiAttacher) 
waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error { klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) - attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { //object deleted or never existed, done @@ -439,7 +440,7 @@ func (c *csiAttacher) waitForVolumeAttachDetachStatus(attach *storage.VolumeAtta return nil } - watcher, err := c.k8s.StorageV1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion})) + watcher, err := c.k8s.StorageV1().VolumeAttachments().Watch(context.TODO(), meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion})) if err != nil { return fmt.Errorf("watch error:%v for volume %v", err, volumeHandle) } @@ -588,7 +589,7 @@ func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMoun pvName := filepath.Base(dir) // Get PV and check for errors - pv, err := k8s.CoreV1().PersistentVolumes().Get(pvName, meta.GetOptions{}) + pv, err := k8s.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, meta.GetOptions{}) if err != nil { return "", "", err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go index 8b46d3c3e7..43484b3ad2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go @@ -276,7 +276,7 @@ func (m *csiBlockMapper) SetUpDevice() error { // Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName nodeName := string(m.plugin.host.GetNodeName()) attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName) - attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { return errors.New(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err)) } @@ -332,7 +332,7 @@ func (m *csiBlockMapper) MapPodDevice() (string, error) { // Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName nodeName := string(m.plugin.host.GetNodeName()) attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName) - attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { return "", errors.New(log("blockMapper.MapPodDevice failed to get volume attachment [id=%v]: %v", attachID, err)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go index c99996cabf..c1d548c9f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "time" @@ -32,7 +31,6 @@ import ( "google.golang.org/grpc/status" api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" 
volumetypes "k8s.io/kubernetes/pkg/volume/util/types" @@ -153,23 +151,12 @@ func (c *csiDriverClient) NodeGetInfo(ctx context.Context) ( err error) { klog.V(4).Info(log("calling NodeGetInfo rpc")) - // TODO retries should happen at a lower layer (issue #73371) - backoff := wait.Backoff{Duration: initialDuration, Factor: factor, Steps: steps} - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - var getNodeInfoError error - nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx) - if nodeID != "" { - return true, nil - } - // kubelet plugin registration service not implemented is a terminal error, no need to retry - if strings.Contains(getNodeInfoError.Error(), "no handler registered for plugin type") { - return false, getNodeInfoError - } - // Continue with exponential backoff - return false, nil - }) - - return nodeID, maxVolumePerNode, accessibleTopology, err + var getNodeInfoError error + nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx) + if getNodeInfoError != nil { + klog.Warningf("Error calling CSI NodeGetInfo(): %v", getNodeInfoError.Error()) + } + return nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError } func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go index a5e1254ff2..c9d831bdf5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" "os" - "path" "path/filepath" "strconv" @@ -432,7 +431,7 @@ func removeMountDir(plug *csiPlugin, mountPath string) error { return errors.New(log("failed to remove dir [%s]: %v", mountPath, err)) } // remove volume data file as well - volPath := path.Dir(mountPath) + volPath := filepath.Dir(mountPath) dataFile := filepath.Join(volPath, volDataFileName) klog.V(4).Info(log("also deleting volume info data file [%s]", dataFile)) if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go index fa335a07fc..40a00bfdfe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go @@ -20,7 +20,7 @@ import ( "errors" "fmt" "os" - "path" + "path/filepath" "strings" "time" @@ -216,6 +216,12 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error { csitranslationplugins.CinderInTreePluginName: func() bool { return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStack) }, + csitranslationplugins.AzureDiskInTreePluginName: func() bool { + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) + }, + csitranslationplugins.AzureFileInTreePluginName: func() bool { + return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureFile) + }, } // Initializing the label management channels @@ -387,7 +393,7 @@ func (p *csiPlugin) NewMounter( // Save volume info in pod dir dir := mounter.GetPath() - dataDir := path.Dir(dir) // dropoff /mount at end + 
dataDir := filepath.Dir(dir) // dropoff /mount at end if err := os.MkdirAll(dataDir, 0750); err != nil { return nil, errors.New(log("failed to create dir %#v: %v", dataDir, err)) @@ -438,7 +444,7 @@ func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmo // load volume info from file dir := unmounter.GetPath() - dataDir := path.Dir(dir) // dropoff /mount at end + dataDir := filepath.Dir(dir) // dropoff /mount at end data, err := loadVolumeData(dataDir, volDataFileName) if err != nil { return nil, errors.New(log("unmounter failed to load volume data file [%s]: %v", dir, err)) @@ -842,7 +848,7 @@ func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver attachID := getAttachmentName(handle, driver, nodeName) // search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName - attachment, err := client.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attachment, err := client.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { return nil, err // This err already has enough context ("VolumeAttachment xyz not found") } @@ -914,12 +920,3 @@ func highestSupportedVersion(versions []string) (*utilversion.Version, error) { } return highestSupportedVersion, nil } - -func isV0Version(version string) bool { - parsedVersion, err := utilversion.ParseGeneric(version) - if err != nil { - return false - } - - return parsedVersion.Major() == 0 -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go index 8201d14410..ca678bfac4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go @@ -17,6 +17,7 @@ limitations under the License. package csi import ( + "context" "encoding/json" "errors" "fmt" @@ -43,7 +44,7 @@ const ( func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) { credentials := map[string]string{} - secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{}) + secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(context.TODO(), secretRef.Name, meta.GetOptions{}) if err != nil { return credentials, errors.New(log("failed to find the secret %s in the namespace %s with error: %v", secretRef.Name, secretRef.Namespace, err)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index b6920ba078..98a16be6e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -19,6 +19,7 @@ limitations under the License. 
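Several hunks above swap path.Dir for filepath.Dir when deriving the volume data directory. Package path only understands forward slashes, while path/filepath uses the host OS separator, which matters once this code runs on Windows nodes. A small stdlib-only illustration:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Dir only understands '/'; with a Windows-style path it finds
	// no separator and returns ".".
	fmt.Println(path.Dir(`C:\data\vol\mount`)) // "."
	// filepath.Dir uses the OS separator, so on Windows this yields
	// `C:\data\vol` as expected.
	fmt.Println(filepath.Dir(`C:\data\vol\mount`))
	// On Unix hosts both functions agree for slash-separated paths:
	fmt.Println(filepath.Dir("/data/vol/mount")) // "/data/vol"
}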
package nodeinfomanager // import "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager" import ( + "context" "encoding/json" goerrors "errors" "fmt" @@ -183,7 +184,7 @@ func (nim *nodeInfoManager) tryUpdateNode(updateFuncs ...nodeUpdateFunc) error { } nodeClient := kubeClient.CoreV1().Nodes() - originalNode, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{}) + originalNode, err := nodeClient.Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if err != nil { return err } @@ -379,7 +380,7 @@ func (nim *nodeInfoManager) tryUpdateCSINode( maxAttachLimit int64, topology map[string]string) error { - nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { nodeInfo, err = nim.CreateCSINode() } @@ -412,7 +413,7 @@ func (nim *nodeInfoManager) InitializeCSINodeWithAnnotation() error { } func (nim *nodeInfoManager) tryInitializeCSINodeWithAnnotation(csiKubeClient clientset.Interface) error { - nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { // CreateCSINode will set the annotation _, err = nim.CreateCSINode() @@ -424,7 +425,7 @@ func (nim *nodeInfoManager) tryInitializeCSINodeWithAnnotation(csiKubeClient cli annotationModified := setMigrationAnnotation(nim.migratedPlugins, nodeInfo) if annotationModified { - _, err := csiKubeClient.StorageV1().CSINodes().Update(nodeInfo) + _, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo, metav1.UpdateOptions{}) return err } return nil @@ -443,7 +444,7 @@ func (nim *nodeInfoManager) CreateCSINode() (*storagev1.CSINode, error) { return nil, fmt.Errorf("error getting CSI client") } - node, err := kubeClient.CoreV1().Nodes().Get(string(nim.nodeName), metav1.GetOptions{}) + node, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if err != nil { return nil, err } @@ -467,7 +468,7 @@ func (nim *nodeInfoManager) CreateCSINode() (*storagev1.CSINode, error) { setMigrationAnnotation(nim.migratedPlugins, nodeInfo) - return csiKubeClient.StorageV1().CSINodes().Create(nodeInfo) + return csiKubeClient.StorageV1().CSINodes().Create(context.TODO(), nodeInfo, metav1.CreateOptions{}) } func setMigrationAnnotation(migratedPlugins map[string](func() bool), nodeInfo *storagev1.CSINode) (modified bool) { @@ -570,7 +571,7 @@ func (nim *nodeInfoManager) installDriverToCSINode( newDriverSpecs = append(newDriverSpecs, driverSpec) nodeInfo.Spec.Drivers = newDriverSpecs - _, err := csiKubeClient.StorageV1().CSINodes().Update(nodeInfo) + _, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo, metav1.UpdateOptions{}) return err } @@ -601,7 +602,7 @@ func (nim *nodeInfoManager) tryUninstallDriverFromCSINode( csiDriverName string) error { nodeInfoClient := csiKubeClient.StorageV1().CSINodes() - nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := nodeInfoClient.Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { return nil } else if err != nil { @@ -627,7 +628,7 @@ func (nim *nodeInfoManager) tryUninstallDriverFromCSINode( } nodeInfo.Spec.Drivers = drivers - 
_, err = nodeInfoClient.Update(nodeInfo) + _, err = nodeInfoClient.Update(context.TODO(), nodeInfo, metav1.UpdateOptions{}) return err // do not wrap error diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go index 4a431fd992..2d1598864b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go @@ -217,20 +217,22 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun } klog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode) return &fcDiskMounter{ - fcDisk: fcDisk, - fsType: fc.FSType, - volumeMode: volumeMode, - readOnly: readOnly, - mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host), - deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), + fcDisk: fcDisk, + fsType: fc.FSType, + volumeMode: volumeMode, + readOnly: readOnly, + mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host), + deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), + mountOptions: volumeutil.MountOptionFromSpec(spec), }, nil } return &fcDiskMounter{ - fcDisk: fcDisk, - fsType: fc.FSType, - readOnly: readOnly, - mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host), - deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), + fcDisk: fcDisk, + fsType: fc.FSType, + readOnly: readOnly, + mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host), + deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), + mountOptions: volumeutil.MountOptionFromSpec(spec), }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go index dfc2aa9d06..1710fce4ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go @@ -93,7 +93,7 @@ func (plugin *fcPlugin) RequiresRemount() bool { } func (plugin *fcPlugin) SupportsMountOption() bool { - return false + return true } func (plugin *fcPlugin) SupportsBulkVolumeVerification() bool { @@ -148,7 +148,7 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), - mountOptions: []string{}, + mountOptions: util.MountOptionFromSpec(spec), }, nil } return &fcDiskMounter{ @@ -312,7 +312,6 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m type fcDisk struct { volName string podUID types.UID - portal string wwns []string lun string wwids []string diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go index 9fec400e8f..fe6d4c7066 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go @@ -268,7 +268,7 @@ func (util *fcUtil) AttachDisk(b fcDiskMounter) (string, error) { return devicePath, nil } - err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) + err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, b.mountOptions) if err != nil { return devicePath, fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, 
globalPDPath, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go index 723b28de4d..413f275c72 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go @@ -394,7 +394,6 @@ func (b *flockerVolumeMounter) updateDatasetPrimary(datasetUUID string, primaryU datasetUUID, primaryUUID, err, ) case <-tickChan.C: - break } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go index 8067ba8334..6a5bb15465 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go @@ -17,6 +17,7 @@ limitations under the License. package glusterfs import ( + "context" "fmt" "math" "math/rand" @@ -152,7 +153,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu if kubeClient == nil { return nil, fmt.Errorf("failed to get kube client to initialize mounter") } - ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(epName, metav1.GetOptions{}) + ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(context.TODO(), epName, metav1.GetOptions{}) if err != nil { klog.Errorf("failed to get endpoint %s: %v", epName, err) return nil, err @@ -542,7 +543,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll if kubeClient == nil { return fmt.Errorf("failed to get kube client when collecting gids") } - pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pvList, err := kubeClient.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { return fmt.Errorf("failed to get existing persistent volumes") } @@ -832,14 +833,14 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi endpoint.Subsets = subset endpoint.Subsets[0].Addresses = addrlist endpoint.Subsets[0].Ports = ports - _, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(endpoint) + _, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(context.TODO(), endpoint, metav1.UpdateOptions{}) if err != nil { deleteErr := cli.VolumeDelete(volume.Id) if deleteErr != nil { klog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr) } klog.V(3).Infof("failed to update endpoint, deleting %s", endpoint) - err = kubeClient.CoreV1().Services(epNamespace).Delete(epServiceName, nil) + err = kubeClient.CoreV1().Services(epNamespace).Delete(context.TODO(), epServiceName, nil) if err != nil && errors.IsNotFound(err) { klog.V(1).Infof("service %s does not exist in namespace %s", epServiceName, epNamespace) err = nil @@ -883,7 +884,7 @@ func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string if kubeClient == nil { return nil, nil, fmt.Errorf("failed to get kube client when creating endpoint service") } - _, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint) + _, err = kubeClient.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoint, metav1.CreateOptions{}) if err != nil && errors.IsAlreadyExists(err) { klog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace) err = nil @@ -903,7 +904,7 @@ 
func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Protocol: "TCP", Port: 1}}}} - _, err = kubeClient.CoreV1().Services(namespace).Create(service) + _, err = kubeClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) if err != nil && errors.IsAlreadyExists(err) { klog.V(1).Infof("service %s already exist in namespace %s", service, namespace) err = nil @@ -920,7 +921,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi if kubeClient == nil { return fmt.Errorf("failed to get kube client when deleting endpoint service") } - err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil) + err = kubeClient.CoreV1().Services(namespace).Delete(context.TODO(), epServiceName, nil) if err != nil { return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go index 957a90a331..2dbab1e54f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go @@ -17,6 +17,7 @@ limitations under the License. package iscsi import ( + "context" "fmt" "os" "path/filepath" @@ -590,7 +591,7 @@ func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) ( if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go index da6cfbe708..8c0b629909 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go @@ -383,8 +383,7 @@ func (d *portworxVolumeDeleter) Delete() error { type portworxVolumeProvisioner struct { *portworxVolume - options volume.VolumeOptions - namespace string + options volume.VolumeOptions } var _ volume.Provisioner = &portworxVolumeProvisioner{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go index d6bd04d872..e76031132a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go @@ -17,6 +17,7 @@ limitations under the License. package portworx import ( + "context" "fmt" osdapi "github.com/libopenstorage/openstorage/api" @@ -358,7 +359,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) { } opts := metav1.GetOptions{} - svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts) + svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(context.TODO(), pxServiceName, opts) if err != nil { klog.Errorf("Failed to get service. 
Err: %v", err) return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go index a07fe8bf06..eb5caa71da 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go @@ -17,6 +17,7 @@ limitations under the License. package rbd import ( + "context" "fmt" "os" "path/filepath" @@ -271,7 +272,7 @@ func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, po if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err @@ -304,7 +305,7 @@ func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.Vol if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err @@ -486,7 +487,7 @@ func (plugin *rbdPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go index d453c04960..684ae29f6c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go @@ -17,6 +17,7 @@ limitations under the License. package operationexecutor import ( + "context" goerrors "errors" "fmt" "path/filepath" @@ -1274,7 +1275,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( } // Fetch current node object - node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) + node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(nodeName), metav1.GetOptions{}) if fetchErr != nil { // On failure, return error. Caller will log and retry. 
return volumeToMount.GenerateError("VerifyControllerAttachedVolume failed fetching node from API server", fetchErr) @@ -1316,7 +1317,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( func (og *operationGenerator) verifyVolumeIsSafeToDetach( volumeToDetach AttachedVolume) error { // Fetch current node object - node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) + node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(volumeToDetach.NodeName), metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", "")) @@ -1547,7 +1548,7 @@ func (og *operationGenerator) nodeExpandVolume(volumeToMount VolumeToMount, rsOp expandableVolumePlugin.RequiresFSResize() && volumeToMount.VolumeSpec.PersistentVolume != nil { pv := volumeToMount.VolumeSpec.PersistentVolume - pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(pv.Spec.ClaimRef.Name, metav1.GetOptions{}) + pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{}) if err != nil { // Return error rather than leave the file system un-resized, caller will log and retry return false, fmt.Errorf("MountVolume.NodeExpandVolume get PVC failed : %v", err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go index 0011c1cf49..0558c1dced 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go @@ -17,6 +17,7 @@ limitations under the License. 
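GenerateVerifyControllerAttachedVolumeFunc above fetches the live Node object and then scans node.Status.VolumesAttached for the volume in question. A sketch of that membership check under the new Get signature (helper name is illustrative, not part of the patch):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// volumeAttachedToNode reports whether the attach/detach controller has
// recorded the named volume in the node's status.
func volumeAttachedToNode(c clientset.Interface, nodeName string, volName v1.UniqueVolumeName) (bool, error) {
	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return false, err // caller logs and retries, as above
	}
	for _, attached := range node.Status.VolumesAttached {
		if attached.Name == volName {
			return true, nil
		}
	}
	return false, nil
}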
package recyclerclient import ( + "context" "fmt" "sync" @@ -177,15 +178,15 @@ type realRecyclerClient struct { } func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { - return c.client.CoreV1().Pods(pod.Namespace).Create(pod) + return c.client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) } func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { - return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (c *realRecyclerClient) DeletePod(name, namespace string) error { - return c.client.CoreV1().Pods(namespace).Delete(name, nil) + return c.client.CoreV1().Pods(namespace).Delete(context.TODO(), name, nil) } func (c *realRecyclerClient) Event(eventtype, message string) { @@ -204,13 +205,13 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s Watch: true, } - podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options) + podWatch, err := c.client.CoreV1().Pods(namespace).Watch(context.TODO(), options) if err != nil { return nil, err } eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name) - eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{ + eventWatch, err := c.client.CoreV1().Events(namespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: eventSelector.String(), Watch: true, }) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go index 0f10f1d57b..3dbffe1902 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go @@ -17,6 +17,7 @@ limitations under the License. package util import ( + "context" "encoding/json" "fmt" @@ -76,7 +77,7 @@ func UpdatePVSize( return fmt.Errorf("error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err) } - _, err = kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, types.StrategicMergePatchType, patchBytes) + _, err = kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { return fmt.Errorf("error Patching PV %q with error : %v", pvClone.Name, err) } @@ -171,7 +172,7 @@ func PatchPVCStatus( } updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace). - Patch(oldPVC.Name, types.StrategicMergePatchType, patchBytes, "status") + Patch(context.TODO(), oldPVC.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") if updateErr != nil { return nil, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, updateErr) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index e3baaa7c3a..ff32e666d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -17,6 +17,7 @@ limitations under the License. 
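WatchPod above narrows the event stream to a single pod with a field selector before opening the watch. A minimal sketch of the event-watch half, using the context-aware Watch signature (helper name is illustrative):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
)

// watchPodEvents opens a watch restricted to events whose
// involvedObject.name matches the given pod, as WatchPod does above.
func watchPodEvents(c clientset.Interface, namespace, podName string) (watch.Interface, error) {
	sel, err := fields.ParseSelector("involvedObject.name=" + podName)
	if err != nil {
		return nil, err
	}
	return c.CoreV1().Events(namespace).Watch(context.TODO(), metav1.ListOptions{
		FieldSelector: sel.String(),
		Watch:         true,
	})
}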
package util import ( + "context" "fmt" "io/ioutil" "os" @@ -116,7 +117,7 @@ func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interf if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -132,7 +133,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -155,7 +156,7 @@ func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) return nil, fmt.Errorf("Volume has no storage class") } - class, err := kubeClient.StorageV1().StorageClasses().Get(className, metav1.GetOptions{}) + class, err := kubeClient.StorageV1().StorageClasses().Get(context.TODO(), className, metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go index 2d023da3b1..406be3a08d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go @@ -182,17 +182,11 @@ type vsphereVolume struct { podUID types.UID // Unique identifier of the volume, used to find the disk resource in the provider. volPath string - // Filesystem type, optional. - fsType string - //diskID for detach disk - diskID string // Utility interface that provides API calls to the provider to attach/detach disks. manager vdManager // Mounter interface that provides system calls to mount the global path to the pod local path. mounter mount.Interface - // diskMounter provides the interface that is used to mount the actual block device. - diskMounter mount.Interface - plugin *vsphereVolumePlugin + plugin *vsphereVolumePlugin volume.MetricsProvider } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/create_resources.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/create_resources.go index 21dd9801bf..51b92ace88 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/create_resources.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/create_resources.go @@ -19,6 +19,7 @@ limitations under the License. 
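GetClassForVolume above resolves a PV's StorageClass through the API. A sketch of the core lookup, assuming the usual Spec.StorageClassName field and ignoring the legacy beta annotation fallback the in-tree helper also consults (helper name is illustrative):

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// classForPV fetches the StorageClass named by the PV's spec.
func classForPV(c clientset.Interface, pv *v1.PersistentVolume) (*storagev1.StorageClass, error) {
	className := pv.Spec.StorageClassName
	if className == "" {
		return nil, fmt.Errorf("volume %s has no storage class", pv.Name)
	}
	return c.StorageV1().StorageClasses().Get(context.TODO(), className, metav1.GetOptions{})
}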
package utils import ( + "context" "fmt" "time" @@ -26,6 +27,7 @@ import ( batch "k8s.io/api/batch/v1" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -68,7 +70,7 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod) return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().Pods(namespace).Create(obj) + _, err := c.CoreV1().Pods(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -85,7 +87,7 @@ func CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.Replic return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().ReplicationControllers(namespace).Create(obj) + _, err := c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -102,7 +104,7 @@ func CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().ReplicaSets(namespace).Create(obj) + _, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -119,7 +121,7 @@ func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *a return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().Deployments(namespace).Create(obj) + _, err := c.AppsV1().Deployments(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -136,7 +138,7 @@ func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *ap return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().DaemonSets(namespace).Create(obj) + _, err := c.AppsV1().DaemonSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -153,7 +155,7 @@ func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Jo return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.BatchV1().Jobs(namespace).Create(obj) + _, err := c.BatchV1().Jobs(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -170,7 +172,7 @@ func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Se return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().Secrets(namespace).Create(obj) + _, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -187,7 +189,7 @@ func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1 return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().ConfigMaps(namespace).Create(obj) + _, err := 
c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -204,7 +206,7 @@ func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.S return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().Services(namespace).Create(obj) + _, err := c.CoreV1().Services(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -221,7 +223,7 @@ func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().ResourceQuotas(namespace).Create(obj) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -238,7 +240,7 @@ func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.Persistent return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().PersistentVolumes().Create(obj) + _, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } @@ -255,7 +257,7 @@ func CreatePersistentVolumeClaimWithRetries(c clientset.Interface, namespace str return fmt.Errorf("Object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(obj) + _, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/delete_resources.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/delete_resources.go index 1eb96cddaa..60d1ba44e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/delete_resources.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/delete_resources.go @@ -19,6 +19,7 @@ limitations under the License. 
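All the Create*WithRetries helpers in create_resources.go share one shape: attempt the create, count AlreadyExists as success, and hand everything else to an exponential backoff. A condensed, self-contained version; the backoff values are illustrative and wait.ExponentialBackoff stands in for the package's own RetryWithExponentialBackOff:

    package utils

    import (
        "context"
        "fmt"
        "time"

        v1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        clientset "k8s.io/client-go/kubernetes"
    )

    // createConfigMapWithRetries condenses the pattern: attempt the create,
    // treat AlreadyExists as success, retry transient failures with backoff.
    func createConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1.ConfigMap) error {
        if obj == nil {
            return fmt.Errorf("object provided to create is empty")
        }
        createFunc := func() (bool, error) {
            _, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
            if err == nil || apierrors.IsAlreadyExists(err) {
                return true, nil // the object exists: done
            }
            // The real helpers return non-retryable errors immediately; this
            // sketch simply keeps polling until the backoff is exhausted.
            return false, nil
        }
        backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Jitter: 0.1, Steps: 5}
        return wait.ExponentialBackoff(backoff, createFunc)
    }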
package utils import ( + "context" "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -34,23 +35,23 @@ import ( func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error { switch kind { case api.Kind("Pod"): - return c.CoreV1().Pods(namespace).Delete(name, options) + return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options) case api.Kind("ReplicationController"): - return c.CoreV1().ReplicationControllers(namespace).Delete(name, options) + return c.CoreV1().ReplicationControllers(namespace).Delete(context.TODO(), name, options) case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"): - return c.AppsV1().ReplicaSets(namespace).Delete(name, options) + return c.AppsV1().ReplicaSets(namespace).Delete(context.TODO(), name, options) case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"): - return c.AppsV1().Deployments(namespace).Delete(name, options) + return c.AppsV1().Deployments(namespace).Delete(context.TODO(), name, options) case extensionsinternal.Kind("DaemonSet"): - return c.AppsV1().DaemonSets(namespace).Delete(name, options) + return c.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, options) case batchinternal.Kind("Job"): - return c.BatchV1().Jobs(namespace).Delete(name, options) + return c.BatchV1().Jobs(namespace).Delete(context.TODO(), name, options) case api.Kind("Secret"): - return c.CoreV1().Secrets(namespace).Delete(name, options) + return c.CoreV1().Secrets(namespace).Delete(context.TODO(), name, options) case api.Kind("ConfigMap"): - return c.CoreV1().ConfigMaps(namespace).Delete(name, options) + return c.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, options) case api.Kind("Service"): - return c.CoreV1().Services(namespace).Delete(name, options) + return c.CoreV1().Services(namespace).Delete(context.TODO(), name, options) default: return fmt.Errorf("Unsupported kind when deleting: %v", kind) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go index d1b583cc62..aedf76c881 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go @@ -17,6 +17,7 @@ limitations under the License. 
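deleteResource above returns whatever the underlying Delete returns, so callers that want idempotent cleanup typically swallow NotFound. A sketch of such a wrapper; the helper name is invented, and deleteResource is the function from the hunk above:

    package utils

    import (
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime/schema"
        clientset "k8s.io/client-go/kubernetes"
    )

    // deleteIgnoreNotFound makes cleanup idempotent: deleting an object that
    // is already gone is treated as success.
    func deleteIgnoreNotFound(c clientset.Interface, kind schema.GroupKind, namespace, name string) error {
        err := deleteResource(c, kind, namespace, name, &metav1.DeleteOptions{})
        if err != nil && apierrors.IsNotFound(err) {
            return nil
        }
        return err
    }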
package utils import ( + "context" "fmt" "strings" "time" @@ -42,7 +43,7 @@ func AddLabelsToNode(c clientset.Interface, nodeName string, labels map[string]s patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString) var err error for attempt := 0; attempt < retries; attempt++ { - _, err = c.CoreV1().Nodes().Patch(nodeName, types.MergePatchType, []byte(patch)) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, []byte(patch), metav1.PatchOptions{}) if err != nil { if !apierrors.IsConflict(err) { return err @@ -61,7 +62,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri var node *v1.Node var err error for attempt := 0; attempt < retries; attempt++ { - node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -74,7 +75,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri } delete(node.Labels, labelKey) } - _, err = c.CoreV1().Nodes().Update(node) + _, err = c.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) if err != nil { if !apierrors.IsConflict(err) { return err @@ -92,7 +93,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri // VerifyLabelsRemoved checks if Node for given nodeName does not have any of labels from labelKeys. // Return non-nil error if it does. func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []string) error { - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go index 323b927d06..a9f982affe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go @@ -17,6 +17,7 @@ limitations under the License. 
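AddLabelsToNode above builds its merge patch with fmt.Sprintf. A sketch of the same call shape that builds the document with json.Marshal instead, which keeps values properly quoted; this is a variation for illustration, not the file's actual code:

    package utils

    import (
        "context"
        "encoding/json"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        clientset "k8s.io/client-go/kubernetes"
    )

    // addNodeLabels applies a JSON merge patch to the node: keys present in
    // the patch are set, everything else on the node is left untouched.
    func addNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) error {
        patch, err := json.Marshal(map[string]interface{}{
            "metadata": map[string]interface{}{"labels": labels},
        })
        if err != nil {
            return err
        }
        _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, patch, metav1.PatchOptions{})
        return err
    }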
package utils import ( + "context" "fmt" "time" @@ -51,7 +52,7 @@ func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.R func LogPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet, logf LogfFn) { minReadySeconds := deployment.Spec.MinReadySeconds podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { - return c.CoreV1().Pods(namespace).List(options) + return c.CoreV1().Pods(namespace).List(context.TODO(), options) } podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc) @@ -80,7 +81,7 @@ func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *apps.D err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - deployment, err = c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -174,7 +175,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName var reason string err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - deployment, err = c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) if err != nil { return false, err } @@ -205,7 +206,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName // CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected. func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error { - deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("unable to get deployment %s during revision check: %v", deploymentName, err) } @@ -259,12 +260,12 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, var updateErr error pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - if deployment, err = c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil { + if deployment, err = c.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(deployment) - if deployment, err = c.AppsV1().Deployments(namespace).Update(deployment); err == nil { + if deployment, err = c.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}); err == nil { logf("Updating deployment %s", name) return true, nil } @@ -279,14 +280,14 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error { return deploymentutil.WaitForObservedDeployment(func() (*apps.Deployment, error) { - return c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + return c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) }, desiredGeneration, 2*time.Second, 1*time.Minute) } // WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback. func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string, pollInterval, pollTimeout time.Duration) error { err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) if err != nil { return false, err } @@ -306,7 +307,7 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error { var deployment *apps.Deployment err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) if err != nil { return false, err } @@ -322,7 +323,7 @@ func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentNa func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType, logf LogfFn, pollInterval, pollTimeout time.Duration) error { var deployment *apps.Deployment pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/pod_store.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/pod_store.go index 7dae2b12cf..b48796c217 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/pod_store.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/pod_store.go @@ -17,6 +17,7 @@ limitations under the License. 
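UpdateDeploymentWithRetries above is the standard read-modify-write loop: re-GET, re-apply the mutation, re-PUT, and poll again on conflict. Condensed into a self-contained sketch; the real helper also records the last update error for the timeout message:

    package utils

    import (
        "context"
        "time"

        apps "k8s.io/api/apps/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        clientset "k8s.io/client-go/kubernetes"
    )

    // updateDeploymentWithRetries re-reads the object before every attempt so
    // the mutation is always applied on top of the latest resourceVersion.
    func updateDeploymentWithRetries(c clientset.Interface, namespace, name string, mutate func(*apps.Deployment), pollInterval, pollTimeout time.Duration) (*apps.Deployment, error) {
        var deployment *apps.Deployment
        err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
            var err error
            if deployment, err = c.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
                return false, err
            }
            mutate(deployment)
            if deployment, err = c.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}); err == nil {
                return true, nil
            }
            // Conflicts and other transient errors fall through to the next poll.
            return false, nil
        })
        return deployment, err
    }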
package utils import ( + "context" "time" "k8s.io/api/core/v1" @@ -42,13 +43,13 @@ func NewPodStore(c clientset.Interface, namespace string, label labels.Selector, ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = label.String() options.FieldSelector = field.String() - obj, err := c.CoreV1().Pods(namespace).List(options) + obj, err := c.CoreV1().Pods(namespace).List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = label.String() options.FieldSelector = field.String() - return c.CoreV1().Pods(namespace).Watch(options) + return c.CoreV1().Pods(namespace).Watch(context.TODO(), options) }, } store := cache.NewStore(cache.MetaNamespaceKeyFunc) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/replicaset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/replicaset.go index 838dd891f0..625d6c7cc8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/replicaset.go @@ -17,6 +17,7 @@ limitations under the License. package utils import ( + "context" "fmt" "testing" "time" @@ -34,12 +35,12 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, var updateErr error pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - if rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil { + if rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rs) - if rs, err = c.AppsV1().ReplicaSets(namespace).Update(rs); err == nil { + if rs, err = c.AppsV1().ReplicaSets(namespace).Update(context.TODO(), rs, metav1.UpdateOptions{}); err == nil { logf("Updating replica set %q", name) return true, nil } @@ -56,7 +57,7 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaSet, pollInterval, pollTimeout time.Duration) error { desiredGeneration := rs.Generation if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - newRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{}) + newRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -72,12 +73,12 @@ func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name st var updateErr error pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - if rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil { + if rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(rs) - if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(rs); err == nil { + if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{}); err == nil { logf("Updating replica set %q", name) return true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go index bfef242767..e53e8b8e58 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go @@ -66,7 +66,7 @@ func removePtr(replicas *int32) int32 { func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) { // Wait until it's scheduled - p, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"}) + p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"}) if err == nil && p.Spec.NodeName != "" { return p, nil } @@ -74,7 +74,7 @@ func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, time startTime := time.Now() for startTime.Add(timeout).After(time.Now()) { time.Sleep(pollingPeriod) - p, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"}) + p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"}) if err == nil && p.Spec.NodeName != "" { return p, nil } @@ -852,7 +852,7 @@ func (config *RCConfig) start() error { if oldRunning != config.Replicas { // List only pods from a given replication controller. options := metav1.ListOptions{LabelSelector: label.String()} - if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(options); err == nil { + if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(context.TODO(), options); err == nil { for _, pod := range pods.Items { config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp) } @@ -1088,7 +1088,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d) } - _, err := client.StorageV1beta1().CSINodes().Create(csiNode) + _, err := client.StorageV1beta1().CSINodes().Create(context.TODO(), csiNode, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { // Something created CSINode instance after we checked it did not exist. 
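WaitUntilPodIsScheduled above passes ResourceVersion "0", which permits the API server to answer from its watch cache rather than etcd: cheaper, possibly slightly stale, and fine for a polling loop. The same loop expressed with the wait package (the helper name and interval are illustrative):

    package utils

    import (
        "context"
        "time"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        clientset "k8s.io/client-go/kubernetes"
    )

    // waitUntilPodIsScheduled polls a cached read of the pod until the
    // scheduler has bound it to a node, or the timeout expires.
    func waitUntilPodIsScheduled(c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) {
        var pod *v1.Pod
        err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
            // ResourceVersion "0" allows a cached (possibly stale) read.
            p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"})
            if err == nil && p.Spec.NodeName != "" {
                pod = p
                return true, nil
            }
            // Not scheduled yet, or a transient error: keep polling.
            return false, nil
        })
        return pod, err
    }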
// Make the caller to re-try PrepareDependentObjects by returning Conflict error @@ -1118,12 +1118,12 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode, } csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",") - _, err := client.StorageV1beta1().CSINodes().Update(csiNode) + _, err := client.StorageV1beta1().CSINodes().Update(context.TODO(), csiNode, metav1.UpdateOptions{}) return err } func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { - csiNode, err := client.StorageV1beta1().CSINodes().Get(node.Name, metav1.GetOptions{}) + csiNode, err := client.StorageV1beta1().CSINodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return s.createCSINode(node.Name, client) @@ -1134,7 +1134,7 @@ func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client } func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { - csiNode, err := client.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{}) + csiNode, err := client.StorageV1beta1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return nil @@ -1194,7 +1194,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo return nil } for attempt := 0; attempt < retries; attempt++ { - if _, err = client.CoreV1().Nodes().Patch(node.Name, types.MergePatchType, []byte(patch)); err == nil { + if _, err = client.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil { break } if !apierrors.IsConflict(err) { @@ -1224,7 +1224,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error { var err error for attempt := 0; attempt < retries; attempt++ { - node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err) } @@ -1232,7 +1232,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare if apiequality.Semantic.DeepEqual(node, updatedNode) { return nil } - if _, err = client.CoreV1().Nodes().Update(updatedNode); err == nil { + if _, err = client.CoreV1().Nodes().Update(context.TODO(), updatedNode, metav1.UpdateOptions{}); err == nil { break } if !apierrors.IsConflict(err) { @@ -1698,7 +1698,7 @@ func (config *DaemonConfig) Run() error { var err error for i := 0; i < retries; i++ { // Wait for all daemons to be running - nodes, err = config.Client.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) + nodes, err = config.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) if err == nil { break } else if i+1 == retries { diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go index 252c70d471..8e67eb7e45 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws.go @@ -2084,7 +2084,7 @@ func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) { // applyUnSchedulableTaint 
applies a unschedulable taint to a node after verifying // if node has become unusable because of volumes getting stuck in attaching state. func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) { - node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) + node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(nodeName), metav1.GetOptions{}) if fetchErr != nil { klog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) return diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go index ef77b50244..17383e52b6 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go @@ -488,12 +488,6 @@ func (elb *FakeELB) ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttribut panic("Not implemented") } -// expectDescribeLoadBalancers is not implemented but is required for interface -// conformance -func (elb *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) { - panic("Not implemented") -} - // FakeELBV2 is a fake ELBV2 client used for testing type FakeELBV2 struct { aws *FakeAWSServices diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go index 7332a1240d..8d9383a0e0 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go @@ -97,7 +97,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) ( env.ServiceManagementEndpoint) } - oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, tenantID) + oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, tenantID, nil) if err != nil { return nil, fmt.Errorf("creating the OAuth config: %v", err) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure.go index 466f06bfd3..74342f42ad 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure.go @@ -213,6 +213,9 @@ type Config struct { NsgCacheTTLInSeconds int `json:"nsgCacheTTLInSeconds,omitempty" yaml:"nsgCacheTTLInSeconds,omitempty"` // RouteTableCacheTTLInSeconds sets the cache TTL for route table RouteTableCacheTTLInSeconds int `json:"routeTableCacheTTLInSeconds,omitempty" yaml:"routeTableCacheTTLInSeconds,omitempty"` + + // DisableAvailabilitySetNodes disables VMAS nodes support when "VMType" is set to "vmss". + DisableAvailabilitySetNodes bool `json:"disableAvailabilitySetNodes,omitempty" yaml:"disableAvailabilitySetNodes,omitempty"` } var _ cloudprovider.Interface = (*Cloud)(nil) @@ -353,6 +356,10 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro config.VMType = vmTypeStandard } + if config.DisableAvailabilitySetNodes && config.VMType != vmTypeVMSS { + return fmt.Errorf("disableAvailabilitySetNodes %v is only supported when vmType is 'vmss'", config.DisableAvailabilitySetNodes) + } + if config.CloudConfigType == "" { // The default cloud config type is cloudConfigTypeMerge. 
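The new DisableAvailabilitySetNodes knob is rejected unless vmType is "vmss", as the guard in InitializeCloudFromConfig above shows. A trimmed illustration of the wire format and the check; the struct here is cut down to the two relevant fields, with JSON tags matching the full Config:

    package azure

    import (
        "encoding/json"
        "fmt"
    )

    // miniConfig carries only the two fields involved in the new validation.
    type miniConfig struct {
        VMType                      string `json:"vmType,omitempty"`
        DisableAvailabilitySetNodes bool   `json:"disableAvailabilitySetNodes,omitempty"`
    }

    func parseAndValidate(raw []byte) (*miniConfig, error) {
        var cfg miniConfig
        if err := json.Unmarshal(raw, &cfg); err != nil {
            return nil, err
        }
        if cfg.DisableAvailabilitySetNodes && cfg.VMType != "vmss" {
            return nil, fmt.Errorf("disableAvailabilitySetNodes %v is only supported when vmType is 'vmss'", cfg.DisableAvailabilitySetNodes)
        }
        return &cfg, nil
    }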
config.CloudConfigType = cloudConfigTypeMerge @@ -492,22 +499,22 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro az.RouteTablesClient = routetableclient.New(azClientConfig.WithRateLimiter(config.RouteTableRateLimit)) az.LoadBalancerClient = loadbalancerclient.New(azClientConfig.WithRateLimiter(config.LoadBalancerRateLimit)) az.SecurityGroupsClient = securitygroupclient.New(azClientConfig.WithRateLimiter(config.SecurityGroupRateLimit)) - az.VirtualMachinesClient = vmclient.New(azClientConfig.WithRateLimiter(config.VirtualMachineRateLimit)) az.PublicIPAddressesClient = publicipclient.New(azClientConfig.WithRateLimiter(config.PublicIPAddressRateLimit)) az.VirtualMachineScaleSetsClient = vmssclient.New(azClientConfig.WithRateLimiter(config.VirtualMachineScaleSetRateLimit)) - az.DisksClient = diskclient.New(azClientConfig.WithRateLimiter(config.DiskRateLimit)) az.VirtualMachineSizesClient = vmsizeclient.New(azClientConfig.WithRateLimiter(config.VirtualMachineSizeRateLimit)) az.SnapshotsClient = snapshotclient.New(azClientConfig.WithRateLimiter(config.SnapshotRateLimit)) az.StorageAccountClient = storageaccountclient.New(azClientConfig.WithRateLimiter(config.StorageAccountRateLimit)) + az.VirtualMachinesClient = vmclient.New(azClientConfig.WithRateLimiter(config.VirtualMachineRateLimit)) + az.DisksClient = diskclient.New(azClientConfig.WithRateLimiter(config.DiskRateLimit)) + // fileClient is not based on armclient, but it's still backoff retried. + az.FileClient = newAzureFileClient(env, azClientConfig.Backoff) // Error "not an active Virtual Machine Scale Set VM" is not retriable for VMSS VM. + // But http.StatusNotFound is retriable because of ARM replication latency. vmssVMClientConfig := azClientConfig.WithRateLimiter(config.VirtualMachineScaleSetRateLimit) - vmssVMClientConfig.Backoff = vmssVMClientConfig.Backoff.WithNonRetriableErrors([]string{vmssVMNotActiveErrorMessage}) + vmssVMClientConfig.Backoff = vmssVMClientConfig.Backoff.WithNonRetriableErrors([]string{vmssVMNotActiveErrorMessage}).WithRetriableHTTPStatusCodes([]int{http.StatusNotFound}) az.VirtualMachineScaleSetVMsClient = vmssvmclient.New(vmssVMClientConfig) - // TODO(feiskyer): refactor azureFileClient to Interface. - az.FileClient = &azureFileClient{env: *env} - if az.MaximumLoadBalancerRuleCount == 0 { az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_cache.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_cache.go index 38f21378b4..3ee1ccbf4b 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_cache.go @@ -38,6 +38,9 @@ const ( // active/expired. If entry doesn't exist in cache, then data is fetched // using getter, saved in cache and returned cacheReadTypeUnsafe + // cacheReadTypeForceRefresh force refreshes the cache even if the cache entry + // is not expired + cacheReadTypeForceRefresh ) // getFunc defines a getter function for timedCache. 
@@ -120,20 +123,20 @@ func (t *timedCache) Get(key string, crt cacheReadType) (interface{}, error) { entry.lock.Lock() defer entry.lock.Unlock() - // entry exists - if entry.data != nil { + // entry exists and if cache is not force refreshed + if entry.data != nil && crt != cacheReadTypeForceRefresh { // allow unsafe read, so return data even if expired if crt == cacheReadTypeUnsafe { return entry.data, nil } // if cached data is not expired, return cached data - if time.Since(entry.createdOn) < t.ttl { + if crt == cacheReadTypeDefault && time.Since(entry.createdOn) < t.ttl { return entry.data, nil } } - // Data is not cached yet or cache data is expired, cache it by getter. - // entry is locked before getting to ensure concurrent gets don't result in - // multiple ARM calls. + // Data is not cached yet, cache data is expired or requested force refresh + // cache it by getter. entry is locked before getting to ensure concurrent + // gets don't result in multiple ARM calls. data, err := t.getter(key) if err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_config.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_config.go index 7f094e47e0..cf953b331a 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_config.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_config.go @@ -19,6 +19,7 @@ limitations under the License. package azure import ( + "context" "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -68,7 +69,7 @@ func (az *Cloud) getConfigFromSecret() (*Config, error) { return nil, nil } - secret, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Get(cloudConfigSecretName, metav1.GetOptions{}) + secret, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Get(context.TODO(), cloudConfigSecretName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get secret %s: %v", cloudConfigSecretName, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go index e693e99df4..513c63ba28 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go @@ -51,6 +51,10 @@ const ( sourceSnapshot = "snapshot" sourceVolume = "volume" + // WriteAcceleratorEnabled support for Azure Write Accelerator on Azure Disks + // https://docs.microsoft.com/azure/virtual-machines/windows/how-to-enable-write-accelerator + WriteAcceleratorEnabled = "writeacceleratorenabled" + // see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-by-copying-a-snapshot. 
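After this rewrite, timedCache.Get distinguishes three read modes: default honors the TTL, unsafe accepts expired data, and force refresh always calls the getter. A self-contained miniature of just that decision logic; names are changed, and the real cache also takes a per-entry lock so concurrent misses trigger a single ARM call:

    package cache

    import "time"

    type readType int

    const (
        defaultRead  readType = iota // honor the TTL
        unsafeRead                   // accept expired data
        forceRefresh                 // always hit the getter
    )

    type entry struct {
        data      interface{}
        createdOn time.Time
    }

    type miniCache struct {
        ttl     time.Duration
        entries map[string]*entry
        getter  func(key string) (interface{}, error)
    }

    // get mirrors the decision order of timedCache.Get above: cached data is
    // only returned when the read mode and the TTL both allow it.
    func (c *miniCache) get(key string, crt readType) (interface{}, error) {
        e := c.entries[key]
        if e != nil && e.data != nil && crt != forceRefresh {
            if crt == unsafeRead {
                return e.data, nil // expired data is acceptable here
            }
            if crt == defaultRead && time.Since(e.createdOn) < c.ttl {
                return e.data, nil // still within the TTL
            }
        }
        // Miss, expiry, or forced refresh: fetch and re-cache.
        data, err := c.getter(key)
        if err != nil {
            return nil, err
        }
        c.entries[key] = &entry{data: data, createdOn: time.Now().UTC()}
        return data, nil
    }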
diskSnapshotPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/snapshots/%s" @@ -113,6 +117,8 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt cacheReadTy // return (lun, error) func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error) { diskEncryptionSetID := "" + writeAcceleratorEnabled := false + if isManagedDisk { diskName := path.Base(diskURI) resourceGroup, err := getResourceGroupFromDiskURI(diskURI) @@ -142,6 +148,11 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri disk.DiskProperties.Encryption.DiskEncryptionSetID != nil { diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID } + if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok { + if v != nil && strings.EqualFold(*v, "true") { + writeAcceleratorEnabled = true + } + } } vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe) @@ -167,7 +178,7 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", diskURI, lun, nodeName) c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "attaching") defer c.diskAttachDetachMap.Delete(strings.ToLower(diskURI)) - return lun, vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode, diskEncryptionSetID) + return lun, vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode, diskEncryptionSetID, writeAcceleratorEnabled) } // DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI. diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go index ed6b5c6d6e..da7969dbc2 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" "k8s.io/klog" @@ -29,7 +30,7 @@ import ( // AttachDisk attaches a vhd to vm // the vhd must exist, can be identified by diskName, diskURI, and lun. 
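The AttachDisk change above derives writeAcceleratorEnabled from a disk tag. The probe on its own, as a sketch; the helper name is invented and the literal tag key is the constant's value from the hunk above:

    package azure

    import "strings"

    // wantsWriteAccelerator inspects a managed disk's tags the same way the
    // AttachDisk change above does: the flag is set only for an explicit,
    // case-insensitive "true".
    func wantsWriteAccelerator(tags map[string]*string) bool {
        if v, ok := tags["writeacceleratorenabled"]; ok && v != nil {
            return strings.EqualFold(*v, "true")
        }
        return false
    }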
-func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string) error { +func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error { vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault) if err != nil { return err @@ -59,11 +60,12 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri } disks = append(disks, compute.DataDisk{ - Name: &diskName, - Lun: &lun, - Caching: cachingMode, - CreateOption: "attach", - ManagedDisk: managedDisk, + Name: &diskName, + Lun: &lun, + Caching: cachingMode, + CreateOption: "attach", + ManagedDisk: managedDisk, + WriteAcceleratorEnabled: to.BoolPtr(writeAcceleratorEnabled), }) } else { disks = append(disks, diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go index f5ee5b58b2..d325e3fc47 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" "k8s.io/klog" @@ -29,7 +30,7 @@ import ( // AttachDisk attaches a vhd to vm // the vhd must exist, can be identified by diskName, diskURI, and lun. -func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string) error { +func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error { vmName := mapNodeNameToVMName(nodeName) ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault) if err != nil { @@ -61,11 +62,12 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod } disks = append(disks, compute.DataDisk{ - Name: &diskName, - Lun: &lun, - Caching: compute.CachingTypes(cachingMode), - CreateOption: "attach", - ManagedDisk: managedDisk, + Name: &diskName, + Lun: &lun, + Caching: compute.CachingTypes(cachingMode), + CreateOption: "attach", + ManagedDisk: managedDisk, + WriteAcceleratorEnabled: to.BoolPtr(writeAcceleratorEnabled), }) } else { disks = append(disks, diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go index 7464d20408..27965b4972 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go @@ -963,7 +963,7 @@ func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, return fmt.Errorf("unimplemented") } -func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string) error { +func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, 
cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error { return fmt.Errorf("unimplemented") } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_file.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_file.go index bc4106d2fc..ad158ecc9f 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_file.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_file.go @@ -20,17 +20,30 @@ package azure import ( "fmt" + "net/http" azs "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest/azure" "k8s.io/klog" + "k8s.io/legacy-cloud-providers/azure/retry" ) const ( useHTTPS = true ) +var ( + // refer https://github.com/Azure/azure-sdk-for-go/blob/master/storage/client.go#L88. + defaultValidStatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + // FileClient is the interface for creating file shares, interface for test // injection. type FileClient interface { @@ -53,7 +66,15 @@ func (az *Cloud) resizeFileShare(accountName, accountKey, name string, sizeGiB i } type azureFileClient struct { - env azure.Environment + env *azure.Environment + backoff *retry.Backoff +} + +func newAzureFileClient(env *azure.Environment, backoff *retry.Backoff) *azureFileClient { + return &azureFileClient{ + env: env, + backoff: backoff, + } } func (f *azureFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error { @@ -106,6 +127,15 @@ func (f *azureFileClient) getFileSvcClient(accountName, accountKey string) (*azs if err != nil { return nil, fmt.Errorf("error creating azure client: %v", err) } + + if f.backoff != nil { + fileClient.Sender = &azs.DefaultSender{ + RetryAttempts: f.backoff.Steps, + ValidStatusCodes: defaultValidStatusCodes, + RetryDuration: f.backoff.Duration, + } + } + fc := fileClient.GetFileService() return &fc, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go index 0087fd56c8..2eb4e18da3 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go @@ -64,7 +64,7 @@ type VMSet interface { EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. - AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string) error + AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error // DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI. DetachDisk(diskName, diskURI string, nodeName types.NodeName) error // GetDataDisks gets a list of data disks attached to the node. 
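newAzureFileClient above threads the cloud's backoff into the storage SDK by attaching a DefaultSender, so file-share calls retry the usual transient status codes on their own. A sketch of the wiring; the attempt count and retry duration here are placeholder values, not the provider's tuning:

    package azure

    import (
        "net/http"
        "time"

        azs "github.com/Azure/azure-sdk-for-go/storage"
    )

    // withBackoffSender attaches a retrying sender to a storage client, as
    // getFileSvcClient above does with the cloud's configured backoff.
    func withBackoffSender(client *azs.Client) {
        client.Sender = &azs.DefaultSender{
            RetryAttempts: 5,
            ValidStatusCodes: []int{
                http.StatusRequestTimeout,      // 408
                http.StatusInternalServerError, // 500
                http.StatusBadGateway,          // 502
                http.StatusServiceUnavailable,  // 503
                http.StatusGatewayTimeout,      // 504
            },
            RetryDuration: 2 * time.Second,
        }
    }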
diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index 565aaac5ef..4a874be758 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -75,9 +75,11 @@ func newScaleSet(az *Cloud) (VMSet, error) { availabilitySet: newAvailabilitySet(az), } - ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache() - if err != nil { - return nil, err + if !ss.DisableAvailabilitySetNodes { + ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache() + if err != nil { + return nil, err + } } ss.vmssCache, err = ss.newVMSSCache() @@ -117,7 +119,7 @@ func (ss *scaleSet) getVMSS(vmssName string, crt cacheReadType) (*compute.Virtua return vmss, nil } - klog.V(3).Infof("Couldn't find VMSS with name %s, refreshing the cache", vmssName) + klog.V(2).Infof("Couldn't find VMSS with name %s, refreshing the cache", vmssName) ss.vmssCache.Delete(vmssKey) vmss, err = getter(vmssName) if err != nil { @@ -133,19 +135,21 @@ func (ss *scaleSet) getVMSS(vmssName string, crt cacheReadType) (*compute.Virtua // getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. // It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets. func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { - getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) { + getter := func(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, bool, error) { + var found bool cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt) if err != nil { - return "", "", nil, err + return "", "", nil, found, err } virtualMachines := cached.(*sync.Map) if vm, ok := virtualMachines.Load(nodeName); ok { result := vm.(*vmssVirtualMachinesEntry) - return result.vmssName, result.instanceID, result.virtualMachine, nil + found = true + return result.vmssName, result.instanceID, result.virtualMachine, found, nil } - return "", "", nil, nil + return "", "", nil, found, nil } _, err := getScaleSetVMInstanceID(nodeName) @@ -153,22 +157,24 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, strin return "", "", nil, err } - vmssName, instanceID, vm, err := getter(nodeName) + vmssName, instanceID, vm, found, err := getter(nodeName, crt) if err != nil { return "", "", nil, err } - if vm != nil { + + if !found { + klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName) + vmssName, instanceID, vm, found, err = getter(nodeName, cacheReadTypeForceRefresh) + if err != nil { + return "", "", nil, err + } + } + + if found && vm != nil { return vmssName, instanceID, vm, nil } - klog.V(3).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName) - ss.vmssVMCache.Delete(vmssVirtualMachinesKey) - vmssName, instanceID, vm, err = getter(nodeName) - if err != nil { - return "", "", nil, err - } - - if vm == nil { + if !found || vm == nil { return "", "", nil, cloudprovider.InstanceNotFound } return vmssName, instanceID, vm, nil @@ -199,7 +205,7 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // The node must belong to one of scale sets. 
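The rewritten getVmssVM no longer deletes the whole cache key on a miss; it performs one cached read and then at most one forced refresh before reporting InstanceNotFound. The shape, extracted as a sketch; the function name is illustrative and the cacheReadType constants are the ones defined earlier in this patch:

    package azure

    import (
        "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
        cloudprovider "k8s.io/cloud-provider"
    )

    // lookupWithRefresh tries the cache once, forces a single refresh on a
    // miss, and only then gives up with InstanceNotFound.
    func lookupWithRefresh(getter func(crt cacheReadType) (*compute.VirtualMachineScaleSetVM, bool, error)) (*compute.VirtualMachineScaleSetVM, error) {
        vm, found, err := getter(cacheReadTypeDefault)
        if err != nil {
            return nil, err
        }
        if !found {
            // The cache may simply lag behind ARM: refresh once.
            if vm, found, err = getter(cacheReadTypeForceRefresh); err != nil {
                return nil, err
            }
        }
        if !found || vm == nil {
            return nil, cloudprovider.InstanceNotFound
        }
        return vm, nil
    }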
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt cacheReadType) (*compute.VirtualMachineScaleSetVM, error) { - getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) { + getter := func(crt cacheReadType) (vm *compute.VirtualMachineScaleSetVM, found bool, err error) { cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt) if err != nil { return nil, false, err @@ -222,21 +228,21 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI return vm, found, nil } - vm, found, err := getter() - if err != nil { - return nil, err - } - if found { - return vm, nil - } - - klog.V(3).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID) - ss.vmssVMCache.Delete(vmssVirtualMachinesKey) - vm, found, err = getter() + vm, found, err := getter(crt) if err != nil { return nil, err } if !found { + klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID) + vm, found, err = getter(cacheReadTypeForceRefresh) + if err != nil { + return nil, err + } + } + if found && vm != nil { + return vm, nil + } + if !found || vm == nil { return nil, cloudprovider.InstanceNotFound } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go index 4df9dcaa5d..8a78d5a45e 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go @@ -111,6 +111,26 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry + oldCache := make(map[string]vmssVirtualMachinesEntry) + + if ss.vmssVMCache != nil { + // get old cache before refreshing the cache + entry, exists, err := ss.vmssVMCache.store.GetByKey(vmssVirtualMachinesKey) + if err != nil { + return nil, err + } + if exists { + cached := entry.(*cacheEntry).data + if cached != nil { + virtualMachines := cached.(*sync.Map) + virtualMachines.Range(func(key, value interface{}) bool { + oldCache[key.(string)] = *value.(*vmssVirtualMachinesEntry) + return true + }) + } + } + } + allResourceGroups, err := ss.GetResourceGroups() if err != nil { return nil, err @@ -136,15 +156,52 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) { } computerName := strings.ToLower(*vm.OsProfile.ComputerName) - localCache.Store(computerName, &vmssVirtualMachinesEntry{ + vmssVMCacheEntry := &vmssVirtualMachinesEntry{ resourceGroup: resourceGroup, vmssName: ssName, instanceID: to.String(vm.InstanceID), virtualMachine: &vm, lastUpdate: time.Now().UTC(), - }) + } + // set cache entry to nil when the VM is under deleting. 
+ if vm.VirtualMachineScaleSetVMProperties != nil && + strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) { + klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName) + vmssVMCacheEntry.virtualMachine = nil + } + localCache.Store(computerName, vmssVMCacheEntry) + + if _, exists := oldCache[computerName]; exists { + delete(oldCache, computerName) + } } } + + // add old missing cache data with nil entries to prevent aggressive + // ARM calls during cache invalidation + for name, vmEntry := range oldCache { + // if the nil cache entry has existed for 15 minutes in the cache + // then it should not be added back to the cache + if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > 15*time.Minute { + klog.V(5).Infof("ignoring expired entries from old cache for %s", name) + continue + } + lastUpdate := time.Now().UTC() + if vmEntry.virtualMachine == nil { + // if this is already a nil entry then keep the time the nil + // entry was first created, so we can cleanup unwanted entries + lastUpdate = vmEntry.lastUpdate + } + + klog.V(5).Infof("adding old entries to new cache for %s", name) + localCache.Store(name, &vmssVirtualMachinesEntry{ + resourceGroup: vmEntry.resourceGroup, + vmssName: vmEntry.vmssName, + instanceID: vmEntry.instanceID, + virtualMachine: nil, + lastUpdate: lastUpdate, + }) + } } return localCache, nil @@ -199,6 +256,12 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) { } func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt cacheReadType) (bool, error) { + // Assume all nodes are managed by VMSS when DisableAvailabilitySetNodes is enabled. + if ss.DisableAvailabilitySetNodes { + klog.V(2).Infof("Assuming node %q is managed by VMSS since DisableAvailabilitySetNodes is set to true", nodeName) + return false, nil + } + cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey, crt) if err != nil { return false, err diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go index df936e1385..4b2eac0c27 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/azure_wrap.go @@ -27,6 +27,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" + "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" @@ -193,6 +194,12 @@ func (az *Cloud) newVMCache() (*timedCache, error) { return nil, nil } + if vm.VirtualMachineProperties != nil && + strings.EqualFold(to.String(vm.VirtualMachineProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) { + klog.V(2).Infof("Virtual machine %q is under deleting", key) + return nil, nil + } + return &vm, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go index bf0ec8df85..e2fffdbcdf 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go @@ -38,6 +38,15 @@ const ( var ( // The function to get current time.
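The carry-over loop above keeps missing VMs as explicit nil entries so repeated lookups for a recently deleted instance don't each trigger an ARM list, and expires those nils after 15 minutes. The retention rule isolated as a sketch; vmssVirtualMachinesEntry is the type from the hunk above:

    // carryOver reports whether an old cache entry should survive a rebuild,
    // and with which timestamp.
    func carryOver(old vmssVirtualMachinesEntry, now time.Time) (keep bool, lastUpdate time.Time) {
        if old.virtualMachine == nil && now.Sub(old.lastUpdate) > 15*time.Minute {
            return false, time.Time{} // nil entry already 15 minutes old: drop
        }
        if old.virtualMachine == nil {
            return true, old.lastUpdate // preserve first-nil time so it can expire
        }
        return true, now.UTC() // previously live VM: carried as nil, clock restarted
    }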
now = time.Now + + // StatusCodesForRetry are a defined group of status code for which the client will retry. + StatusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } ) // Error indicates an error returned by Azure APIs. @@ -184,18 +193,21 @@ func getHTTPStatusCode(resp *http.Response) int { // shouldRetryHTTPRequest determines if the request is retriable. func shouldRetryHTTPRequest(resp *http.Response, err error) bool { if resp != nil { - // HTTP 412 (StatusPreconditionFailed) means etag mismatch - // HTTP 400 (BadRequest) means the request cannot be accepted, hence we shouldn't retry. - if resp.StatusCode == http.StatusPreconditionFailed || resp.StatusCode == http.StatusBadRequest { - return false + for _, code := range StatusCodesForRetry { + if resp.StatusCode == code { + return true + } } - // HTTP 4xx (except 412) or 5xx suggests we should retry. - if 399 < resp.StatusCode && resp.StatusCode < 600 { + // should retry on <200, error>. + if isSuccessHTTPResponse(resp) && err != nil { return true } + + return false } + // should retry when error is not nil and no http.Response. if err != nil { return true } @@ -224,6 +236,24 @@ func getRetryAfter(resp *http.Response) time.Duration { return dur } +// GetErrorWithRetriableHTTPStatusCodes gets an error with RetriableHTTPStatusCodes. +// It is used to retry on some HTTPStatusCodes. +func GetErrorWithRetriableHTTPStatusCodes(resp *http.Response, err error, retriableHTTPStatusCodes []int) *Error { + rerr := GetError(resp, err) + if rerr == nil { + return nil + } + + for _, code := range retriableHTTPStatusCodes { + if rerr.HTTPStatusCode == code { + rerr.Retriable = true + break + } + } + + return rerr +} + // GetStatusNotFoundAndForbiddenIgnoredError gets an error with StatusNotFound and StatusForbidden ignored. // It is only used in DELETE operations. func GetStatusNotFoundAndForbiddenIgnoredError(resp *http.Response, err error) *Error { diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_retry.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_retry.go index 120b7dffaa..64d9f0fa46 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_retry.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_retry.go @@ -58,6 +58,8 @@ type Backoff struct { Cap time.Duration // The errors indicate that the request shouldn't do more retrying. NonRetriableErrors []string + // The RetriableHTTPStatusCodes indicates that the HTTPStatusCode should do more retrying. + RetriableHTTPStatusCodes []int } // NewBackoff creates a new Backoff. @@ -78,6 +80,13 @@ func (b *Backoff) WithNonRetriableErrors(errs []string) *Backoff { return &newBackoff } +// WithRetriableHTTPStatusCodes returns a new *Backoff with RetriableHTTPStatusCode assigned. +func (b *Backoff) WithRetriableHTTPStatusCodes(httpStatusCodes []int) *Backoff { + newBackoff := *b + newBackoff.RetriableHTTPStatusCodes = httpStatusCodes + return &newBackoff +} + // isNonRetriableError returns true if the Error is one of NonRetriableErrors. 
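The rewrite above inverts the old policy: instead of retrying any 4xx/5xx except 400 and 412, the client now retries only an explicit allow-list, plus the odd case of a nominally successful response that still carried a transport error. Condensed into a standalone form:

    package retry

    import "net/http"

    // shouldRetry condenses the new decision: allow-listed status codes are
    // retriable, a 2xx paired with an error gets one more try, and a failure
    // with no response at all is always retried.
    func shouldRetry(resp *http.Response, statusCodesForRetry []int, err error) bool {
        if resp != nil {
            for _, code := range statusCodesForRetry {
                if resp.StatusCode == code {
                    return true
                }
            }
            // "<2xx, error>": the transport reported success and an error.
            return resp.StatusCode >= 200 && resp.StatusCode < 300 && err != nil
        }
        return err != nil
    }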
func (b *Backoff) isNonRetriableError(rerr *Error) bool { if rerr == nil { @@ -154,7 +163,7 @@ func doBackoffRetry(s autorest.Sender, r *http.Request, backoff *Backoff) (resp return } resp, err = s.Do(rr.Request()) - rerr := GetError(resp, err) + rerr := GetErrorWithRetriableHTTPStatusCodes(resp, err, backoff.RetriableHTTPStatusCodes) // Abort retries in the following scenarios: // 1) request succeed // 2) request is not retriable diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/BUILD b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/BUILD index cb68d40eaa..0ce0a4f618 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/BUILD @@ -50,11 +50,13 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/pkg/version:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce.go index 70f46dc3a4..c4c33e446b 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce.go @@ -68,8 +68,9 @@ const ( // AffinityTypeClientIP - affinity based on Client IP. gceAffinityTypeClientIP = "CLIENT_IP" - operationPollInterval = time.Second - maxTargetPoolCreateInstances = 200 + operationPollInterval = time.Second + maxTargetPoolCreateInstances = 200 + maxInstancesPerTargetPoolUpdate = 1000 // HTTP Load Balancer parameters // Configure 8 second period for external health checks. diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go index b2e2a1deaf..824381b2b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go @@ -19,6 +19,7 @@ limitations under the License. 
package gce import ( + "context" "crypto/rand" "encoding/hex" "errors" @@ -199,7 +200,7 @@ func (ci *ClusterID) getOrInitialize() error { UIDProvider: newID, } - if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil { + if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(context.TODO(), cfg, metav1.CreateOptions{}); err != nil { klog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_fake.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_fake.go index 09a516c655..72936211fd 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_fake.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_fake.go @@ -35,6 +35,7 @@ type TestClusterValues struct { SecondaryZoneName string ClusterID string ClusterName string + OnXPN bool } // DefaultTestClusterValues Creates a reasonable set of default cluster values @@ -74,8 +75,14 @@ func NewFakeGCECloud(vals TestClusterValues) *Cloud { projectID: vals.ProjectID, networkProjectID: vals.ProjectID, ClusterID: fakeClusterID(vals.ClusterID), + onXPN: vals.OnXPN, } c := cloud.NewMockGCE(&gceProjectRouter{gce}) gce.c = c return gce } + +// UpdateFakeGCECloud updates the fake GCE cloud with the specified values. Currently only the onXPN value is updated. +func UpdateFakeGCECloud(g *Cloud, vals TestClusterValues) { + g.onXPN = vals.OnXPN +} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go index 7fb9ce22c7..331903082d 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go @@ -591,16 +591,32 @@ func (g *Cloud) updateTargetPool(loadBalancerName string, hosts []*gceInstance) toRemove = append(toRemove, &compute.InstanceReference{Instance: link}) } - if len(toAdd) > 0 { - if err := g.AddInstancesToTargetPool(loadBalancerName, g.region, toAdd); err != nil { + for len(toAdd) > 0 { + // Do not remove more than maxInstancesPerTargetPoolUpdate in a single call. + instancesCount := len(toAdd) + if instancesCount > maxInstancesPerTargetPoolUpdate { + instancesCount = maxInstancesPerTargetPoolUpdate + } + // The operation to add 1000 instances is fairly long (may take minutes), so + // we don't need to worry about saturating QPS limits. + if err := g.AddInstancesToTargetPool(loadBalancerName, g.region, toAdd[:instancesCount]); err != nil { return err } + toAdd = toAdd[instancesCount:] } - if len(toRemove) > 0 { - if err := g.RemoveInstancesFromTargetPool(loadBalancerName, g.region, toRemove); err != nil { + for len(toRemove) > 0 { + // Do not remove more than maxInstancesPerTargetPoolUpdate in a single call. + instancesCount := len(toRemove) + if instancesCount > maxInstancesPerTargetPoolUpdate { + instancesCount = maxInstancesPerTargetPoolUpdate + } + // The operation to remove 1000 instances is fairly long (may take minutes), so + // we don't need to worry about saturating QPS limits. + if err := g.RemoveInstancesFromTargetPool(loadBalancerName, g.region, toRemove[:instancesCount]); err != nil { return err } + toRemove = toRemove[instancesCount:] } // Try to verify that the correct number of nodes are now in the target pool. 
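
The updateTargetPool hunk above caps each AddInstancesToTargetPool and RemoveInstancesFromTargetPool call at maxInstancesPerTargetPoolUpdate (1000) instances. A minimal, self-contained sketch of that batching pattern in Go; processInBatches and addBatch are hypothetical names used for illustration, not code from the patch:

```go
package main

import "fmt"

const maxInstancesPerTargetPoolUpdate = 1000

// processInBatches mirrors the loop structure in updateTargetPool: it
// consumes the slice front-to-back and never passes more than the cap to
// a single call. addBatch stands in for AddInstancesToTargetPool or
// RemoveInstancesFromTargetPool.
func processInBatches(instances []string, addBatch func([]string) error) error {
	for len(instances) > 0 {
		n := len(instances)
		if n > maxInstancesPerTargetPoolUpdate {
			n = maxInstancesPerTargetPoolUpdate
		}
		if err := addBatch(instances[:n]); err != nil {
			return err
		}
		instances = instances[n:]
	}
	return nil
}

func main() {
	instances := make([]string, 2500)
	for i := range instances {
		instances[i] = fmt.Sprintf("instance-%d", i)
	}
	// Expect three batches: 1000, 1000, 500.
	_ = processInBatches(instances, func(batch []string) error {
		fmt.Println("batch size:", len(batch))
		return nil
	})
}
```
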
diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go index 72d554756d..86290a963e 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go @@ -41,12 +41,30 @@ import ( const ( // Used to list instances in all states(RUNNING and other) - https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups/listInstances allInstances = "ALL" + // ILBFinalizerV1 key is used to identify ILB services whose resources are managed by service controller. + ILBFinalizerV1 = "gke.networking.io/l4-ilb-v1" + // ILBFinalizerV2 is the finalizer used by newer controllers that implement Internal LoadBalancer services. + ILBFinalizerV2 = "gke.networking.io/l4-ilb-v2" ) func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - if g.AlphaFeatureGate.Enabled(AlphaFeatureILBSubsets) { + if g.AlphaFeatureGate.Enabled(AlphaFeatureILBSubsets) && existingFwdRule == nil { + // When ILBSubsets is enabled, new ILB services will not be processed here. + // Services that have existing GCE resources created by this controller will continue to update. + g.eventRecorder.Eventf(svc, v1.EventTypeNormal, "SkippingEnsureInternalLoadBalancer", + "Skipped ensureInternalLoadBalancer since %s feature is enabled.", AlphaFeatureILBSubsets) return nil, cloudprovider.ImplementedElsewhere } + if hasFinalizer(svc, ILBFinalizerV2) { + // Another controller is handling the resources for this service. + g.eventRecorder.Eventf(svc, v1.EventTypeNormal, "SkippingEnsureInternalLoadBalancer", + "Skipped ensureInternalLoadBalancer as service contains '%s' finalizer.", ILBFinalizerV2) + return nil, cloudprovider.ImplementedElsewhere + } + if err := addFinalizer(svc, g.client.CoreV1(), ILBFinalizerV1); err != nil { + klog.Errorf("Failed to attach finalizer '%s' on service %s/%s - %v", ILBFinalizerV1, svc.Namespace, svc.Name, err) + return nil, err + } nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} ports, _, protocol := getPortsAndProtocol(svc.Spec.Ports) @@ -298,6 +316,11 @@ func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, return err } + if err := removeFinalizer(svc, g.client.CoreV1(), ILBFinalizerV1); err != nil { + klog.Errorf("Failed to remove finalizer '%s' on service %s/%s - %v", ILBFinalizerV1, svc.Namespace, svc.Name, err) + return err + } + return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go index 046c599dd4..3b0ba36edc 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go @@ -19,6 +19,7 @@ limitations under the License. 
package gce import ( + "context" "errors" "fmt" "net" @@ -32,12 +33,18 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + + "encoding/json" "cloud.google.com/go/compute/metadata" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + "k8s.io/client-go/kubernetes/fake" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" ) func fakeGCECloud(vals TestClusterValues) (*Cloud, error) { @@ -45,6 +52,7 @@ func fakeGCECloud(vals TestClusterValues) (*Cloud, error) { gce.AlphaFeatureGate = NewAlphaFeatureGate([]string{}) gce.nodeInformerSynced = func() bool { return true } + gce.client = fake.NewSimpleClientset() mockGCE := gce.c.(*cloud.MockGCE) mockGCE.MockTargetPools.AddInstanceHook = mock.AddInstanceHook @@ -75,6 +83,34 @@ func fakeGCECloud(vals TestClusterValues) (*Cloud, error) { return gce, nil } +func registerTargetPoolAddInstanceHook(gce *Cloud, callback func(*compute.TargetPoolsAddInstanceRequest)) error { + mockGCE, ok := gce.c.(*cloud.MockGCE) + if !ok { + return fmt.Errorf("couldn't cast cloud to mockGCE: %#v", gce) + } + existingHandler := mockGCE.MockTargetPools.AddInstanceHook + hook := func(ctx context.Context, key *meta.Key, req *compute.TargetPoolsAddInstanceRequest, m *cloud.MockTargetPools) error { + callback(req) + return existingHandler(ctx, key, req, m) + } + mockGCE.MockTargetPools.AddInstanceHook = hook + return nil +} + +func registerTargetPoolRemoveInstanceHook(gce *Cloud, callback func(*compute.TargetPoolsRemoveInstanceRequest)) error { + mockGCE, ok := gce.c.(*cloud.MockGCE) + if !ok { + return fmt.Errorf("couldn't cast cloud to mockGCE: %#v", gce) + } + existingHandler := mockGCE.MockTargetPools.RemoveInstanceHook + hook := func(ctx context.Context, key *meta.Key, req *compute.TargetPoolsRemoveInstanceRequest, m *cloud.MockTargetPools) error { + callback(req) + return existingHandler(ctx, key, req, m) + } + mockGCE.MockTargetPools.RemoveInstanceHook = hook + return nil +} + type gceInstance struct { Zone string Name string @@ -316,3 +352,85 @@ func typeOfNetwork(network *compute.Network) netType { func getLocationName(project, zoneOrRegion string) string { return fmt.Sprintf("projects/%s/locations/%s", project, zoneOrRegion) } + +func addFinalizer(service *v1.Service, kubeClient v1core.CoreV1Interface, key string) error { + if hasFinalizer(service, key) { + return nil + } + + // Make a copy so we don't mutate the shared informer cache. + updated := service.DeepCopy() + updated.ObjectMeta.Finalizers = append(updated.ObjectMeta.Finalizers, key) + + // TODO(87447) use PatchService from k8s.io/cloud-provider/service/helpers + _, err := patchService(kubeClient, service, updated) + return err +} + +// removeFinalizer patches the service to remove finalizer. +func removeFinalizer(service *v1.Service, kubeClient v1core.CoreV1Interface, key string) error { + if !hasFinalizer(service, key) { + return nil + } + + // Make a copy so we don't mutate the shared informer cache. + updated := service.DeepCopy() + updated.ObjectMeta.Finalizers = removeString(updated.ObjectMeta.Finalizers, key) + + _, err := patchService(kubeClient, service, updated) + return err +} + +//hasFinalizer returns if the given service has the specified key in its list of finalizers. 
+func hasFinalizer(service *v1.Service, key string) bool { + for _, finalizer := range service.ObjectMeta.Finalizers { + if finalizer == key { + return true + } + } + return false +} + +// removeString returns a newly created []string that contains all items from slice that +// are not equal to s. +func removeString(slice []string, s string) []string { + var newSlice []string + for _, item := range slice { + if item != s { + newSlice = append(newSlice, item) + } + } + return newSlice +} + +// patchService patches service's Status or ObjectMeta given the origin and +// updated ones. Change to spec will be ignored. +func patchService(c v1core.CoreV1Interface, oldSvc *v1.Service, newSvc *v1.Service) (*v1.Service, error) { + // Reset spec to make sure only patch for Status or ObjectMeta. + newSvc.Spec = oldSvc.Spec + + patchBytes, err := getPatchBytes(oldSvc, newSvc) + if err != nil { + return nil, err + } + + return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") +} + +func getPatchBytes(oldSvc *v1.Service, newSvc *v1.Service) ([]byte, error) { + oldData, err := json.Marshal(oldSvc) + if err != nil { + return nil, fmt.Errorf("failed to Marshal oldData for svc %s/%s: %v", oldSvc.Namespace, oldSvc.Name, err) + } + + newData, err := json.Marshal(newSvc) + if err != nil { + return nil, fmt.Errorf("failed to Marshal newData for svc %s/%s: %v", newSvc.Namespace, newSvc.Name, err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Service{}) + if err != nil { + return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for svc %s/%s: %v", oldSvc.Namespace, oldSvc.Name, err) + } + return patchBytes, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack.go index c3db860b5e..2c2d25251d 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/openstack/openstack.go @@ -295,7 +295,7 @@ func setConfigFromSecret(cfg *Config) error { return fmt.Errorf("failed to get kubernetes client: %v", err) } - secret, err := k8sClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secret, err := k8sClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { klog.Warningf("Cannot get secret %s in namespace %s. error: %q", secretName, secretNamespace, err) return err diff --git a/cluster-autoscaler/vendor/k8s.io/utils/mount/OWNERS b/cluster-autoscaler/vendor/k8s.io/utils/mount/OWNERS index c8821ce735..5fa5cc24c3 100644 --- a/cluster-autoscaler/vendor/k8s.io/utils/mount/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/OWNERS @@ -6,6 +6,7 @@ reviewers: - jsafrane - msau42 - andyzhangx + - gnufied approvers: - jingxu97 - saad-ali diff --git a/cluster-autoscaler/vendor/k8s.io/utils/mount/mount.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount.go index 821cc87534..0e7d108704 100644 --- a/cluster-autoscaler/vendor/k8s.io/utils/mount/mount.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount.go @@ -20,6 +20,7 @@ limitations under the License. 
package mount import ( + "fmt" "os" "path/filepath" "strings" @@ -76,6 +77,36 @@ type MountPoint struct { Pass int } +type MountErrorType string + +const ( + FilesystemMismatch MountErrorType = "FilesystemMismatch" + HasFilesystemErrors MountErrorType = "HasFilesystemErrors" + UnformattedReadOnly MountErrorType = "UnformattedReadOnly" + FormatFailed MountErrorType = "FormatFailed" +) + +type MountError struct { + Type MountErrorType + Message string +} + +func (mountError *MountError) String() string { + return mountError.Message +} + +func (mountError *MountError) Error() string { + return mountError.Message +} + +func NewMountError(mountErrorValue MountErrorType, format string, args ...interface{}) error { + mountError := &MountError{ + Type: mountErrorValue, + Message: fmt.Sprintf(format, args...), + } + return mountError +} + // SafeFormatAndMount probes a device to see if it is formatted. // Namely it checks to see if a file system is present. If so it // mounts it otherwise the device is formatted first then mounted. diff --git a/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_linux.go b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_linux.go index 2a91002429..88a46424bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/mount/mount_linux.go @@ -19,7 +19,6 @@ limitations under the License. package mount import ( - "errors" "fmt" "os" "os/exec" @@ -257,6 +256,54 @@ func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { return SearchMountPoints(realpath, procMountInfoPath) } +// checkAndRepairFileSystem checks and repairs filesystems using command fsck. +func (mounter *SafeFormatAndMount) checkAndRepairFilesystem(source string) error { + klog.V(4).Infof("Checking for issues with fsck on disk: %s", source) + args := []string{"-a", source} + out, err := mounter.Exec.Command("fsck", args...).CombinedOutput() + if err != nil { + ee, isExitError := err.(utilexec.ExitError) + switch { + case err == utilexec.ErrExecutableNotFound: + klog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.") + case isExitError && ee.ExitStatus() == fsckErrorsCorrected: + klog.Infof("Device %s has errors which were corrected by fsck.", source) + case isExitError && ee.ExitStatus() == fsckErrorsUncorrected: + return NewMountError(HasFilesystemErrors, "'fsck' found errors on device %s but could not correct them: %s", source, string(out)) + case isExitError && ee.ExitStatus() > fsckErrorsUncorrected: + klog.Infof("`fsck` error %s", string(out)) + } + } + return nil +} + +// checkAndRepairXfsFilesystem checks and repairs xfs filesystem using command xfs_repair. 
+func (mounter *SafeFormatAndMount) checkAndRepairXfsFilesystem(source string) error { + klog.V(4).Infof("Checking for issues with xfs_repair on disk: %s", source) + + args := []string{source} + checkArgs := []string{"-n", source} + + // check-only using "xfs_repair -n", if the exit status is not 0, perform a "xfs_repair" + _, err := mounter.Exec.Command("xfs_repair", checkArgs...).CombinedOutput() + if err != nil { + if err == utilexec.ErrExecutableNotFound { + klog.Warningf("'xfs_repair' not found on system; continuing mount without running 'xfs_repair'.") + return nil + } else { + klog.Warningf("Filesystem corruption was detected for %s, running xfs_repair to repair", source) + out, err := mounter.Exec.Command("xfs_repair", args...).CombinedOutput() + if err != nil { + return NewMountError(HasFilesystemErrors, "'xfs_repair' found errors on device %s but could not correct them: %s\n", source, out) + } else { + klog.Infof("Device %s has errors which were corrected by xfs_repair.", source) + return nil + } + } + } + return nil +} + // formatAndMount uses unix utils to format and mount the given disk func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { readOnly := false @@ -268,76 +315,73 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, } options = append(options, "defaults") + var mountErrorValue MountErrorType - if !readOnly { - // Run fsck on the disk to fix repairable issues, only do this for volumes requested as rw. - klog.V(4).Infof("Checking for issues with fsck on disk: %s", source) - args := []string{"-a", source} - out, err := mounter.Exec.Command("fsck", args...).CombinedOutput() - if err != nil { - ee, isExitError := err.(utilexec.ExitError) - switch { - case err == utilexec.ErrExecutableNotFound: - klog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.") - case isExitError && ee.ExitStatus() == fsckErrorsCorrected: - klog.Infof("Device %s has errors which were corrected by fsck.", source) - case isExitError && ee.ExitStatus() == fsckErrorsUncorrected: - return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s", source, string(out)) - case isExitError && ee.ExitStatus() > fsckErrorsUncorrected: - klog.Infof("`fsck` error %s", string(out)) - } - } + // Check if the disk is already formatted + existingFormat, err := mounter.GetDiskFormat(source) + if err != nil { + return err } - // Try to mount the disk - klog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target) - mountErr := mounter.Interface.Mount(source, target, fstype, options) - if mountErr != nil { - // Mount failed. This indicates either that the disk is unformatted or - // it contains an unexpected filesystem. - existingFormat, err := mounter.GetDiskFormat(source) - if err != nil { - return err + // Use 'ext4' as the default + if len(fstype) == 0 { + fstype = "ext4" + } + + if existingFormat == "" { + // Do not attempt to format the disk if mounting as readonly, return an error to reflect this. + if readOnly { + return NewMountError(UnformattedReadOnly, "cannot mount unformatted disk %s as we are manipulating it in read-only mode", source) } - if existingFormat == "" { - if readOnly { - // Don't attempt to format if mounting as readonly, return an error to reflect this. - return errors.New("failed to mount unformatted volume as read only") - } - // Disk is unformatted so format it. 
- args := []string{source} - // Use 'ext4' as the default - if len(fstype) == 0 { - fstype = "ext4" + // Disk is unformatted so format it. + args := []string{source} + if fstype == "ext4" || fstype == "ext3" { + args = []string{ + "-F", // Force flag + "-m0", // Zero blocks reserved for super-user + source, } + } - if fstype == "ext4" || fstype == "ext3" { - args = []string{ - "-F", // Force flag - "-m0", // Zero blocks reserved for super-user - source, - } - } - klog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) - _, err := mounter.Exec.Command("mkfs."+fstype, args...).CombinedOutput() - if err == nil { - // the disk has been formatted successfully try to mount it again. - klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) - return mounter.Interface.Mount(source, target, fstype, options) - } + klog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) + _, err := mounter.Exec.Command("mkfs."+fstype, args...).CombinedOutput() + if err != nil { klog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err) - return err + return NewMountError(FormatFailed, err.Error()) } - // Disk is already formatted and failed to mount - if len(fstype) == 0 || fstype == existingFormat { - // This is mount error - return mountErr + + klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) + } else { + if fstype != existingFormat { + // Verify that the disk is formatted with filesystem type we are expecting + mountErrorValue = FilesystemMismatch + klog.Warningf("Configured to mount disk %s as %s but current format is %s, things might break", source, existingFormat, fstype) + } + + if !readOnly { + // Run check tools on the disk to fix repairable issues, only do this for formatted volumes requested as rw. + var err error + switch existingFormat { + case "xfs": + err = mounter.checkAndRepairXfsFilesystem(source) + default: + err = mounter.checkAndRepairFilesystem(source) + } + + if err != nil { + return err + } } - // Block device is formatted with unexpected filesystem, let the user know - return fmt.Errorf("failed to mount the volume as %q, it already contains %s. Mount error: %v", fstype, existingFormat, mountErr) } - return mountErr + + // Mount the disk + klog.V(4).Infof("Attempting to mount disk %s in %s format at %s", source, fstype, target) + if err := mounter.Interface.Mount(source, target, fstype, options); err != nil { + return NewMountError(mountErrorValue, err.Error()) + } + + return nil } // GetDiskFormat uses 'blkid' to see if the given disk is unformatted diff --git a/cluster-autoscaler/vendor/k8s.io/utils/net/ipnet.go b/cluster-autoscaler/vendor/k8s.io/utils/net/ipnet.go index abbb37a546..c2e844bf5d 100644 --- a/cluster-autoscaler/vendor/k8s.io/utils/net/ipnet.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/net/ipnet.go @@ -17,6 +17,7 @@ limitations under the License. 
package net import ( + "fmt" "net" "strings" ) @@ -119,3 +120,102 @@ func (s IPNetSet) Equal(s2 IPNetSet) bool { func (s IPNetSet) Len() int { return len(s) } + +// IPSet maps string to net.IP +type IPSet map[string]net.IP + +// ParseIPSet parses string slice to IPSet +func ParseIPSet(items ...string) (IPSet, error) { + ipset := make(IPSet) + for _, item := range items { + ip := net.ParseIP(strings.TrimSpace(item)) + if ip == nil { + return nil, fmt.Errorf("error parsing IP %q", item) + } + + ipset[ip.String()] = ip + } + + return ipset, nil +} + +// Insert adds items to the set. +func (s IPSet) Insert(items ...net.IP) { + for _, item := range items { + s[item.String()] = item + } +} + +// Delete removes all items from the set. +func (s IPSet) Delete(items ...net.IP) { + for _, item := range items { + delete(s, item.String()) + } +} + +// Has returns true if and only if item is contained in the set. +func (s IPSet) Has(item net.IP) bool { + _, contained := s[item.String()] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s IPSet) HasAll(items ...net.IP) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s IPSet) Difference(s2 IPSet) IPSet { + result := make(IPSet) + for k, i := range s { + _, found := s2[k] + if found { + continue + } + result[k] = i + } + return result +} + +// StringSlice returns a []string with the String representation of each element in the set. +// Order is undefined. +func (s IPSet) StringSlice() []string { + a := make([]string, 0, len(s)) + for k := range s { + a = append(a, k) + } + return a +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s IPSet) IsSuperset(s2 IPSet) bool { + for k := range s2 { + _, found := s[k] + if !found { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s IPSet) Equal(s2 IPSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) +} + +// Len returns the size of the set. 
+func (s IPSet) Len() int { + return len(s) +} diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index 803e97c259..f53e3bdf69 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -119,6 +119,11 @@ github.com/beorn7/perks/quantile github.com/blang/semver # github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48 => github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48 github.com/checkpoint-restore/go-criu/rpc +# github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed => github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed +github.com/cilium/ebpf +github.com/cilium/ebpf/asm +github.com/cilium/ebpf/internal +github.com/cilium/ebpf/internal/unix # github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 => github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 github.com/clusterhq/flocker-go # github.com/container-storage-interface/spec v1.2.0 => github.com/container-storage-interface/spec v1.2.0 @@ -194,7 +199,7 @@ github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.4.0 => github.com/docker/go-units v0.4.0 github.com/docker/go-units -# github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 +# github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 => github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 github.com/docker/libnetwork/ipvs # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 => github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 github.com/docker/spdystream @@ -289,7 +294,7 @@ github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value # github.com/google/go-querystring v1.0.0 => github.com/google/go-querystring v1.0.0 github.com/google/go-querystring/query -# github.com/google/gofuzz v1.0.0 => github.com/google/gofuzz v1.0.0 +# github.com/google/gofuzz v1.1.0 => github.com/google/gofuzz v1.1.0 github.com/google/gofuzz # github.com/google/uuid v1.1.1 => github.com/google/uuid v1.1.1 github.com/google/uuid @@ -395,11 +400,15 @@ github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 => github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.0.0-rc9 => github.com/opencontainers/runc v1.0.0-rc9 +# github.com/opencontainers/runc v1.0.0-rc10 => github.com/opencontainers/runc v1.0.0-rc10 github.com/opencontainers/runc/libcontainer github.com/opencontainers/runc/libcontainer/apparmor github.com/opencontainers/runc/libcontainer/cgroups +github.com/opencontainers/runc/libcontainer/cgroups/ebpf +github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter github.com/opencontainers/runc/libcontainer/cgroups/fs +github.com/opencontainers/runc/libcontainer/cgroups/fs2 +github.com/opencontainers/runc/libcontainer/cgroups/fscommon github.com/opencontainers/runc/libcontainer/cgroups/systemd github.com/opencontainers/runc/libcontainer/configs github.com/opencontainers/runc/libcontainer/configs/validate @@ -412,6 +421,7 @@ github.com/opencontainers/runc/libcontainer/stacktrace github.com/opencontainers/runc/libcontainer/system github.com/opencontainers/runc/libcontainer/user github.com/opencontainers/runc/libcontainer/utils 
+github.com/opencontainers/runc/types # github.com/opencontainers/runtime-spec v1.0.0 => github.com/opencontainers/runtime-spec v1.0.0 github.com/opencontainers/runtime-spec/specs-go # github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52 => github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52 @@ -425,7 +435,7 @@ github.com/pmezard/go-difflib/difflib github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 => github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 +# github.com/prometheus/client_model v0.2.0 => github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model/go # github.com/prometheus/common v0.4.1 => github.com/prometheus/common v0.4.1 github.com/prometheus/common/expfmt @@ -582,7 +592,7 @@ golang.org/x/oauth2/jws golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e => golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sync/singleflight -# golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a +# golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry @@ -677,7 +687,7 @@ gopkg.in/square/go-jose.v2/jwt gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.2.8 => gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/api +# k8s.io/api v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/api k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -720,7 +730,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/apimachinery +# k8s.io/apimachinery v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/apimachinery k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -778,7 +788,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/apiserver +# k8s.io/apiserver v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/apiserver k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/initializer @@ -862,7 +872,7 @@ k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/wsstream k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/client-go +# k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/client-go k8s.io/client-go/discovery k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic @@ -1089,14 +1099,14 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/cloud-provider +# k8s.io/cloud-provider 
v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/cloud-provider k8s.io/cloud-provider k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/component-base +# k8s.io/component-base v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/component-base k8s.io/component-base/cli/flag k8s.io/component-base/codec k8s.io/component-base/config @@ -1108,10 +1118,10 @@ k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/metrics/prometheus/restclient k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/cri-api v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/cri-api +# k8s.io/cri-api v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/cri-api k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# k8s.io/csi-translation-lib v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/csi-translation-lib +# k8s.io/csi-translation-lib v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/csi-translation-lib k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins # k8s.io/klog v1.0.0 => k8s.io/klog v1.0.0 @@ -1123,15 +1133,15 @@ k8s.io/kube-openapi/pkg/handler k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto -# k8s.io/kube-proxy v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kube-proxy +# k8s.io/kube-proxy v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kube-proxy k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kubectl v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kubectl +# k8s.io/kubectl v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kubectl k8s.io/kubectl/pkg/scale -# k8s.io/kubelet v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/kubelet +# k8s.io/kubelet v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/kubelet k8s.io/kubelet/config/v1beta1 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 k8s.io/kubelet/pkg/apis/pluginregistration/v1 -# k8s.io/kubernetes v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes +# k8s.io/kubernetes v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes k8s.io/kubernetes/cmd/kube-proxy/app k8s.io/kubernetes/cmd/kubelet/app k8s.io/kubernetes/cmd/kubelet/app/options @@ -1422,7 +1432,7 @@ k8s.io/kubernetes/pkg/volume/vsphere_volume k8s.io/kubernetes/pkg/windows/service k8s.io/kubernetes/test/utils k8s.io/kubernetes/third_party/forked/golang/expansion -# k8s.io/legacy-cloud-providers v0.0.0 => /tmp/ca-update-vendor.RRRk/kubernetes/staging/src/k8s.io/legacy-cloud-providers +# k8s.io/legacy-cloud-providers v0.0.0 => /tmp/ca-update-vendor.CTcs/kubernetes/staging/src/k8s.io/legacy-cloud-providers k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure k8s.io/legacy-cloud-providers/azure/auth @@ -1449,7 +1459,7 @@ k8s.io/legacy-cloud-providers/openstack k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc => k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc +# k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31 => k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/exec @@ -1464,11 +1474,11 @@ 
k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/strings k8s.io/utils/trace -# sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd +# sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c sigs.k8s.io/structured-merge-diff/v3/fieldpath sigs.k8s.io/structured-merge-diff/v3/merge sigs.k8s.io/structured-merge-diff/v3/schema sigs.k8s.io/structured-merge-diff/v3/typed sigs.k8s.io/structured-merge-diff/v3/value -# sigs.k8s.io/yaml v1.1.0 => sigs.k8s.io/yaml v1.1.0 +# sigs.k8s.io/yaml v1.2.0 => sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath/fromvalue.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath/fromvalue.go index 2fc74765fa..36b2d4a916 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath/fromvalue.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath/fromvalue.go @@ -25,9 +25,10 @@ func SetFromValue(v value.Value) *Set { s := NewSet() w := objectWalker{ - path: Path{}, - value: v, - do: func(p Path) { s.Insert(p) }, + path: Path{}, + value: v, + allocator: value.NewFreelistAllocator(), + do: func(p Path) { s.Insert(p) }, } w.walk() @@ -35,8 +36,9 @@ func SetFromValue(v value.Value) *Set { } type objectWalker struct { - path Path - value value.Value + path Path + value value.Value + allocator value.Allocator do func(Path) } @@ -55,11 +57,15 @@ func (w *objectWalker) walk() { case w.value.IsList(): // If the list were atomic, we'd break here, but we don't have // a schema, so we can't tell. - list := w.value.AsList() - for i := 0; i < list.Length(); i++ { + l := w.value.AsListUsing(w.allocator) + defer w.allocator.Free(l) + iter := l.RangeUsing(w.allocator) + defer w.allocator.Free(iter) + for iter.Next() { + i, value := iter.Item() w2 := *w - w2.path = append(w.path, GuessBestListPathElement(i, list.At(i))) - w2.value = list.At(i) + w2.path = append(w.path, w.GuessBestListPathElement(i, value)) + w2.value = value w2.walk() } return @@ -67,7 +73,9 @@ func (w *objectWalker) walk() { // If the map/struct were atomic, we'd break here, but we don't // have a schema, so we can't tell. - w.value.AsMap().Iterate(func(k string, val value.Value) bool { + m := w.value.AsMapUsing(w.allocator) + defer w.allocator.Free(m) + m.IterateUsing(w.allocator, func(k string, val value.Value) bool { w2 := *w w2.path = append(w.path, PathElement{FieldName: &k}) w2.value = val @@ -96,7 +104,7 @@ var AssociativeListCandidateFieldNames = []string{ // referencing by index is acceptable. Currently this is done by checking // whether item has any of the fields listed in // AssociativeListCandidateFieldNames which have scalar values. -func GuessBestListPathElement(index int, item value.Value) PathElement { +func (w *objectWalker) GuessBestListPathElement(index int, item value.Value) PathElement { if !item.IsMap() { // Non map items could be parts of sets or regular "atomic" // lists. 
We won't try to guess whether something should be a @@ -104,9 +112,11 @@ func GuessBestListPathElement(index int, item value.Value) PathElement { return PathElement{Index: &index} } + m := item.AsMapUsing(w.allocator) + defer w.allocator.Free(m) var keys value.FieldList for _, name := range AssociativeListCandidateFieldNames { - f, ok := item.AsMap().Get(name) + f, ok := m.Get(name) if !ok { continue } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath/serialize.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath/serialize.go index 76d2e15e55..b992b93c5f 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath/serialize.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath/serialize.go @@ -40,7 +40,7 @@ func (s *Set) ToJSONStream(w io.Writer) error { var r reusableBuilder stream.WriteObjectStart() - err := s.emitContents_v1(false, stream, &r) + err := s.emitContentsV1(false, stream, &r) if err != nil { return err } @@ -76,7 +76,7 @@ func (r *reusableBuilder) reset() *bytes.Buffer { return &r.Buffer } -func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error { +func (s *Set) emitContentsV1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error { mi, ci := 0, 0 first := true preWrite := func() { @@ -97,7 +97,7 @@ func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reus mpe := s.Members.members[mi] cpe := s.Children.members[ci].pathElement - if mpe.Less(cpe) { + if c := mpe.Compare(cpe); c < 0 { preWrite() if err := serializePathElementToWriter(r.reset(), mpe); err != nil { return err @@ -105,14 +105,14 @@ func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reus stream.WriteObjectField(r.unsafeString()) stream.WriteEmptyObject() mi++ - } else if cpe.Less(mpe) { + } else if c > 0 { preWrite() if err := serializePathElementToWriter(r.reset(), cpe); err != nil { return err } stream.WriteObjectField(r.unsafeString()) stream.WriteObjectStart() - if err := s.Children.members[ci].set.emitContents_v1(false, stream, r); err != nil { + if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil { return err } stream.WriteObjectEnd() @@ -124,7 +124,7 @@ func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reus } stream.WriteObjectField(r.unsafeString()) stream.WriteObjectStart() - if err := s.Children.members[ci].set.emitContents_v1(true, stream, r); err != nil { + if err := s.Children.members[ci].set.emitContentsV1(true, stream, r); err != nil { return err } stream.WriteObjectEnd() @@ -154,7 +154,7 @@ func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reus } stream.WriteObjectField(r.unsafeString()) stream.WriteObjectStart() - if err := s.Children.members[ci].set.emitContents_v1(false, stream, r); err != nil { + if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil { return err } stream.WriteObjectEnd() @@ -169,7 +169,7 @@ func (s *Set) FromJSON(r io.Reader) error { // The iterator pool is completely useless for memory management, grrr. iter := jsoniter.Parse(jsoniter.ConfigCompatibleWithStandardLibrary, r, 4096) - found, _ := readIter_v1(iter) + found, _ := readIterV1(iter) if found == nil { *s = Set{} } else { @@ -180,7 +180,7 @@ func (s *Set) FromJSON(r io.Reader) error { // returns true if this subtree is also (or only) a member of parent; s is nil // if there are no further children. 
-func readIter_v1(iter *jsoniter.Iterator) (children *Set, isMember bool) { +func readIterV1(iter *jsoniter.Iterator) (children *Set, isMember bool) { iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool { if key == "." { isMember = true @@ -199,7 +199,7 @@ func readIter_v1(iter *jsoniter.Iterator) (children *Set, isMember bool) { iter.Skip() return true } - grandchildren, childIsMember := readIter_v1(iter) + grandchildren, childIsMember := readIterV1(iter) if childIsMember { if children == nil { children = &Set{} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/helpers.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/helpers.go index 90069e2dd7..0834f4d327 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/helpers.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/helpers.go @@ -154,7 +154,7 @@ func handleAtom(a schema.Atom, tr schema.TypeRef, ah atomHandler) ValidationErro } // Returns the list, or an error. Reminder: nil is a valid list and might be returned. -func listValue(val value.Value) (value.List, error) { +func listValue(a value.Allocator, val value.Value) (value.List, error) { if val.IsNull() { // Null is a valid list. return nil, nil @@ -162,11 +162,11 @@ func listValue(val value.Value) (value.List, error) { if !val.IsList() { return nil, fmt.Errorf("expected list, got %v", val) } - return val.AsList(), nil + return val.AsListUsing(a), nil } // Returns the map, or an error. Reminder: nil is a valid map and might be returned. -func mapValue(val value.Value) (value.Map, error) { +func mapValue(a value.Allocator, val value.Value) (value.Map, error) { if val == nil { return nil, fmt.Errorf("expected map, got nil") } @@ -177,10 +177,10 @@ func mapValue(val value.Value) (value.Map, error) { if !val.IsMap() { return nil, fmt.Errorf("expected map, got %v", val) } - return val.AsMap(), nil + return val.AsMapUsing(a), nil } -func keyedAssociativeListItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(a value.Allocator, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.IsNull() { // For now, the keys are required which means that null entries @@ -191,8 +191,10 @@ func keyedAssociativeListItemToPathElement(list *schema.List, index int, child v return pe, errors.New("associative list with keys may not have non-map elements") } keyMap := value.FieldList{} + m := child.AsMapUsing(a) + defer a.Free(m) for _, fieldName := range list.Keys { - if val, ok := child.AsMap().Get(fieldName); ok { + if val, ok := m.Get(fieldName); ok { keyMap = append(keyMap, value.Field{Name: fieldName, Value: val}) } else { return pe, fmt.Errorf("associative list with keys has an element that omits key field %q", fieldName) @@ -223,10 +225,10 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel } } -func listItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func listItemToPathElement(a value.Allocator, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { if list.ElementRelationship == schema.Associative { if len(list.Keys) > 0 { - return keyedAssociativeListItemToPathElement(list, index, child) + return keyedAssociativeListItemToPathElement(a, list, index, child) } // If there's no keys, then we must be a set of primitives. 
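
The helpers.go hunk above threads a value.Allocator through listValue, mapValue, and listItemToPathElement so that callers can hand the allocated Map or List views back to a free list once done. A short sketch of the caller-side pattern, using only APIs that appear in this diff (NewFreelistAllocator, NewValueInterface, AsMapUsing, Free, Length); countFields is a hypothetical example function, not code from the patch:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

// countFields demonstrates the allocator pattern used by the walkers in
// this patch: obtain the Map view via AsMapUsing, and return it to the
// allocator with Free when it is no longer needed.
func countFields(v value.Value, a value.Allocator) int {
	if !v.IsMap() {
		return 0
	}
	m := v.AsMapUsing(a)
	defer a.Free(m)
	return m.Length()
}

func main() {
	a := value.NewFreelistAllocator()
	v := value.NewValueInterface(map[string]interface{}{"a": 1, "b": 2})
	fmt.Println(countFields(v, a)) // 2
}
```
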
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/merge.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/merge.go index 93a1002046..e9573cc231 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/merge.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/merge.go @@ -48,6 +48,8 @@ type mergingWalker struct { // Allocate only as many walkers as needed for the depth by storing them here. spareWalkers *[]*mergingWalker + + allocator value.Allocator } // merge rules examine w.lhs and w.rhs (up to one of which may be nil) and @@ -148,18 +150,15 @@ func (w *mergingWalker) finishDescent(w2 *mergingWalker) { *w.spareWalkers = append(*w.spareWalkers, w2) } -func (w *mergingWalker) derefMap(prefix string, v value.Value, dest *value.Map) (errs ValidationErrors) { - // taking dest as input so that it can be called as a one-liner with - // append. +func (w *mergingWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { if v == nil { - return nil + return nil, nil } - m, err := mapValue(v) + m, err := mapValue(w.allocator, v) if err != nil { - return errorf("%v: %v", prefix, err) + return nil, errorf("%v: %v", prefix, err) } - *dest = m - return nil + return m, nil } func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { @@ -184,7 +183,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err if rhs != nil { for i := 0; i < rhs.Length(); i++ { child := rhs.At(i) - pe, err := listItemToPathElement(t, i, child) + pe, err := listItemToPathElement(w.allocator, t, i, child) if err != nil { errs = append(errs, errorf("rhs: element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -205,7 +204,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err if lhs != nil { for i := 0; i < lhs.Length(); i++ { child := lhs.At(i) - pe, err := listItemToPathElement(t, i, child) + pe, err := listItemToPathElement(w.allocator, t, i, child) if err != nil { errs = append(errs, errorf("lhs: element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -253,24 +252,26 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err return errs } -func (w *mergingWalker) derefList(prefix string, v value.Value, dest *value.List) (errs ValidationErrors) { - // taking dest as input so that it can be called as a one-liner with - // append. 
+func (w *mergingWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { if v == nil { - return nil + return nil, nil } - l, err := listValue(v) + l, err := listValue(w.allocator, v) if err != nil { - return errorf("%v: %v", prefix, err) + return nil, errorf("%v: %v", prefix, err) } - *dest = l - return nil + return l, nil } func (w *mergingWalker) doList(t *schema.List) (errs ValidationErrors) { - var lhs, rhs value.List - w.derefList("lhs: ", w.lhs, &lhs) - w.derefList("rhs: ", w.rhs, &rhs) + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } // If both lhs and rhs are empty/null, treat it as a // leaf: this helps preserve the empty/null @@ -311,31 +312,10 @@ func (w *mergingWalker) visitMapItem(t *schema.Map, out map[string]interface{}, func (w *mergingWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { out := map[string]interface{}{} - if lhs != nil { - lhs.Iterate(func(key string, val value.Value) bool { - var rval value.Value - if rhs != nil { - if item, ok := rhs.Get(key); ok { - rval = item - defer rval.Recycle() - } - } - errs = append(errs, w.visitMapItem(t, out, key, val, rval)...) - return true - }) - } - - if rhs != nil { - rhs.Iterate(func(key string, val value.Value) bool { - if lhs != nil { - if lhs.Has(key) { - return true - } - } - errs = append(errs, w.visitMapItem(t, out, key, nil, val)...) - return true - }) - } + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) if len(out) > 0 { i := interface{}(out) w.out = &i @@ -345,14 +325,18 @@ func (w *mergingWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs V } func (w *mergingWalker) doMap(t *schema.Map) (errs ValidationErrors) { - var lhs, rhs value.Map - w.derefMap("lhs: ", w.lhs, &lhs) - w.derefMap("rhs: ", w.rhs, &rhs) - + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } // If both lhs and rhs are empty/null, treat it as a // leaf: this helps preserve the empty/null // distinction. 
- emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { w.doLeaf() diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/remove.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/remove.go index 7753d69df6..f30e02a618 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/remove.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/remove.go @@ -20,17 +20,19 @@ import ( ) type removingWalker struct { - value value.Value - out interface{} - schema *schema.Schema - toRemove *fieldpath.Set + value value.Value + out interface{} + schema *schema.Schema + toRemove *fieldpath.Set + allocator value.Allocator } func removeItemsWithSchema(val value.Value, toRemove *fieldpath.Set, schema *schema.Schema, typeRef schema.TypeRef) value.Value { w := &removingWalker{ - value: val, - schema: schema, - toRemove: toRemove, + value: val, + schema: schema, + toRemove: toRemove, + allocator: value.NewFreelistAllocator(), } resolveSchema(schema, typeRef, val, w) return value.NewValueInterface(w.out) @@ -42,18 +44,20 @@ func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { - l := w.value.AsList() - + l := w.value.AsListUsing(w.allocator) + defer w.allocator.Free(l) // If list is null, empty, or atomic just return if l == nil || l.Length() == 0 || t.ElementRelationship == schema.Atomic { return nil } var newItems []interface{} - for i := 0; i < l.Length(); i++ { - item := l.At(i) + iter := l.RangeUsing(w.allocator) + defer w.allocator.Free(iter) + for iter.Next() { + i, item := iter.Item() // Ignore error because we have already validated this list - pe, _ := listItemToPathElement(t, i, item) + pe, _ := listItemToPathElement(w.allocator, t, i, item) path, _ := fieldpath.MakePath(pe) if w.toRemove.Has(path) { continue @@ -70,10 +74,12 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { } func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { - m := w.value.AsMap() - + m := w.value.AsMapUsing(w.allocator) + if m != nil { + defer w.allocator.Free(m) + } // If map is null, empty, or atomic just return - if m == nil || m.Length() == 0 || t.ElementRelationship == schema.Atomic { + if m == nil || m.Empty() || t.ElementRelationship == schema.Atomic { return nil } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/tofieldset.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/tofieldset.go index 1c11eacdb1..b3c4ff0021 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/tofieldset.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/tofieldset.go @@ -34,6 +34,7 @@ func (tv TypedValue) toFieldSetWalker() *toFieldSetWalker { v.schema = tv.schema v.typeRef = tv.typeRef v.set = &fieldpath.Set{} + v.allocator = value.NewFreelistAllocator() return v } @@ -55,6 +56,7 @@ type toFieldSetWalker struct { // Allocate only as many walkers as needed for the depth by storing them here. 
spareWalkers *[]*toFieldSetWalker + allocator value.Allocator } func (v *toFieldSetWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *toFieldSetWalker { @@ -94,7 +96,7 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { for i := 0; i < list.Length(); i++ { child := list.At(i) - pe, _ := listItemToPathElement(t, i, child) + pe, _ := listItemToPathElement(v.allocator, t, i, child) v2 := v.prepareDescent(pe, t.ElementType) v2.value = child errs = append(errs, v2.toFieldSet()...) @@ -106,8 +108,10 @@ func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs } func (v *toFieldSetWalker) doList(t *schema.List) (errs ValidationErrors) { - list, _ := listValue(v.value) - + list, _ := listValue(v.allocator, v.value) + if list != nil { + defer v.allocator.Free(list) + } if t.ElementRelationship == schema.Atomic { v.set.Insert(v.path) return nil @@ -143,8 +147,10 @@ func (v *toFieldSetWalker) visitMapItems(t *schema.Map, m value.Map) (errs Valid } func (v *toFieldSetWalker) doMap(t *schema.Map) (errs ValidationErrors) { - m, _ := mapValue(v.value) - + m, _ := mapValue(v.allocator, v.value) + if m != nil { + defer v.allocator.Free(m) + } if t.ElementRelationship == schema.Atomic { v.set.Insert(v.path) return nil diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/typed.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/typed.go index e9c08f39dd..4aa9a23871 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/typed.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/typed.go @@ -238,6 +238,9 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) mw.typeRef = lhs.typeRef mw.rule = rule mw.postItemHook = postRule + if mw.allocator == nil { + mw.allocator = value.NewFreelistAllocator() + } errs := mw.merge(nil) if len(errs) > 0 { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/validate.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/validate.go index 28e9c41af2..5c5a1cb64e 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/validate.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/typed/validate.go @@ -33,6 +33,9 @@ func (tv TypedValue) walker() *validatingObjectWalker { v.value = tv.value v.schema = tv.schema v.typeRef = tv.typeRef + if v.allocator == nil { + v.allocator = value.NewFreelistAllocator() + } return v } @@ -49,6 +52,7 @@ type validatingObjectWalker struct { // Allocate only as many walkers as needed for the depth by storing them here. 
spareWalkers *[]*validatingObjectWalker + allocator value.Allocator } func (v *validatingObjectWalker) prepareDescent(tr schema.TypeRef) *validatingObjectWalker { @@ -112,13 +116,14 @@ func (v *validatingObjectWalker) doScalar(t *schema.Scalar) ValidationErrors { func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { observedKeys := fieldpath.MakePathElementSet(list.Length()) for i := 0; i < list.Length(); i++ { - child := list.At(i) + child := list.AtUsing(v.allocator, i) + defer v.allocator.Free(child) var pe fieldpath.PathElement if t.ElementRelationship != schema.Associative { pe.Index = &i } else { var err error - pe, err = listItemToPathElement(t, i, child) + pe, err = listItemToPathElement(v.allocator, t, i, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -140,7 +145,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) } func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors) { - list, err := listValue(v.value) + list, err := listValue(v.allocator, v.value) if err != nil { return errorf(err.Error()) } @@ -149,13 +154,14 @@ func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors) return nil } + defer v.allocator.Free(list) errs = v.visitListItems(t, list) return errs } func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m value.Map) (errs ValidationErrors) { - m.Iterate(func(key string, val value.Value) bool { + m.IterateUsing(v.allocator, func(key string, val value.Value) bool { pe := fieldpath.PathElement{FieldName: &key} tr := t.ElementType if sf, ok := t.FindField(key); ok { @@ -175,15 +181,14 @@ func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m value.Map) (errs } func (v *validatingObjectWalker) doMap(t *schema.Map) (errs ValidationErrors) { - m, err := mapValue(v.value) + m, err := mapValue(v.allocator, v.value) if err != nil { return errorf(err.Error()) } - if m == nil { return nil } - + defer v.allocator.Free(m) errs = v.visitMapItems(t, m) return errs diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go new file mode 100644 index 0000000000..f70cd41674 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go @@ -0,0 +1,203 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// Allocator provides a value object allocation strategy. +// Value objects can be allocated by passing an allocator to the "Using" +// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...). +// Value objects returned from "Using" functions should be given back to the allocator +// once no longer needed by calling Allocator.Free(Value). +type Allocator interface { + // Free gives the allocator back any value objects returned by the "Using" + // receiver functions on the value interfaces. + // interface{} may be any of: Value, Map, List or Range. + Free(interface{}) + + // The unexported functions are for "Using" receiver functions of the value types + // to request what they need from the allocator. + allocValueUnstructured() *valueUnstructured + allocListUnstructuredRange() *listUnstructuredRange + allocValueReflect() *valueReflect + allocMapReflect() *mapReflect + allocStructReflect() *structReflect + allocListReflect() *listReflect + allocListReflectRange() *listReflectRange +} + +// HeapAllocator simply allocates objects to the heap. It is the default +// allocator used by receiver functions on the value interfaces that do not accept +// an allocator and should be used whenever allocating objects that will not +// be given back to an allocator by calling Allocator.Free(Value). +var HeapAllocator = &heapAllocator{} + +type heapAllocator struct{} + +func (p *heapAllocator) allocValueUnstructured() *valueUnstructured { + return &valueUnstructured{} +} + +func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange { + return &listUnstructuredRange{vv: &valueUnstructured{}} +} + +func (p *heapAllocator) allocValueReflect() *valueReflect { + return &valueReflect{} +} + +func (p *heapAllocator) allocStructReflect() *structReflect { + return &structReflect{} +} + +func (p *heapAllocator) allocMapReflect() *mapReflect { + return &mapReflect{} +} + +func (p *heapAllocator) allocListReflect() *listReflect { + return &listReflect{} +} + +func (p *heapAllocator) allocListReflectRange() *listReflectRange { + return &listReflectRange{vr: &valueReflect{}} +} + +func (p *heapAllocator) Free(_ interface{}) {} + +// NewFreelistAllocator creates a freelist based allocator. +// This allocator provides fast allocation and freeing of short-lived value objects. + +// The freelists are bounded in size by freelistMaxSize. If more than this number of value objects are +// allocated at once, the excess will be returned to the heap for garbage collection when freed. + +// This allocator is not safe for concurrent use; it must not be accessed concurrently by goroutines. + +// This allocator works well for traversal of value data trees. Typical usage is to acquire +// a freelist at the beginning of the traversal and use it throughout +// for all temporary value access. +func NewFreelistAllocator() Allocator { + return &freelistAllocator{ + valueUnstructured: &freelist{new: func() interface{} { + return &valueUnstructured{} + }}, + listUnstructuredRange: &freelist{new: func() interface{} { + return &listUnstructuredRange{vv: &valueUnstructured{}} + }}, + valueReflect: &freelist{new: func() interface{} { + return &valueReflect{} + }}, + mapReflect: &freelist{new: func() interface{} { + return &mapReflect{} + }}, + structReflect: &freelist{new: func() interface{} { + return &structReflect{} + }}, + listReflect: &freelist{new: func() interface{} { + return &listReflect{} + }}, + listReflectRange: &freelist{new: func() interface{} { + return &listReflectRange{vr: &valueReflect{}} + }}, + } +}
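As an illustration of the intended calling pattern (a hypothetical standalone program, not part of this patch; it assumes the patched v3 value APIs NewValueInterface, AsMapUsing, IterateUsing, and Free):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	// Acquire one freelist allocator up front and reuse it for the whole
	// traversal, handing every "Using" result back to it via Free.
	a := value.NewFreelistAllocator()
	v := value.NewValueInterface(map[string]interface{}{
		"metadata": map[string]interface{}{"name": "example"},
	})
	m := v.AsMapUsing(a)
	defer a.Free(m)
	m.IterateUsing(a, func(key string, val value.Value) bool {
		fmt.Println(key) // val is only valid for the duration of this callback
		return true
	})
}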
+// Bound memory usage of freelists. This prevents the processing of very large lists from leaking memory. +// This limit is large enough for endpoints objects containing 1000 IP address entries. Freed objects +// that don't fit into the freelist are orphaned on the heap to be garbage collected. +const freelistMaxSize = 1000 + +type freelistAllocator struct { + valueUnstructured *freelist + listUnstructuredRange *freelist + valueReflect *freelist + mapReflect *freelist + structReflect *freelist + listReflect *freelist + listReflectRange *freelist +} + +type freelist struct { + list []interface{} + new func() interface{} +} + +func (f *freelist) allocate() interface{} { + var w2 interface{} + if n := len(f.list); n > 0 { + w2, f.list = f.list[n-1], f.list[:n-1] + } else { + w2 = f.new() + } + return w2 +} + +func (f *freelist) free(v interface{}) { + if len(f.list) < freelistMaxSize { + f.list = append(f.list, v) + } +} + +func (w *freelistAllocator) Free(value interface{}) { + switch v := value.(type) { + case *valueUnstructured: + v.Value = nil // don't hold references to unstructured objects + w.valueUnstructured.free(v) + case *listUnstructuredRange: + v.vv.Value = nil // don't hold references to unstructured objects + w.listUnstructuredRange.free(v) + case *valueReflect: + v.ParentMapKey = nil + v.ParentMap = nil + w.valueReflect.free(v) + case *mapReflect: + w.mapReflect.free(v) + case *structReflect: + w.structReflect.free(v) + case *listReflect: + w.listReflect.free(v) + case *listReflectRange: + v.vr.ParentMapKey = nil + v.vr.ParentMap = nil + w.listReflectRange.free(v) + } +} + +func (w *freelistAllocator) allocValueUnstructured() *valueUnstructured { + return w.valueUnstructured.allocate().(*valueUnstructured) +} + +func (w *freelistAllocator) allocListUnstructuredRange() *listUnstructuredRange { + return w.listUnstructuredRange.allocate().(*listUnstructuredRange) +} + +func (w *freelistAllocator) allocValueReflect() *valueReflect { + return w.valueReflect.allocate().(*valueReflect) +} + +func (w *freelistAllocator) allocStructReflect() *structReflect { + return w.structReflect.allocate().(*structReflect) +} + +func (w *freelistAllocator) allocMapReflect() *mapReflect { + return w.mapReflect.allocate().(*mapReflect) +} + +func (w *freelistAllocator) allocListReflect() *listReflect { + return w.listReflect.allocate().(*listReflect) +} + +func (w *freelistAllocator) allocListReflectRange() *listReflectRange { + return w.listReflectRange.allocate().(*listReflectRange) +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go index 7b87479a9b..be3c672494 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go @@ -27,7 +27,7 @@ type Field struct { Value Value } -// FieldList is a list of key-value pairs. Each field is expectUpdated to +// FieldList is a list of key-value pairs. Each field is expected to // have a different name. type FieldList []Field diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go index 4f5edcccad..0748f18e82 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go @@ -23,24 +23,74 @@ type List interface { // At returns the item at the given position in the list. It will // panic if the index is out of range. At(int) Value + // AtUsing uses the provided allocator and returns the item at the given + // position in the list. It will panic if the index is out of range. + // The returned Value should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + AtUsing(Allocator, int) Value + // Range returns a ListRange for iterating over the items in the list. + Range() ListRange + // RangeUsing uses the provided allocator and returns a ListRange for + // iterating over the items in the list. + // The returned Range should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + RangeUsing(Allocator) ListRange + // Equals compares the two lists, and returns true if they are the same, false otherwise. + // Implementations can use ListEquals as a general implementation for this method. + Equals(List) bool + // EqualsUsing uses the provided allocator and compares the two lists, and returns true if + // they are the same, false otherwise. Implementations can use ListEqualsUsing as a general + // implementation for this method. + EqualsUsing(Allocator, List) bool +} + +// ListRange represents a single iteration across the items of a list. +type ListRange interface { + // Next increments to the next item in the range, if there is one, and returns true, or returns false if there are no more items. + Next() bool + // Item returns the index and value of the current item in the range, or panics if there is no current item. + // For efficiency, Item may reuse the values returned by previous Item calls. Callers should be careful to avoid holding + // pointers to the value returned by Item() that escape the iteration loop since they become invalid once either + // Item() or Allocator.Free() is called. + Item() (index int, value Value) +} + +// EmptyRange is a ListRange with no items. +var EmptyRange = &emptyRange{} + +type emptyRange struct{} + +func (_ *emptyRange) Next() bool { + return false +} + +func (_ *emptyRange) Item() (index int, value Value) { + panic("Item called on empty ListRange") } // ListEquals compares two lists lexically. +// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. func ListEquals(lhs, rhs List) bool { + return ListEqualsUsing(HeapAllocator, lhs, rhs) +} + +// ListEqualsUsing uses the provided allocator and compares two lists lexically. +// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. +func ListEqualsUsing(a Allocator, lhs, rhs List) bool { if lhs.Length() != rhs.Length() { return false } - for i := 0; i < lhs.Length(); i++ { - lv := lhs.At(i) - rv := rhs.At(i) - if !Equals(lv, rv) { - lv.Recycle() - rv.Recycle() + lhsRange := lhs.RangeUsing(a) + defer a.Free(lhsRange) + rhsRange := rhs.RangeUsing(a) + defer a.Free(rhsRange) + + for lhsRange.Next() && rhsRange.Next() { + _, lv := lhsRange.Item() + _, rv := rhsRange.Item() + if !EqualsUsing(a, lv, rv) { return false } - lv.Recycle() - rv.Recycle() } return true } @@ -53,28 +103,37 @@ func ListLess(lhs, rhs List) bool { // ListCompare compares two lists lexically. The result will be 0 if lhs==rhs, -1 // if lhs < rhs, and +1 if lhs > rhs. func ListCompare(lhs, rhs List) int { - i := 0 + return ListCompareUsing(HeapAllocator, lhs, rhs) +} + +// ListCompareUsing uses the provided allocator and compares two lists lexically. The result will be 0 if lhs==rhs, -1 +// if lhs < rhs, and +1 if lhs > rhs.
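Before the ListCompareUsing implementation below, a sketch of how the ListRange contract defined above is driven from the caller's side (a hypothetical standalone program against the patched v3 value API):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	a := value.NewFreelistAllocator()
	l := value.NewValueInterface([]interface{}{"x", "y", "z"}).AsListUsing(a)
	defer a.Free(l)

	iter := l.RangeUsing(a)
	defer a.Free(iter)
	for iter.Next() {
		i, item := iter.Item()
		// item may be reused by the next Item() call, so consume it inside
		// the loop body; do not let the Value itself escape the iteration.
		fmt.Println(i, item.Unstructured())
	}
}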
+func ListCompareUsing(a Allocator, lhs, rhs List) int { + lhsRange := lhs.RangeUsing(a) + defer a.Free(lhsRange) + rhsRange := rhs.RangeUsing(a) + defer a.Free(rhsRange) + for { - if i >= lhs.Length() && i >= rhs.Length() { + lhsOk := lhsRange.Next() + rhsOk := rhsRange.Next() + if !lhsOk && !rhsOk { // Lists are the same length and all items are equal. return 0 } - if i >= lhs.Length() { + if !lhsOk { // LHS is shorter. return -1 } - if i >= rhs.Length() { + if !rhsOk { // RHS is shorter. return 1 } - lv := lhs.At(i) - rv := rhs.At(i) - if c := Compare(lv, rv); c != 0 { + _, lv := lhsRange.Item() + _, rv := rhsRange.Item() + if c := CompareUsing(a, lv, rv); c != 0 { return c } - lv.Recycle() - rv.Recycle() // The items are equal; continue. - i++ } } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go index 8626ffadb5..197d4c921d 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go @@ -16,7 +16,9 @@ limitations under the License. package value -import "reflect" +import ( + "reflect" +) type listReflect struct { Value reflect.Value @@ -29,7 +31,12 @@ func (r listReflect) Length() int { func (r listReflect) At(i int) Value { val := r.Value - return mustWrapValueReflect(val.Index(i)) + return mustWrapValueReflect(val.Index(i), nil, nil) +} + +func (r listReflect) AtUsing(a Allocator, i int) Value { + val := r.Value + return a.allocValueReflect().mustReuse(val.Index(i), nil, nil, nil) } func (r listReflect) Unstructured() interface{} { @@ -40,3 +47,52 @@ func (r listReflect) Unstructured() interface{} { } return result } + +func (r listReflect) Range() ListRange { + return r.RangeUsing(HeapAllocator) +} + +func (r listReflect) RangeUsing(a Allocator) ListRange { + length := r.Value.Len() + if length == 0 { + return EmptyRange + } + rr := a.allocListReflectRange() + rr.list = r.Value + rr.i = -1 + rr.entry = TypeReflectEntryOf(r.Value.Type().Elem()) + return rr +} + +func (r listReflect) Equals(other List) bool { + return r.EqualsUsing(HeapAllocator, other) +} +func (r listReflect) EqualsUsing(a Allocator, other List) bool { + if otherReflectList, ok := other.(*listReflect); ok { + return reflect.DeepEqual(r.Value.Interface(), otherReflectList.Value.Interface()) + } + return ListEqualsUsing(a, &r, other) +} + +type listReflectRange struct { + list reflect.Value + vr *valueReflect + i int + entry *TypeReflectCacheEntry +} + +func (r *listReflectRange) Next() bool { + r.i += 1 + return r.i < r.list.Len() +} + +func (r *listReflectRange) Item() (index int, value Value) { + if r.i < 0 { + panic("Item() called before first calling Next()") + } + if r.i >= r.list.Len() { + panic("Item() called on ListRange with no more items") + } + v := r.list.Index(r.i) + return r.i, r.vr.mustReuse(v, r.entry, nil, nil) +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go index 044249e15f..64cd8e7c0c 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go @@ -25,3 +25,50 @@ func (l listUnstructured) Length() int { func (l listUnstructured) At(i int) Value { return 
NewValueInterface(l[i]) } + +func (l listUnstructured) AtUsing(a Allocator, i int) Value { + return a.allocValueUnstructured().reuse(l[i]) +} + +func (l listUnstructured) Equals(other List) bool { + return l.EqualsUsing(HeapAllocator, other) +} + +func (l listUnstructured) EqualsUsing(a Allocator, other List) bool { + return ListEqualsUsing(a, &l, other) +} + +func (l listUnstructured) Range() ListRange { + return l.RangeUsing(HeapAllocator) +} + +func (l listUnstructured) RangeUsing(a Allocator) ListRange { + if len(l) == 0 { + return EmptyRange + } + r := a.allocListUnstructuredRange() + r.list = l + r.i = -1 + return r +} + +type listUnstructuredRange struct { + list listUnstructured + vv *valueUnstructured + i int +} + +func (r *listUnstructuredRange) Next() bool { + r.i += 1 + return r.i < len(r.list) +} + +func (r *listUnstructuredRange) Item() (index int, value Value) { + if r.i < 0 { + panic("Item() called before first calling Next()") + } + if r.i >= len(r.list) { + panic("Item() called on ListRange with no more items") + } + return r.i, r.vv.reuse(r.list[r.i]) +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go index da7f10ed23..168b9fa082 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go @@ -18,7 +18,6 @@ package value import ( "sort" - "strings" ) // Map represents a Map or Go structure. @@ -27,6 +26,11 @@ type Map interface { Set(key string, val Value) // Get returns the value for the given key, if present, or (nil, false) otherwise. Get(key string) (Value, bool) + // GetUsing uses the provided allocator and returns the value for the given key, + // if present, or (nil, false) otherwise. + // The returned Value should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + GetUsing(a Allocator, key string) (Value, bool) // Has returns true if the key is present, or false otherwise. Has(key string) bool // Delete removes the key from the map. @@ -34,12 +38,162 @@ type Map interface { // Equals compares the two maps, and returns true if they are the same, false otherwise. // Implementations can use MapEquals as a general implementation for this method. Equals(other Map) bool + // EqualsUsing uses the provided allocator and compares the two maps, and returns true if + // they are the same, false otherwise. Implementations can use MapEqualsUsing as a general + // implementation for this method. + EqualsUsing(a Allocator, other Map) bool // Iterate runs the given function for each key/value in the // map. Returning false in the closure prematurely stops the // iteration. Iterate(func(key string, value Value) bool) bool + // IterateUsing uses the provided allocator and runs the given function for each key/value + // in the map. Returning false in the closure prematurely stops the iteration. + IterateUsing(Allocator, func(key string, value Value) bool) bool // Length returns the number of items in the map. Length() int + // Empty returns true if the map is empty. + Empty() bool + // Zip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called + // with the values from both maps, otherwise it is called with the value of the map that contains the key and nil + // for the map that does not contain the key.
Returning false in the closure prematurely stops the iteration. + Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool + // ZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps + // contain a value for a given key, fn is called with the values from both maps, otherwise it is called with + // the value of the map that contains the key and nil for the map that does not contain the key. Returning + // false in the closure prematurely stops the iteration. + ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool +} + +// MapTraverseOrder defines the map traversal ordering available. +type MapTraverseOrder int + +const ( + // Unordered indicates that the map traversal has no ordering requirement. + Unordered = iota + // LexicalKeyOrder indicates that the map traversal is ordered by key, lexically. + LexicalKeyOrder +) + +// MapZip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called +// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil +// for the other map. Returning false in the closure prematurely stops the iteration. +func MapZip(lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return MapZipUsing(HeapAllocator, lhs, rhs, order, fn) +} + +// MapZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps +// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with +// the value of the map that contains the key and nil for the other map. Returning false in the closure +// prematurely stops the iteration. +func MapZipUsing(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if lhs != nil { + return lhs.ZipUsing(a, rhs, order, fn) + } + if rhs != nil { + return rhs.ZipUsing(a, lhs, order, func(key string, rhs, lhs Value) bool { // arg positions of lhs and rhs deliberately swapped + return fn(key, lhs, rhs) + }) + } + return true +} + +// defaultMapZip provides a default implementation of Zip for implementations that do not need to provide +// their own optimized implementation. 
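Before the default implementation that follows, a caller's-eye sketch of the zip contract (a hypothetical standalone program; keys present on only one side arrive with a nil Value for the other side):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	lhs := value.NewValueInterface(map[string]interface{}{"a": int64(1), "b": int64(2)}).AsMap()
	rhs := value.NewValueInterface(map[string]interface{}{"b": int64(3), "c": int64(4)}).AsMap()

	// Visits a (lhs only), b (both sides), c (rhs only), in lexical key order.
	value.MapZip(lhs, rhs, value.LexicalKeyOrder, func(key string, lhsVal, rhsVal value.Value) bool {
		fmt.Println(key, "in lhs:", lhsVal != nil, "in rhs:", rhsVal != nil)
		return true
	})
}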
+func defaultMapZip(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + switch order { + case Unordered: + return unorderedMapZip(a, lhs, rhs, fn) + case LexicalKeyOrder: + return lexicalKeyOrderedMapZip(a, lhs, rhs, fn) + default: + panic("Unsupported map order") + } +} + +func unorderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { + if (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) { + return true + } + + if lhs != nil { + ok := lhs.IterateUsing(a, func(key string, lhsValue Value) bool { + var rhsValue Value + if rhs != nil { + if item, ok := rhs.GetUsing(a, key); ok { + rhsValue = item + defer a.Free(rhsValue) + } + } + return fn(key, lhsValue, rhsValue) + }) + if !ok { + return false + } + } + if rhs != nil { + return rhs.IterateUsing(a, func(key string, rhsValue Value) bool { + if lhs == nil || !lhs.Has(key) { + return fn(key, nil, rhsValue) + } + return true + }) + } + return true +} + +func lexicalKeyOrderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { + var lhsLength, rhsLength int + var orderedLength int // rough estimate of length of union of map keys + if lhs != nil { + lhsLength = lhs.Length() + orderedLength = lhsLength + } + if rhs != nil { + rhsLength = rhs.Length() + if rhsLength > orderedLength { + orderedLength = rhsLength + } + } + if lhsLength == 0 && rhsLength == 0 { + return true + } + + ordered := make([]string, 0, orderedLength) + if lhs != nil { + lhs.IterateUsing(a, func(key string, _ Value) bool { + ordered = append(ordered, key) + return true + }) + } + if rhs != nil { + rhs.IterateUsing(a, func(key string, _ Value) bool { + if lhs == nil || !lhs.Has(key) { + ordered = append(ordered, key) + } + return true + }) + } + sort.Strings(ordered) + for _, key := range ordered { + var litem, ritem Value + if lhs != nil { + litem, _ = lhs.GetUsing(a, key) + } + if rhs != nil { + ritem, _ = rhs.GetUsing(a, key) + } + ok := fn(key, litem, ritem) + if litem != nil { + a.Free(litem) + } + if ritem != nil { + a.Free(ritem) + } + if !ok { + return false + } + } + return true } // MapLess compares two maps lexically. @@ -49,65 +203,68 @@ func MapLess(lhs, rhs Map) bool { // MapCompare compares two maps lexically. func MapCompare(lhs, rhs Map) int { - lorder := make([]string, 0, lhs.Length()) - lhs.Iterate(func(key string, _ Value) bool { - lorder = append(lorder, key) - return true - }) - sort.Strings(lorder) - rorder := make([]string, 0, rhs.Length()) - rhs.Iterate(func(key string, _ Value) bool { - rorder = append(rorder, key) - return true - }) - sort.Strings(rorder) + return MapCompareUsing(HeapAllocator, lhs, rhs) +} - i := 0 - for { - if i >= len(lorder) && i >= len(rorder) { - // Maps are the same length and all items are equal. - return 0 - } - if i >= len(lorder) { - // LHS is shorter. - return -1 - } - if i >= len(rorder) { - // RHS is shorter. - return 1 - } - if c := strings.Compare(lorder[i], rorder[i]); c != 0 { - return c - } - litem, _ := lhs.Get(lorder[i]) - ritem, _ := rhs.Get(rorder[i]) - if c := Compare(litem, ritem); c != 0 { - return c - } - litem.Recycle() - ritem.Recycle() - // The items are equal; continue. - i++ +// MapCompareUsing uses the provided allocator and compares two maps lexically. 
+func MapCompareUsing(a Allocator, lhs, rhs Map) int { + c := 0 + var llength, rlength int + if lhs != nil { + llength = lhs.Length() } + if rhs != nil { + rlength = rhs.Length() + } + if llength == 0 && rlength == 0 { + return 0 + } + i := 0 + MapZipUsing(a, lhs, rhs, LexicalKeyOrder, func(key string, lhs, rhs Value) bool { + switch { + case i == llength: + c = -1 + case i == rlength: + c = 1 + case lhs == nil: + c = 1 + case rhs == nil: + c = -1 + default: + c = CompareUsing(a, lhs, rhs) + } + i++ + return c == 0 + }) + return c } // MapEquals returns true if lhs == rhs, false otherwise. This function // acts on generic types and should not be used by callers, but can help // implement Map.Equals. +// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. func MapEquals(lhs, rhs Map) bool { + return MapEqualsUsing(HeapAllocator, lhs, rhs) +} + +// MapEqualsUsing uses the provided allocator and returns true if lhs == rhs, +// false otherwise. This function acts on generic types and should not be used +// by callers, but can help implement Map.Equals. +// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. +func MapEqualsUsing(a Allocator, lhs, rhs Map) bool { + if lhs == nil && rhs == nil { + return true + } + if lhs == nil || rhs == nil { + return false + } if lhs.Length() != rhs.Length() { return false } - return lhs.Iterate(func(k string, v Value) bool { - vo, ok := rhs.Get(k) - if !ok { + return MapZipUsing(a, lhs, rhs, Unordered, func(key string, lhs, rhs Value) bool { + if lhs == nil || rhs == nil { return false } - if !Equals(v, vo) { - vo.Recycle() - return false - } - vo.Recycle() - return true + return EqualsUsing(a, lhs, rhs) }) } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go index 2babf2913d..dc8b8c7200 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go @@ -16,7 +16,9 @@ limitations under the License. 
package value -import "reflect" +import ( + "reflect" +) type mapReflect struct { valueReflect @@ -27,13 +29,27 @@ func (r mapReflect) Length() int { return val.Len() } +func (r mapReflect) Empty() bool { + val := r.Value + return val.Len() == 0 +} + func (r mapReflect) Get(key string) (Value, bool) { - mapKey := r.toMapKey(key) - val := r.Value.MapIndex(mapKey) - if !val.IsValid() { + return r.GetUsing(HeapAllocator, key) +} + +func (r mapReflect) GetUsing(a Allocator, key string) (Value, bool) { + k, v, ok := r.get(key) + if !ok { return nil, false } - return mustWrapValueReflectMapItem(&r.Value, &mapKey, val), val != reflect.Value{} + return a.allocValueReflect().mustReuse(v, nil, &r.Value, &k), true +} + +func (r mapReflect) get(k string) (key, value reflect.Value, ok bool) { + mapKey := r.toMapKey(k) + val := r.Value.MapIndex(mapKey) + return mapKey, val, val.IsValid() && val != reflect.Value{} } func (r mapReflect) Has(key string) bool { @@ -61,21 +77,29 @@ func (r mapReflect) toMapKey(key string) reflect.Value { } func (r mapReflect) Iterate(fn func(string, Value) bool) bool { - return eachMapEntry(r.Value, func(s string, value reflect.Value) bool { - mapVal := mustWrapValueReflect(value) - defer mapVal.Recycle() - return fn(s, mapVal) + return r.IterateUsing(HeapAllocator, fn) +} + +func (r mapReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { + if r.Value.Len() == 0 { + return true + } + v := a.allocValueReflect() + defer a.Free(v) + return eachMapEntry(r.Value, func(e *TypeReflectCacheEntry, key reflect.Value, value reflect.Value) bool { + return fn(key.String(), v.mustReuse(value, e, &r.Value, &key)) }) } -func eachMapEntry(val reflect.Value, fn func(string, reflect.Value) bool) bool { +func eachMapEntry(val reflect.Value, fn func(*TypeReflectCacheEntry, reflect.Value, reflect.Value) bool) bool { iter := val.MapRange() + entry := TypeReflectEntryOf(val.Type().Elem()) for iter.Next() { next := iter.Value() if !next.IsValid() { continue } - if !fn(iter.Key().String(), next) { + if !fn(entry, iter.Key(), next) { return false } } @@ -92,16 +116,94 @@ func (r mapReflect) Unstructured() interface{} { } func (r mapReflect) Equals(m Map) bool { - if r.Length() != m.Length() { + return r.EqualsUsing(HeapAllocator, m) +} + +func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { + lhsLength := r.Length() + rhsLength := m.Length() + if lhsLength != rhsLength { return false } - - // TODO: Optimize to avoid Iterate looping here by using r.Value.MapRange or similar if it improves performance. + if lhsLength == 0 { + return true + } + vr := a.allocValueReflect() + defer a.Free(vr) + entry := TypeReflectEntryOf(r.Value.Type().Elem()) return m.Iterate(func(key string, value Value) bool { - lhsVal, ok := r.Get(key) + _, lhsVal, ok := r.get(key) if !ok { return false } - return Equals(lhsVal, value) + return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) }) } + +func (r mapReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return r.ZipUsing(HeapAllocator, other, order, fn) +} + +func (r mapReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if otherMapReflect, ok := other.(*mapReflect); ok && order == Unordered { + return r.unorderedReflectZip(a, otherMapReflect, fn) + } + return defaultMapZip(a, &r, other, order, fn) +} + +// unorderedReflectZip provides an optimized unordered zip for mapReflect types. 
+func (r mapReflect) unorderedReflectZip(a Allocator, other *mapReflect, fn func(key string, lhs, rhs Value) bool) bool { + if r.Empty() && (other == nil || other.Empty()) { + return true + } + + lhs := r.Value + lhsEntry := TypeReflectEntryOf(lhs.Type().Elem()) + + // map lookup via reflection is expensive enough that it is better to keep track of visited keys + visited := map[string]struct{}{} + + vlhs, vrhs := a.allocValueReflect(), a.allocValueReflect() + defer a.Free(vlhs) + defer a.Free(vrhs) + + if other != nil { + rhs := other.Value + rhsEntry := TypeReflectEntryOf(rhs.Type().Elem()) + iter := rhs.MapRange() + + for iter.Next() { + key := iter.Key() + keyString := key.String() + next := iter.Value() + if !next.IsValid() { + continue + } + rhsVal := vrhs.mustReuse(next, rhsEntry, &rhs, &key) + visited[keyString] = struct{}{} + var lhsVal Value + if _, v, ok := r.get(keyString); ok { + lhsVal = vlhs.mustReuse(v, lhsEntry, &lhs, &key) + } + if !fn(keyString, lhsVal, rhsVal) { + return false + } + } + } + + iter := lhs.MapRange() + for iter.Next() { + key := iter.Key() + if _, ok := visited[key.String()]; ok { + continue + } + next := iter.Value() + if !next.IsValid() { + continue + } + if !fn(key.String(), vlhs.mustReuse(next, lhsEntry, &lhs, &key), nil) { + return false + } + } + return true +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go index d778235d0c..d8e208628d 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go @@ -23,10 +23,14 @@ func (m mapUnstructuredInterface) Set(key string, val Value) { } func (m mapUnstructuredInterface) Get(key string) (Value, bool) { + return m.GetUsing(HeapAllocator, key) +} + +func (m mapUnstructuredInterface) GetUsing(a Allocator, key string) (Value, bool) { if v, ok := m[key]; !ok { return nil, false } else { - return NewValueInterface(v), true + return a.allocValueUnstructured().reuse(v), true } } @@ -40,16 +44,22 @@ func (m mapUnstructuredInterface) Delete(key string) { } func (m mapUnstructuredInterface) Iterate(fn func(key string, value Value) bool) bool { + return m.IterateUsing(HeapAllocator, fn) +} + +func (m mapUnstructuredInterface) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { + if len(m) == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) for k, v := range m { if ks, ok := k.(string); !ok { continue } else { - vv := NewValueInterface(v) - if !fn(ks, vv) { - vv.Recycle() + if !fn(ks, vv.reuse(v)) { return false } - vv.Recycle() } } return true @@ -59,29 +69,40 @@ func (m mapUnstructuredInterface) Length() int { return len(m) } +func (m mapUnstructuredInterface) Empty() bool { + return len(m) == 0 +} + func (m mapUnstructuredInterface) Equals(other Map) bool { - if m.Length() != other.Length() { + return m.EqualsUsing(HeapAllocator, other) +} + +func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { + lhsLength := m.Length() + rhsLength := other.Length() + if lhsLength != rhsLength { return false } - for k, v := range m { - ks, ok := k.(string) - if !ok { - return false - } - vo, ok := other.Get(ks) - if !ok { - return false - } - vv := NewValueInterface(v) - if !Equals(vv, vo) { - vv.Recycle() - vo.Recycle() - return false - } - vo.Recycle() - vv.Recycle() + if lhsLength == 0 { + 
return true } - return true + vv := a.allocValueUnstructured() + defer a.Free(vv) + return other.Iterate(func(key string, value Value) bool { + lhsVal, ok := m[key] + if !ok { + return false + } + return Equals(vv.reuse(lhsVal), value) + }) +} + +func (m mapUnstructuredInterface) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return m.ZipUsing(HeapAllocator, other, order, fn) +} + +func (m mapUnstructuredInterface) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return defaultMapZip(a, m, other, order, fn) } type mapUnstructuredString map[string]interface{} @@ -91,10 +112,13 @@ func (m mapUnstructuredString) Set(key string, val Value) { } func (m mapUnstructuredString) Get(key string) (Value, bool) { + return m.GetUsing(HeapAllocator, key) +} +func (m mapUnstructuredString) GetUsing(a Allocator, key string) (Value, bool) { if v, ok := m[key]; !ok { return nil, false } else { - return NewValueInterface(v), true + return a.allocValueUnstructured().reuse(v), true } } @@ -108,13 +132,19 @@ func (m mapUnstructuredString) Delete(key string) { } func (m mapUnstructuredString) Iterate(fn func(key string, value Value) bool) bool { + return m.IterateUsing(HeapAllocator, fn) +} + +func (m mapUnstructuredString) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { + if len(m) == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) for k, v := range m { - vv := NewValueInterface(v) - if !fn(k, vv) { - vv.Recycle() + if !fn(k, vv.reuse(v)) { return false } - vv.Recycle() } return true } @@ -124,22 +154,37 @@ func (m mapUnstructuredString) Length() int { } func (m mapUnstructuredString) Equals(other Map) bool { - if m.Length() != other.Length() { + return m.EqualsUsing(HeapAllocator, other) +} + +func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { + lhsLength := m.Length() + rhsLength := other.Length() + if lhsLength != rhsLength { return false } - for k, v := range m { - vo, ok := other.Get(k) + if lhsLength == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + return other.Iterate(func(key string, value Value) bool { + lhsVal, ok := m[key] if !ok { return false } - vv := NewValueInterface(v) - if !Equals(vv, vo) { - vo.Recycle() - vv.Recycle() - return false - } - vo.Recycle() - vv.Recycle() - } - return true + return Equals(vv.reuse(lhsVal), value) + }) +} + +func (m mapUnstructuredString) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return m.ZipUsing(HeapAllocator, other, order, fn) +} + +func (m mapUnstructuredString) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return defaultMapZip(a, m, other, order, fn) +} + +func (m mapUnstructuredString) Empty() bool { + return len(m) == 0 } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go new file mode 100644 index 0000000000..49e6dd1690 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go @@ -0,0 +1,463 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "sync" + "sync/atomic" +) + +// UnstructuredConverter defines how a type can be converted directly to unstructured. +// Types that implement json.Marshaler may also optionally implement this interface to provide a more +// direct and more efficient conversion. All types that choose to implement this interface must still +// implement this same conversion via json.Marshaler. +type UnstructuredConverter interface { + json.Marshaler // require that json.Marshaler is implemented + + // ToUnstructured returns the unstructured representation. + ToUnstructured() interface{} +} + +// TypeReflectCacheEntry keeps data gathered using reflection about how a type is converted to/from unstructured. +type TypeReflectCacheEntry struct { + isJsonMarshaler bool + ptrIsJsonMarshaler bool + isJsonUnmarshaler bool + ptrIsJsonUnmarshaler bool + isStringConvertable bool + ptrIsStringConvertable bool + + structFields map[string]*FieldCacheEntry + orderedStructFields []*FieldCacheEntry +} + +// FieldCacheEntry keeps data gathered using reflection about how the field of a struct is converted to/from +// unstructured. +type FieldCacheEntry struct { + // JsonName is the name of the field according to the json tags on the struct field. + JsonName string + // isOmitEmpty is true if the field has the json 'omitempty' tag. + isOmitEmpty bool + // fieldPath is a list of field indices (see FieldByIndex) to lookup the value of + // a field in a reflect.Value struct. The field indices in the list form a path used + // to traverse through intermediary 'inline' fields. + fieldPath [][]int + + fieldType reflect.Type + TypeEntry *TypeReflectCacheEntry +} + +func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { + return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) +} + +// GetFrom returns the field identified by this FieldCacheEntry from the provided struct. +func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value { + // field might be nested within 'inline' structs + for _, elem := range f.fieldPath { + structVal = structVal.FieldByIndex(elem) + } + return structVal +} + +var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() +var unstructuredConvertableType = reflect.TypeOf(new(UnstructuredConverter)).Elem() +var defaultReflectCache = newReflectCache() + +// TypeReflectEntryOf returns the TypeReflectCacheEntry of the provided reflect.Type. +func TypeReflectEntryOf(t reflect.Type) *TypeReflectCacheEntry { + cm := defaultReflectCache.get() + if record, ok := cm[t]; ok { + return record + } + updates := reflectCacheMap{} + result := typeReflectEntryOf(cm, t, updates) + if len(updates) > 0 { + defaultReflectCache.update(updates) + } + return result +}
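To make the cache contract concrete, a hypothetical probe of the exported entry points (the widget type is invented for illustration):

package main

import (
	"fmt"
	"reflect"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

type widget struct {
	Name  string `json:"name"`
	Count int    `json:"count,omitempty"`
}

func main() {
	entry := value.TypeReflectEntryOf(reflect.TypeOf(widget{}))
	for _, f := range entry.OrderedFields() {
		fmt.Println(f.JsonName) // "count", then "name": sorted by JSON field name
	}
	// false: widget implements neither json.Marshaler nor UnstructuredConverter,
	// so conversion to unstructured falls back to the generic struct walk.
	fmt.Println(entry.CanConvertToUnstructured())
}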
+// typeReflectEntryOf returns all updates needed to add the provided reflect.Type, and the types its fields transitively +// depend on, to the cache. +func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCacheMap) *TypeReflectCacheEntry { + if record, ok := cm[t]; ok { + return record + } + if record, ok := updates[t]; ok { + return record + } + typeEntry := &TypeReflectCacheEntry{ + isJsonMarshaler: t.Implements(marshalerType), + ptrIsJsonMarshaler: reflect.PtrTo(t).Implements(marshalerType), + isJsonUnmarshaler: reflect.PtrTo(t).Implements(unmarshalerType), + isStringConvertable: t.Implements(unstructuredConvertableType), + ptrIsStringConvertable: reflect.PtrTo(t).Implements(unstructuredConvertableType), + } + if t.Kind() == reflect.Struct { + fieldEntries := map[string]*FieldCacheEntry{} + buildStructCacheEntry(t, fieldEntries, nil) + typeEntry.structFields = fieldEntries + sortedByJsonName := make([]*FieldCacheEntry, len(fieldEntries)) + i := 0 + for _, entry := range fieldEntries { + sortedByJsonName[i] = entry + i++ + } + sort.Slice(sortedByJsonName, func(i, j int) bool { + return sortedByJsonName[i].JsonName < sortedByJsonName[j].JsonName + }) + typeEntry.orderedStructFields = sortedByJsonName + } + + // cyclic type references are allowed, so we must add the typeEntry to the updates map before resolving + // the field.typeEntry references, or creating them if they are not already in the cache + updates[t] = typeEntry + + for _, field := range typeEntry.structFields { + if field.TypeEntry == nil { + field.TypeEntry = typeReflectEntryOf(cm, field.fieldType, updates) + } + } + return typeEntry +} + +func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) { + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + jsonName, omit, isInline, isOmitempty := lookupJsonTags(field) + if omit { + continue + } + if isInline { + buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index)) + continue + } + info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} + infos[jsonName] = info + } +} + +// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs. +func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry { + return e.structFields +} + +// OrderedFields returns a list of FieldCacheEntry for structs, sorted by JSON field name, or nil for non-structs. +func (e TypeReflectCacheEntry) OrderedFields() []*FieldCacheEntry { + return e.orderedStructFields +} + +// CanConvertToUnstructured returns true if this TypeReflectCacheEntry can convert values of its type to unstructured. +func (e TypeReflectCacheEntry) CanConvertToUnstructured() bool { + return e.isJsonMarshaler || e.ptrIsJsonMarshaler || e.isStringConvertable || e.ptrIsStringConvertable +} + +// ToUnstructured converts the provided value to unstructured and returns it. +func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, error) { + // This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505 + // and is intended to replace it. + + // Check if the object has a custom string converter and use it if available, since it is much more efficient + // than round tripping through json. + if converter, ok := e.getUnstructuredConverter(sv); ok { + return converter.ToUnstructured(), nil + } + // Check if the object has a custom JSON marshaller/unmarshaller.
+ if marshaler, ok := e.getJsonMarshaler(sv); ok { + if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil, nil + } + + data, err := marshaler.MarshalJSON() + if err != nil { + return nil, err + } + switch { + case len(data) == 0: + return nil, fmt.Errorf("error decoding from json: empty value") + + case bytes.Equal(data, nullBytes): + // We're done - we don't need to store anything. + return nil, nil + + case bytes.Equal(data, trueBytes): + return true, nil + + case bytes.Equal(data, falseBytes): + return false, nil + + case data[0] == '"': + var result string + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding string from json: %v", err) + } + return result, nil + + case data[0] == '{': + result := make(map[string]interface{}) + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding object from json: %v", err) + } + return result, nil + + case data[0] == '[': + result := make([]interface{}, 0) + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding array from json: %v", err) + } + return result, nil + + default: + var ( + resultInt int64 + resultFloat float64 + err error + ) + if err = unmarshal(data, &resultInt); err == nil { + return resultInt, nil + } else if err = unmarshal(data, &resultFloat); err == nil { + return resultFloat, nil + } else { + return nil, fmt.Errorf("error decoding number from json: %v", err) + } + } + } + + return nil, fmt.Errorf("provided type cannot be converted: %v", sv.Type()) +} + +// CanConvertFromUnstructured returns true if this TypeReflectCacheEntry can convert objects of the type from unstructured. +func (e TypeReflectCacheEntry) CanConvertFromUnstructured() bool { + return e.isJsonUnmarshaler +} + +// FromUnstructured converts the provided source value from unstructured into the provided destination value. +func (e TypeReflectCacheEntry) FromUnstructured(sv, dv reflect.Value) error { + // TODO: this could be made much more efficient using direct conversions like + // UnstructuredConverter.ToUnstructured provides. 
+ st := dv.Type() + data, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st.String(), err) + } + if unmarshaler, ok := e.getJsonUnmarshaler(dv); ok { + return unmarshaler.UnmarshalJSON(data) + } + return fmt.Errorf("unable to unmarshal %v into %v", sv.Type(), dv.Type()) +} + +var ( + nullBytes = []byte("null") + trueBytes = []byte("true") + falseBytes = []byte("false") +) + +func (e TypeReflectCacheEntry) getJsonMarshaler(v reflect.Value) (json.Marshaler, bool) { + if e.isJsonMarshaler { + return v.Interface().(json.Marshaler), true + } + if e.ptrIsJsonMarshaler { + // Check pointer receivers if v is not a pointer + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + return v.Interface().(json.Marshaler), true + } + } + return nil, false +} + +func (e TypeReflectCacheEntry) getJsonUnmarshaler(v reflect.Value) (json.Unmarshaler, bool) { + if !e.isJsonUnmarshaler { + return nil, false + } + return v.Addr().Interface().(json.Unmarshaler), true +} + +func (e TypeReflectCacheEntry) getUnstructuredConverter(v reflect.Value) (UnstructuredConverter, bool) { + if e.isStringConvertable { + return v.Interface().(UnstructuredConverter), true + } + if e.ptrIsStringConvertable { + // Check pointer receivers if v is not a pointer + if v.CanAddr() { + v = v.Addr() + return v.Interface().(UnstructuredConverter), true + } + } + return nil, false +} + +type typeReflectCache struct { + // use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any + // go program using this cache + value atomic.Value + // mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a + // read-lock since the atomic value is always read-only + mu sync.Mutex +} + +func newReflectCache() *typeReflectCache { + cache := &typeReflectCache{} + cache.value.Store(make(reflectCacheMap)) + return cache +} + +type reflectCacheMap map[reflect.Type]*TypeReflectCacheEntry + +// get returns the reflectCacheMap. +func (c *typeReflectCache) get() reflectCacheMap { + return c.value.Load().(reflectCacheMap) +} + +// update merges the provided updates into the cache. +func (c *typeReflectCache) update(updates reflectCacheMap) { + c.mu.Lock() + defer c.mu.Unlock() + + currentCacheMap := c.value.Load().(reflectCacheMap) + + hasNewEntries := false + for t := range updates { + if _, ok := currentCacheMap[t]; !ok { + hasNewEntries = true + break + } + } + if !hasNewEntries { + // Bail if the updates have been set while waiting for lock acquisition. + // This is safe since setting entries is idempotent. 
+ return + } + + newCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates)) + for k, v := range currentCacheMap { + newCacheMap[k] = v + } + for t, update := range updates { + newCacheMap[t] = update + } + c.value.Store(newCacheMap) +} + +// Below json Unmarshal is from k8s.io/apimachinery/pkg/util/json +// to handle number conversions as expected by Kubernetes + +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + +// unmarshal unmarshals the given data +// If v is a *map[string]interface{} or *[]interface{}, numbers are converted to int64 or float64 +func unmarshal(data []byte, v interface{}) error { + switch v := v.(type) { + case *map[string]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertMapNumbers(*v, 0) + + case *[]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the slice to convert json.Number objects to int64 or float64 + return convertSliceNumbers(*v, 0) + + default: + return json.Unmarshal(data, v) + } +} + +// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + + var err error + for k, v := range m { + switch v := v.(type) { + case json.Number: + m[k], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v, depth+1) + case []interface{}: + err = convertSliceNumbers(v, depth+1) + } + if err != nil { + return err + } + } + return nil +} + +// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + + var err error + for i, v := range s { + switch v := v.(type) { + case json.Number: + s[i], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v, depth+1) + case []interface{}: + err = convertSliceNumbers(v, depth+1) + } + if err != nil { + return err + } + } + return nil +} + +// convertNumber converts a json.Number to an int64 or float64, or returns an error +func convertNumber(n json.Number) (interface{}, error) { + // Attempt to convert to an int64 first + if i, err := n.Int64(); err == nil { + return i, nil + } + // Return a float64 (default json.Decode() behavior) + // An overflow will return an error + return n.Float64() +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go index 42f149aa9f..4a7bb5c6eb 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go @@ -19,162 +19,66 @@ package value import ( "fmt" "reflect" - "sync" - "sync/atomic" ) -// reflectStructCache keeps track of json tag related data for structs and fields to speed up reflection. -// TODO: This overlaps in functionality with the fieldCache in -// https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L57 but -// is more efficient at lookup by json field name. The logic should be consolidated. Only one copy of the cache needs -// to be kept for each running process. -var ( - reflectStructCache = newStructCache() -) - -type structCache struct { - // use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any - // go program using this cache - value atomic.Value - // mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a - // read-lock since the atomic value is always read-only - mu sync.Mutex -} - -type structCacheMap map[reflect.Type]structCacheEntry - -// structCacheEntry contains information about each struct field, keyed by json field name, that is expensive to -// compute using reflection. -type structCacheEntry map[string]*fieldCacheEntry - -// Get returns true and fieldCacheEntry for the given type if the type is in the cache. Otherwise Get returns false. -func (c *structCache) Get(t reflect.Type) (map[string]*fieldCacheEntry, bool) { - entry, ok := c.value.Load().(structCacheMap)[t] - return entry, ok -} - -// Set sets the fieldCacheEntry for the given type via a copy-on-write update to the struct cache. -func (c *structCache) Set(t reflect.Type, m map[string]*fieldCacheEntry) { - c.mu.Lock() - defer c.mu.Unlock() - - currentCacheMap := c.value.Load().(structCacheMap) - - if _, ok := currentCacheMap[t]; ok { - // Bail if the entry has been set while waiting for lock acquisition. - // This is safe since setting entries is idempotent. 
- return - } - - newCacheMap := make(structCacheMap, len(currentCacheMap)+1) - for k, v := range currentCacheMap { - newCacheMap[k] = v - } - newCacheMap[t] = m - c.value.Store(newCacheMap) -} - -func newStructCache() *structCache { - cache := &structCache{} - cache.value.Store(make(structCacheMap)) - return cache -} - -type fieldCacheEntry struct { - // isOmitEmpty is true if the field has the json 'omitempty' tag. - isOmitEmpty bool - // fieldPath is the field indices (see FieldByIndex) to lookup the value of - // a field in a reflect.Value struct. A path of field indices is used - // to support traversing to a field field in struct fields that have the 'inline' - // json tag. - fieldPath [][]int -} - -func (f *fieldCacheEntry) getFieldFromStruct(structVal reflect.Value) reflect.Value { - // field might be field within 'inline' structs - for _, elem := range f.fieldPath { - structVal = structVal.FieldByIndex(elem) - } - return structVal -} - -func getStructCacheEntry(t reflect.Type) structCacheEntry { - if hints, ok := reflectStructCache.Get(t); ok { - return hints - } - - hints := map[string]*fieldCacheEntry{} - buildStructCacheEntry(t, hints, nil) - - reflectStructCache.Set(t, hints) - return hints -} - -func buildStructCacheEntry(t reflect.Type, infos map[string]*fieldCacheEntry, fieldPath [][]int) { - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - jsonName, omit, isInline, isOmitempty := lookupJsonTags(field) - if omit { - continue - } - if isInline { - buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index)) - continue - } - info := &fieldCacheEntry{isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index)} - infos[jsonName] = info - - } -} - type structReflect struct { valueReflect } func (r structReflect) Length() int { i := 0 - eachStructField(r.Value, func(s string, value reflect.Value) bool { + eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { i++ return true }) return i } +func (r structReflect) Empty() bool { + return eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { + return false // exit early if the struct is non-empty + }) +} + func (r structReflect) Get(key string) (Value, bool) { - if val, ok, _ := r.findJsonNameField(key); ok { - return mustWrapValueReflect(val), true + return r.GetUsing(HeapAllocator, key) +} + +func (r structReflect) GetUsing(a Allocator, key string) (Value, bool) { + if val, ok := r.findJsonNameField(key); ok { + return a.allocValueReflect().mustReuse(val, nil, nil, nil), true } return nil, false } func (r structReflect) Has(key string) bool { - _, ok, _ := r.findJsonNameField(key) + _, ok := r.findJsonNameField(key) return ok } func (r structReflect) Set(key string, val Value) { - fieldEntry, ok := getStructCacheEntry(r.Value.Type())[key] + fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] if !ok { panic(fmt.Sprintf("key %s may not be set on struct %T: field does not exist", key, r.Value.Interface())) } - oldVal := fieldEntry.getFieldFromStruct(r.Value) + oldVal := fieldEntry.GetFrom(r.Value) newVal := reflect.ValueOf(val.Unstructured()) r.update(fieldEntry, key, oldVal, newVal) } func (r structReflect) Delete(key string) { - fieldEntry, ok := getStructCacheEntry(r.Value.Type())[key] + fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] if !ok { panic(fmt.Sprintf("key %s may not be deleted on struct %T: field does not exist", key, r.Value.Interface())) } - oldVal := 
fieldEntry.getFieldFromStruct(r.Value) + oldVal := fieldEntry.GetFrom(r.Value) if oldVal.Kind() != reflect.Ptr && !fieldEntry.isOmitEmpty { panic(fmt.Sprintf("key %s may not be deleted on struct: %T: value is neither a pointer nor an omitempty field", key, r.Value.Interface())) } r.update(fieldEntry, key, oldVal, reflect.Zero(oldVal.Type())) } -func (r structReflect) update(fieldEntry *fieldCacheEntry, key string, oldVal, newVal reflect.Value) { +func (r structReflect) update(fieldEntry *FieldCacheEntry, key string, oldVal, newVal reflect.Value) { if oldVal.CanSet() { oldVal.Set(newVal) return @@ -187,7 +91,7 @@ func (r structReflect) update(fieldEntry *fieldCacheEntry, key string, oldVal, n panic("ParentMapKey must not be nil if ParentMap is not nil") } replacement := reflect.New(r.Value.Type()).Elem() - fieldEntry.getFieldFromStruct(replacement).Set(newVal) + fieldEntry.GetFrom(replacement).Set(newVal) r.ParentMap.SetMapIndex(*r.ParentMapKey, replacement) return } @@ -198,21 +102,25 @@ func (r structReflect) update(fieldEntry *fieldCacheEntry, key string, oldVal, n } func (r structReflect) Iterate(fn func(string, Value) bool) bool { - return eachStructField(r.Value, func(s string, value reflect.Value) bool { - v := mustWrapValueReflect(value) - defer v.Recycle() - return fn(s, v) + return r.IterateUsing(HeapAllocator, fn) +} + +func (r structReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { + vr := a.allocValueReflect() + defer a.Free(vr) + return eachStructField(r.Value, func(e *TypeReflectCacheEntry, s string, value reflect.Value) bool { + return fn(s, vr.mustReuse(value, e, nil, nil)) }) } -func eachStructField(structVal reflect.Value, fn func(string, reflect.Value) bool) bool { - for jsonName, fieldCacheEntry := range getStructCacheEntry(structVal.Type()) { - fieldVal := fieldCacheEntry.getFieldFromStruct(structVal) - if fieldCacheEntry.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) { +func eachStructField(structVal reflect.Value, fn func(*TypeReflectCacheEntry, string, reflect.Value) bool) bool { + for _, fieldCacheEntry := range TypeReflectEntryOf(structVal.Type()).OrderedFields() { + fieldVal := fieldCacheEntry.GetFrom(structVal) + if fieldCacheEntry.CanOmit(fieldVal) { // omit it continue } - ok := fn(jsonName, fieldVal) + ok := fn(fieldCacheEntry.TypeEntry, fieldCacheEntry.JsonName, fieldVal) if !ok { return false } @@ -231,39 +139,70 @@ func (r structReflect) Unstructured() interface{} { } func (r structReflect) Equals(m Map) bool { - if rhsStruct, ok := m.(structReflect); ok { - return reflect.DeepEqual(r.Value.Interface(), rhsStruct.Value.Interface()) - } - if r.Length() != m.Length() { - return false - } - structCacheEntry := getStructCacheEntry(r.Value.Type()) + return r.EqualsUsing(HeapAllocator, m) +} - return m.Iterate(func(s string, value Value) bool { - fieldCacheEntry, ok := structCacheEntry[s] - if !ok { - return false - } - lhsVal := fieldCacheEntry.getFieldFromStruct(r.Value) - return Equals(mustWrapValueReflect(lhsVal), value) - }) +func (r structReflect) EqualsUsing(a Allocator, m Map) bool { + // MapEquals uses zip and is fairly efficient for structReflect + return MapEqualsUsing(a, &r, m) } func (r structReflect) findJsonNameFieldAndNotEmpty(jsonName string) (reflect.Value, bool) { - structCacheEntry, ok := getStructCacheEntry(r.Value.Type())[jsonName] + structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] if !ok { return reflect.Value{}, false } - fieldVal := structCacheEntry.getFieldFromStruct(r.Value) - 
omit := structCacheEntry.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) - return fieldVal, !omit + fieldVal := structCacheEntry.GetFrom(r.Value) + return fieldVal, !structCacheEntry.CanOmit(fieldVal) } -func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool, omitEmpty bool) { - structCacheEntry, ok := getStructCacheEntry(r.Value.Type())[jsonName] +func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool) { + structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] if !ok { - return reflect.Value{}, false, false + return reflect.Value{}, false } - fieldVal := structCacheEntry.getFieldFromStruct(r.Value) - return fieldVal, true, structCacheEntry.isOmitEmpty + fieldVal := structCacheEntry.GetFrom(r.Value) + return fieldVal, !structCacheEntry.CanOmit(fieldVal) +} + +func (r structReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return r.ZipUsing(HeapAllocator, other, order, fn) +} + +func (r structReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if otherStruct, ok := other.(*structReflect); ok && r.Value.Type() == otherStruct.Value.Type() { + lhsvr, rhsvr := a.allocValueReflect(), a.allocValueReflect() + defer a.Free(lhsvr) + defer a.Free(rhsvr) + return r.structZip(otherStruct, lhsvr, rhsvr, fn) + } + return defaultMapZip(a, &r, other, order, fn) +} + +// structZip provides an optimized zip for structReflect types. The zip is always lexical key ordered since there is +// no additional cost to ordering the zip for structured types. +func (r structReflect) structZip(other *structReflect, lhsvr, rhsvr *valueReflect, fn func(key string, lhs, rhs Value) bool) bool { + lhsVal := r.Value + rhsVal := other.Value + + for _, fieldCacheEntry := range TypeReflectEntryOf(lhsVal.Type()).OrderedFields() { + lhsFieldVal := fieldCacheEntry.GetFrom(lhsVal) + rhsFieldVal := fieldCacheEntry.GetFrom(rhsVal) + lhsOmit := fieldCacheEntry.CanOmit(lhsFieldVal) + rhsOmit := fieldCacheEntry.CanOmit(rhsFieldVal) + if lhsOmit && rhsOmit { + continue + } + var lhsVal, rhsVal Value + if !lhsOmit { + lhsVal = lhsvr.mustReuse(lhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil) + } + if !rhsOmit { + rhsVal = rhsvr.mustReuse(rhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil) + } + if !fn(fieldCacheEntry.JsonName, lhsVal, rhsVal) { + return false + } + } + return true } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go index 889bedfff4..ea79e3a000 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go @@ -55,9 +55,15 @@ type Value interface { // AsMap converts the Value into a Map (or panic if the type // doesn't allow it). AsMap() Map + // AsMapUsing uses the provided allocator and converts the Value + // into a Map (or panic if the type doesn't allow it). + AsMapUsing(Allocator) Map // AsList converts the Value into a List (or panic if the type // doesn't allow it). AsList() List + // AsListUsing uses the provided allocator and converts the Value + // into a List (or panic if the type doesn't allow it). + AsListUsing(Allocator) List // AsBool converts the Value into a bool (or panic if the type // doesn't allow it). 
AsBool() bool @@ -71,10 +77,6 @@ type Value interface { // doesn't allow it). AsString() string - // Recycle returns a value of this type that is no longer needed. The - // value shouldn't be used after this call. - Recycle() - // Unstructured converts the Value into an Unstructured interface{}. Unstructured() interface{} } @@ -128,6 +130,11 @@ func ToYAML(v Value) ([]byte, error) { // Equals returns true iff the two values are equal. func Equals(lhs, rhs Value) bool { + return EqualsUsing(HeapAllocator, lhs, rhs) +} + +// EqualsUsing uses the provided allocator and returns true iff the two values are equal. +func EqualsUsing(a Allocator, lhs, rhs Value) bool { if lhs.IsFloat() || rhs.IsFloat() { var lf float64 if lhs.IsFloat() { @@ -173,7 +180,11 @@ func Equals(lhs, rhs Value) bool { } if lhs.IsList() { if rhs.IsList() { - return ListEquals(lhs.AsList(), rhs.AsList()) + lhsList := lhs.AsListUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsListUsing(a) + defer a.Free(rhsList) + return lhsList.EqualsUsing(a, rhsList) } return false } else if rhs.IsList() { @@ -181,7 +192,11 @@ func Equals(lhs, rhs Value) bool { } if lhs.IsMap() { if rhs.IsMap() { - return lhs.AsMap().Equals(rhs.AsMap()) + lhsList := lhs.AsMapUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsMapUsing(a) + defer a.Free(rhsList) + return lhsList.EqualsUsing(a, rhsList) } return false } else if rhs.IsMap() { @@ -215,8 +230,9 @@ func ToString(v Value) string { return fmt.Sprintf("%v", v.AsBool()) case v.IsList(): strs := []string{} - for i := 0; i < v.AsList().Length(); i++ { - strs = append(strs, ToString(v.AsList().At(i))) + list := v.AsList() + for i := 0; i < list.Length(); i++ { + strs = append(strs, ToString(list.At(i))) } return "[" + strings.Join(strs, ",") + "]" case v.IsMap(): @@ -241,6 +257,14 @@ func Less(lhs, rhs Value) bool { // sorted, even if they are of different types). The result will be 0 if // v==rhs, -1 if v < rhs, and +1 if v > rhs. func Compare(lhs, rhs Value) int { + return CompareUsing(HeapAllocator, lhs, rhs) +} + +// CompareUsing uses the provided allocator and provides a total +// ordering for Value (so that they can be sorted, even if they +// are of different types). The result will be 0 if v==rhs, -1 +// if v < rhs, and +1 if v > rhs. +func CompareUsing(a Allocator, lhs, rhs Value) int { if lhs.IsFloat() { if !rhs.IsFloat() { // Extra: compare floats and ints numerically. @@ -289,7 +313,11 @@ func Compare(lhs, rhs Value) int { if !rhs.IsList() { return -1 } - return ListCompare(lhs.AsList(), rhs.AsList()) + lhsList := lhs.AsListUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsListUsing(a) + defer a.Free(rhsList) + return ListCompareUsing(a, lhsList, rhsList) } else if rhs.IsList() { return 1 } @@ -297,7 +325,11 @@ func Compare(lhs, rhs Value) int { if !rhs.IsMap() { return -1 } - return MapCompare(lhs.AsMap(), rhs.AsMap()) + lhsMap := lhs.AsMapUsing(a) + defer a.Free(lhsMap) + rhsMap := rhs.AsMapUsing(a) + defer a.Free(rhsMap) + return MapCompareUsing(a, lhsMap, rhsMap) } else if rhs.IsMap() { return 1 } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go index ed95c952de..05e70debae 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go @@ -17,20 +17,11 @@ limitations under the License. 
package value

 import (
- "bytes"
 "encoding/base64"
- "encoding/json"
 "fmt"
 "reflect"
- "sync"
 )

-var reflectPool = sync.Pool{
- New: func() interface{} {
- return &valueReflect{}
- },
-}
-
 // NewValueReflect creates a Value backed by an "interface{}" type,
 // typically a structured object in the Kubernetes world that is exposed via reflection.
 // The provided "interface{}" value must be a pointer so that the value can be modified via reflection.
@@ -45,33 +36,57 @@ func NewValueReflect(value interface{}) (Value, error) {
 // The root value to reflect on must be a pointer so that map.Set() and map.Delete() operations are possible.
 return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer")
 }
- return wrapValueReflect(nil, nil, v)
+ return wrapValueReflect(v, nil, nil)
 }

-func wrapValueReflect(parentMap, parentMapKey *reflect.Value, value reflect.Value) (Value, error) {
- // TODO: conversion of json.Marshaller interface types is expensive. This can be mostly optimized away by
- // introducing conversion functions that do not require going through JSON and using those here.
- if marshaler, ok := getMarshaler(value); ok {
- return toUnstructured(marshaler, value)
- }
- value = dereference(value)
- val := reflectPool.Get().(*valueReflect)
- val.Value = value
- val.ParentMap = parentMap
- val.ParentMapKey = parentMapKey
- return Value(val), nil
+// wrapValueReflect wraps the provided reflect.Value as a value. If parent in the data tree is a map, parentMap
+// and parentMapKey must be provided so that the returned value may be set and deleted.
+func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) {
+ val := HeapAllocator.allocValueReflect()
+ return val.reuse(value, nil, parentMap, parentMapKey)
 }

-func mustWrapValueReflect(value reflect.Value) Value {
- v, err := wrapValueReflect(nil, nil, value)
+// mustWrapValueReflect wraps the provided reflect.Value as a value, and panics if there is an error. If parent in the data
+// tree is a map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
+func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value {
+ v, err := wrapValueReflect(value, parentMap, parentMapKey)
 if err != nil {
 panic(err)
 }
 return v
 }

-func mustWrapValueReflectMapItem(parentMap, parentMapKey *reflect.Value, value reflect.Value) Value {
- v, err := wrapValueReflect(parentMap, parentMapKey, value)
+// the value interface doesn't care about the type for value.IsNull, so we can use a constant
+var nilType = reflect.TypeOf(&struct{}{})
+
+// reuse replaces the value of the valueReflect. If parent in the data tree is a map, parentMap and parentMapKey
+// must be provided so that the returned value may be set and deleted.
+func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) {
+ if cacheEntry == nil {
+ cacheEntry = TypeReflectEntryOf(value.Type())
+ }
+ if cacheEntry.CanConvertToUnstructured() {
+ u, err := cacheEntry.ToUnstructured(value)
+ if err != nil {
+ return nil, err
+ }
+ if u == nil {
+ value = reflect.Zero(nilType)
+ } else {
+ value = reflect.ValueOf(u)
+ }
+ }
+ r.Value = dereference(value)
+ r.ParentMap = parentMap
+ r.ParentMapKey = parentMapKey
+ r.kind = kind(r.Value)
+ return r, nil
+}
+
+// mustReuse replaces the value of the valueReflect and panics if there is an error. 
If parent in the data tree is a +// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. +func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value { + v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey) if err != nil { panic(err) } @@ -90,53 +105,91 @@ type valueReflect struct { ParentMap *reflect.Value ParentMapKey *reflect.Value Value reflect.Value + kind reflectType } func (r valueReflect) IsMap() bool { - return r.isKind(reflect.Map, reflect.Struct) + return r.kind == mapType || r.kind == structMapType } func (r valueReflect) IsList() bool { - typ := r.Value.Type() - return typ.Kind() == reflect.Slice && !(typ.Elem().Kind() == reflect.Uint8) + return r.kind == listType } func (r valueReflect) IsBool() bool { - return r.isKind(reflect.Bool) + return r.kind == boolType } func (r valueReflect) IsInt() bool { - // Uint64 deliberately excluded, see valueUnstructured.Int. - return r.isKind(reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8) + return r.kind == intType || r.kind == uintType } func (r valueReflect) IsFloat() bool { - return r.isKind(reflect.Float64, reflect.Float32) + return r.kind == floatType } func (r valueReflect) IsString() bool { - kind := r.Value.Kind() - if kind == reflect.String { - return true - } - if kind == reflect.Slice && r.Value.Type().Elem().Kind() == reflect.Uint8 { - return true - } - return false + return r.kind == stringType || r.kind == byteStringType } func (r valueReflect) IsNull() bool { - return safeIsNil(r.Value) + return r.kind == nullType } -func (r valueReflect) isKind(kinds ...reflect.Kind) bool { - kind := r.Value.Kind() - for _, k := range kinds { - if kind == k { - return true +type reflectType = int + +const ( + mapType = iota + structMapType + listType + intType + uintType + floatType + stringType + byteStringType + boolType + nullType +) + +func kind(v reflect.Value) reflectType { + typ := v.Type() + rk := typ.Kind() + switch rk { + case reflect.Map: + if v.IsNil() { + return nullType } + return mapType + case reflect.Struct: + return structMapType + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + return intType + case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8: + // Uint64 deliberately excluded, see valueUnstructured.Int. 
+ return uintType + case reflect.Float64, reflect.Float32: + return floatType + case reflect.String: + return stringType + case reflect.Bool: + return boolType + case reflect.Slice: + if v.IsNil() { + return nullType + } + elemKind := typ.Elem().Kind() + if elemKind == reflect.Uint8 { + return byteStringType + } + return listType + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface: + if v.IsNil() { + return nullType + } + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + default: + panic(fmt.Sprintf("unsupported type: %v", v.Type())) } - return false } // TODO find a cleaner way to avoid panics from reflect.IsNil() @@ -150,24 +203,33 @@ func safeIsNil(v reflect.Value) bool { } func (r valueReflect) AsMap() Map { - val := r.Value - switch val.Kind() { - case reflect.Struct: - return structReflect{r} - case reflect.Map: - return mapReflect{r} + return r.AsMapUsing(HeapAllocator) +} + +func (r valueReflect) AsMapUsing(a Allocator) Map { + switch r.kind { + case structMapType: + v := a.allocStructReflect() + v.valueReflect = r + return v + case mapType: + v := a.allocMapReflect() + v.valueReflect = r + return v default: panic("value is not a map or struct") } } -func (r *valueReflect) Recycle() { - reflectPool.Put(r) +func (r valueReflect) AsList() List { + return r.AsListUsing(HeapAllocator) } -func (r valueReflect) AsList() List { +func (r valueReflect) AsListUsing(a Allocator) List { if r.IsList() { - return listReflect{r.Value} + v := a.allocListReflect() + v.Value = r.Value + return v } panic("value is not a list") } @@ -180,10 +242,10 @@ func (r valueReflect) AsBool() bool { } func (r valueReflect) AsInt() int64 { - if r.isKind(reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8) { + if r.kind == intType { return r.Value.Int() } - if r.isKind(reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8) { + if r.kind == uintType { return int64(r.Value.Uint()) } @@ -198,11 +260,10 @@ func (r valueReflect) AsFloat() float64 { } func (r valueReflect) AsString() string { - kind := r.Value.Kind() - if kind == reflect.String { + switch r.kind { + case stringType: return r.Value.String() - } - if kind == reflect.Slice && r.Value.Type().Elem().Kind() == reflect.Uint8 { + case byteStringType: return base64.StdEncoding.EncodeToString(r.Value.Bytes()) } panic("value is not a string") @@ -216,9 +277,9 @@ func (r valueReflect) Unstructured() interface{} { case val.Kind() == reflect.Struct: return structReflect{r}.Unstructured() case val.Kind() == reflect.Map: - return mapReflect{r}.Unstructured() + return mapReflect{valueReflect: r}.Unstructured() case r.IsList(): - return listReflect{Value: r.Value}.Unstructured() + return listReflect{r.Value}.Unstructured() case r.IsString(): return r.AsString() case r.IsInt(): @@ -231,89 +292,3 @@ func (r valueReflect) Unstructured() interface{} { panic(fmt.Sprintf("value of type %s is not a supported by value reflector", val.Type())) } } - -// The below getMarshaler and toUnstructured functions are based on -// https://github.com/kubernetes/kubernetes/blob/40df9f82d0572a123f5ad13f48312978a2ff5877/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L509 -// and should somehow be consolidated with it - -var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() - -func getMarshaler(v reflect.Value) (json.Marshaler, bool) { - // Check value receivers if v is not a pointer and pointer receivers if v is a pointer - if v.Type().Implements(marshalerType) { - return v.Interface().(json.Marshaler), true 
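An aside on the getMarshaler helper being removed around this point: its one subtlety was that a MarshalJSON defined on a pointer receiver is only reachable when the reflect.Value is addressable, hence the CanAddr branch. A standalone illustration of that behavior (the quantity type and hasMarshaler name are assumptions for this sketch, not vendored code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem()

// quantity is a stand-in for types like resource.Quantity that implement
// json.Marshaler on the pointer receiver only.
type quantity struct{ s string }

func (q *quantity) MarshalJSON() ([]byte, error) { return []byte(`"` + q.s + `"`), nil }

// hasMarshaler mirrors the removed getMarshaler check: try the value's own
// method set first, then the pointer's method set if the value is addressable.
func hasMarshaler(v reflect.Value) bool {
	if v.Type().Implements(marshalerType) {
		return true
	}
	return v.Kind() != reflect.Ptr && v.CanAddr() && v.Addr().Type().Implements(marshalerType)
}

func main() {
	q := quantity{s: "500m"}
	fmt.Println(hasMarshaler(reflect.ValueOf(&q)))        // true: *quantity implements it directly
	fmt.Println(hasMarshaler(reflect.ValueOf(&q).Elem())) // true: addressable, so Addr() works
	fmt.Println(hasMarshaler(reflect.ValueOf(q)))         // false: a copy is not addressable
}
```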
- } - // Check pointer receivers if v is not a pointer - if v.Kind() != reflect.Ptr && v.CanAddr() { - v = v.Addr() - if v.Type().Implements(marshalerType) { - return v.Interface().(json.Marshaler), true - } - } - return nil, false -} - -var ( - nullBytes = []byte("null") - trueBytes = []byte("true") - falseBytes = []byte("false") -) - -func toUnstructured(marshaler json.Marshaler, sv reflect.Value) (Value, error) { - data, err := marshaler.MarshalJSON() - if err != nil { - return nil, err - } - switch { - case len(data) == 0: - return nil, fmt.Errorf("error decoding from json: empty value") - - case bytes.Equal(data, nullBytes): - // We're done - we don't need to store anything. - return NewValueInterface(nil), nil - - case bytes.Equal(data, trueBytes): - return NewValueInterface(true), nil - - case bytes.Equal(data, falseBytes): - return NewValueInterface(false), nil - - case data[0] == '"': - var result string - err := json.Unmarshal(data, &result) - if err != nil { - return nil, fmt.Errorf("error decoding string from json: %v", err) - } - return NewValueInterface(result), nil - - case data[0] == '{': - result := make(map[string]interface{}) - err := json.Unmarshal(data, &result) - if err != nil { - return nil, fmt.Errorf("error decoding object from json: %v", err) - } - return NewValueInterface(result), nil - - case data[0] == '[': - result := make([]interface{}, 0) - err := json.Unmarshal(data, &result) - if err != nil { - return nil, fmt.Errorf("error decoding array from json: %v", err) - } - return NewValueInterface(result), nil - - default: - var ( - resultInt int64 - resultFloat float64 - err error - ) - if err = json.Unmarshal(data, &resultInt); err == nil { - return NewValueInterface(resultInt), nil - } - if err = json.Unmarshal(data, &resultFloat); err == nil { - return NewValueInterface(resultFloat), nil - } - return nil, fmt.Errorf("error decoding number from json: %v", err) - } -} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go index bfc9fef8fd..ac5a926285 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go @@ -18,29 +18,26 @@ package value import ( "fmt" - "sync" ) -var viPool = sync.Pool{ - New: func() interface{} { - return &valueUnstructured{} - }, -} - // NewValueInterface creates a Value backed by an "interface{}" type, // typically an unstructured object in Kubernetes world. // interface{} must be one of: map[string]interface{}, map[interface{}]interface{}, []interface{}, int types, float types, // string or boolean. Nested interface{} must also be one of these types. func NewValueInterface(v interface{}) Value { - vi := viPool.Get().(*valueUnstructured) - vi.Value = v - return Value(vi) + return Value(HeapAllocator.allocValueUnstructured().reuse(v)) } type valueUnstructured struct { Value interface{} } +// reuse replaces the value of the valueUnstructured. 
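The reuse helper declared just below completes a pattern that recurs throughout this update: per-type sync.Pool recycling (the removed viPool and Recycle methods) is replaced by an explicit Allocator threaded through the *Using variants, so the caller owns wrapper lifetime instead of values recycling themselves. A minimal sketch of that calling convention under assumed names (none of these identifiers are the vendored API):

```go
package main

import "fmt"

// value is a stand-in for the package's valueUnstructured/valueReflect wrappers.
type value struct{ v interface{} }

func (x *value) reuse(v interface{}) *value { x.v = v; return x }

// allocator abstracts where short-lived wrappers come from: a heap
// implementation allocates fresh, a freelist implementation recycles.
type allocator interface {
	allocValue() *value
	free(*value)
}

type heapAllocator struct{}

func (heapAllocator) allocValue() *value { return &value{} }
func (heapAllocator) free(*value)        {}

type freelistAllocator struct{ list []*value }

func (a *freelistAllocator) allocValue() *value {
	if n := len(a.list); n > 0 {
		v := a.list[n-1]
		a.list = a.list[:n-1]
		return v
	}
	return &value{}
}

func (a *freelistAllocator) free(v *value) { a.list = append(a.list, v) }

// equalsUsing shows the convention: the caller allocates and frees,
// mirroring the EqualsUsing/AsMapUsing/AsListUsing variants in the diff.
func equalsUsing(a allocator, lhs, rhs interface{}) bool {
	l := a.allocValue().reuse(lhs)
	defer a.free(l)
	r := a.allocValue().reuse(rhs)
	defer a.free(r)
	return l.v == r.v
}

func main() {
	fmt.Println(equalsUsing(heapAllocator{}, 1, 1))      // true
	fmt.Println(equalsUsing(&freelistAllocator{}, 1, 2)) // false
}
```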
+func (vi *valueUnstructured) reuse(value interface{}) Value { + vi.Value = value + return vi +} + func (v valueUnstructured) IsMap() bool { if _, ok := v.Value.(map[string]interface{}); ok { return true @@ -52,6 +49,10 @@ func (v valueUnstructured) IsMap() bool { } func (v valueUnstructured) AsMap() Map { + return v.AsMapUsing(HeapAllocator) +} + +func (v valueUnstructured) AsMapUsing(_ Allocator) Map { if v.Value == nil { panic("invalid nil") } @@ -73,6 +74,10 @@ func (v valueUnstructured) IsList() bool { } func (v valueUnstructured) AsList() List { + return v.AsListUsing(HeapAllocator) +} + +func (v valueUnstructured) AsListUsing(_ Allocator) List { return listUnstructured(v.Value.([]interface{})) } @@ -168,10 +173,6 @@ func (v valueUnstructured) IsNull() bool { return v.Value == nil } -func (v *valueUnstructured) Recycle() { - viPool.Put(v) -} - func (v valueUnstructured) Unstructured() interface{} { return v.Value } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.travis.yml b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.travis.yml index 03ddc7318a..d20e23eff4 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.travis.yml +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.travis.yml @@ -1,14 +1,13 @@ language: go dist: xenial go: - - 1.9.x - - 1.10.x - - 1.11.x + - 1.12.x + - 1.13.x script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(gofmt -d *.go) - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) - - go tool vet . - - go test -v -race ./... + - GO111MODULE=on go vet . + - GO111MODULE=on go test -v -race ./... + - git diff --exit-code install: - - go get golang.org/x/lint/golint + - GO111MODULE=off go get golang.org/x/lint/golint diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/OWNERS b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/OWNERS index 11ad7ce1a4..325b40b076 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/OWNERS +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - dims - lavalamp diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/README.md b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/README.md index 0200f75b4d..5a651d9163 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/README.md +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/README.md @@ -1,12 +1,14 @@ # YAML marshaling and unmarshaling support for Go -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) +[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml) + +kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml). ## Introduction A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. 
This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). ## Compatibility @@ -32,13 +34,13 @@ GOOD: To install, run: ``` -$ go get github.com/ghodss/yaml +$ go get sigs.k8s.io/yaml ``` And import using: ``` -import "github.com/ghodss/yaml" +import "sigs.k8s.io/yaml" ``` Usage is very similar to the JSON library: @@ -49,7 +51,7 @@ package main import ( "fmt" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) type Person struct { @@ -93,7 +95,7 @@ package main import ( "fmt" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) func main() { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/go.mod b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/go.mod new file mode 100644 index 0000000000..7224f34971 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/go.mod @@ -0,0 +1,8 @@ +module sigs.k8s.io/yaml + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + gopkg.in/yaml.v2 v2.2.8 +) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/go.sum b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/go.sum new file mode 100644 index 0000000000..76e49483af --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml.go b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml.go index 024596112a..efbc535d41 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml.go @@ -317,3 +317,64 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in return yamlObj, nil } } + +// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice, +// without going through a byte representation. A nil or empty map[string]interface{} input is +// converted to an empty map, i.e. yaml.MapSlice(nil). +// +// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice. +// +// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml: +// - float64s are down-casted as far as possible without data-loss to int, int64, uint64. +// - int64s are down-casted to int if possible without data-loss. +// +// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case. +// +// string, bool and any other types are unchanged. 
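Before the implementation below, a brief illustration of why the conversion targets yaml.MapSlice rather than a plain Go map: MapSlice carries an explicit key order, which go-yaml preserves when marshaling (a snippet for illustration, not part of the patch):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// yaml.MapSlice preserves insertion order; map[string]interface{} would not.
	m := yaml.MapSlice{
		{Key: "kind", Value: "Pod"},
		{Key: "apiVersion", Value: "v1"},
	}
	out, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // kind: Pod\napiVersion: v1
}
```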
+func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice { + if len(j) == 0 { + return nil + } + ret := make(yaml.MapSlice, 0, len(j)) + for k, v := range j { + ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)}) + } + return ret +} + +func jsonToYAMLValue(j interface{}) interface{} { + switch j := j.(type) { + case map[string]interface{}: + if j == nil { + return interface{}(nil) + } + return JSONObjectToYAMLObject(j) + case []interface{}: + if j == nil { + return interface{}(nil) + } + ret := make([]interface{}, len(j)) + for i := range j { + ret[i] = jsonToYAMLValue(j[i]) + } + return ret + case float64: + // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151 + if i64 := int64(j); j == float64(i64) { + if i := int(i64); i64 == int64(i) { + return i + } + return i64 + } + if ui64 := uint64(j); j == float64(ui64) { + return ui64 + } + return j + case int64: + if i := int(j); j == int64(i) { + return i + } + return j + } + return j +}
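The float64 branch of jsonToYAMLValue above is the subtle part: a float is only narrowed when converting it back to float64 reproduces the original value exactly, so nothing is lost. The same check in isolation (a sketch; downcast is not a vendored identifier):

```go
package main

import "fmt"

// downcast mirrors the float64 branch of jsonToYAMLValue: narrow to int,
// then int64, then uint64, but only when the round-trip back to float64
// is lossless.
func downcast(f float64) interface{} {
	if i64 := int64(f); f == float64(i64) {
		if i := int(i64); i64 == int64(i) {
			return i
		}
		return i64
	}
	if u64 := uint64(f); f == float64(u64) {
		return u64
	}
	return f
}

func main() {
	fmt.Printf("%T\n", downcast(3))    // int
	fmt.Printf("%T\n", downcast(2.5))  // float64
	fmt.Printf("%T\n", downcast(1e15)) // int on 64-bit platforms, int64 on 32-bit
}
```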