Godeps update
This commit is contained in:
parent 897e7720d5
commit d51c9c0c6c
File diff suppressed because it is too large

@@ -1,5 +1,24 @@
-commit c83dd0424f9866bfd364e31a431bade187970ce2 (origin/release-1.10)
-Author: caleb miles <calebmiles@google.com>
-Date: Mon Mar 19 20:35:57 2018 -0400
+commit eac9c4ff5711158eaca899a58d1293e39cbd040e (origin/master, origin/HEAD)
+Merge: c855accaec 86e152dc3f
+Author: Kubernetes Submit Queue <k8s-merge-robot@users.noreply.github.com>
+Date: Thu Jun 14 05:09:04 2018 -0700

-    Add/Update CHANGELOG-1.10.md for v1.10.0-rc.1.
+    Merge pull request #65070 from davidz627/fix/externalProvisionerClusterRole
+
+    Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md
+
+    Added PV GET api rule to external-provisioner
+
+    Adds the PV GET API rule to the system:external-provisioner cluster role. It is required because the provisioner does a GET here:
+    https://github.com/kubernetes-incubator/external-storage/blob/master/lib/controller/controller.go#L1121
+
+    Fixes #65058
+
+    /sig storage
+    /kind bug
+    /priority critical-urgent
+    /cc @msau42 @sbezverk
+
+    ```release-note
+    NONE
+    ```
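For context on the cherry-picked change recorded above: granting the external provisioner `get` on PersistentVolumes is an RBAC rule change to the `system:external-provisioner` cluster role. Below is a minimal sketch of such a rule using the k8s.io/api/rbac/v1 types; the verbs other than `get` are assumptions based on what a provisioner typically needs, not taken from this diff.

```go
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical reconstruction of the rule the PR describes: the
	// external-provisioner role additionally needs "get" on persistentvolumes.
	role := rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "system:external-provisioner"},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""}, // core API group
			Resources: []string{"persistentvolumes"},
			Verbs:     []string{"get", "list", "watch", "create", "delete"},
		}},
	}
	fmt.Printf("%s grants %v on %v\n", role.Name, role.Rules[0].Verbs, role.Rules[0].Resources)
}
```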
cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go (generated, vendored)

@@ -40,7 +40,7 @@ func DebugInfo(watches map[string][]string) map[string][]string {
 lines = append(lines, fmt.Sprintf("\t%s", cg))
 }
 }
-out["Fsnotify watches"] = lines
+out["Inotify watches"] = lines

 return out
 }
@@ -17,15 +17,15 @@ package common
 import (
 "sync"

-"github.com/fsnotify/fsnotify"
+"golang.org/x/exp/inotify"
 )

-// Watcher for container-related fsnotify events in the cgroup hierarchy.
+// Watcher for container-related inotify events in the cgroup hierarchy.
 //
 // Implementation is thread-safe.
-type FsnotifyWatcher struct {
-// Underlying fsnotify watcher.
-watcher *fsnotify.Watcher
+type InotifyWatcher struct {
+// Underlying inotify watcher.
+watcher *inotify.Watcher

 // Map of containers being watched to cgroup paths watched for that container.
 containersWatched map[string]map[string]bool
@@ -34,28 +34,28 @@ type FsnotifyWatcher struct {
 lock sync.Mutex
 }

-func NewFsnotifyWatcher() (*FsnotifyWatcher, error) {
-w, err := fsnotify.NewWatcher()
+func NewInotifyWatcher() (*InotifyWatcher, error) {
+w, err := inotify.NewWatcher()
 if err != nil {
 return nil, err
 }

-return &FsnotifyWatcher{
+return &InotifyWatcher{
 watcher: w,
 containersWatched: make(map[string]map[string]bool),
 }, nil
 }

 // Add a watch to the specified directory. Returns if the container was already being watched.
-func (iw *FsnotifyWatcher) AddWatch(containerName, dir string) (bool, error) {
+func (iw *InotifyWatcher) AddWatch(containerName, dir string) (bool, error) {
 iw.lock.Lock()
 defer iw.lock.Unlock()

 cgroupsWatched, alreadyWatched := iw.containersWatched[containerName]

-// Register an fsnotify notification.
+// Register an inotify notification.
 if !cgroupsWatched[dir] {
-err := iw.watcher.Add(dir)
+err := iw.watcher.AddWatch(dir, inotify.IN_CREATE|inotify.IN_DELETE|inotify.IN_MOVE)
 if err != nil {
 return alreadyWatched, err
 }
@@ -74,7 +74,7 @@ func (iw *FsnotifyWatcher) AddWatch(containerName, dir string) (bool, error) {
 }

 // Remove watch from the specified directory. Returns if this was the last watch on the specified container.
-func (iw *FsnotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) {
+func (iw *InotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) {
 iw.lock.Lock()
 defer iw.lock.Unlock()

@@ -84,9 +84,9 @@ func (iw *FsnotifyWatcher) RemoveWatch(containerName, dir string) (bool, error)
 return false, nil
 }

-// Remove the fsnotify watch if it exists.
+// Remove the inotify watch if it exists.
 if cgroupsWatched[dir] {
-err := iw.watcher.Remove(dir)
+err := iw.watcher.RemoveWatch(dir)
 if err != nil {
 return false, nil
 }
@@ -103,22 +103,22 @@ func (iw *FsnotifyWatcher) RemoveWatch(containerName, dir string)
 }

 // Errors are returned on this channel.
-func (iw *FsnotifyWatcher) Error() chan error {
-return iw.watcher.Errors
+func (iw *InotifyWatcher) Error() chan error {
+return iw.watcher.Error
 }

 // Events are returned on this channel.
-func (iw *FsnotifyWatcher) Event() chan fsnotify.Event {
-return iw.watcher.Events
+func (iw *InotifyWatcher) Event() chan *inotify.Event {
+return iw.watcher.Event
 }

-// Closes the fsnotify watcher.
-func (iw *FsnotifyWatcher) Close() error {
+// Closes the inotify watcher.
+func (iw *InotifyWatcher) Close() error {
 return iw.watcher.Close()
 }

 // Returns a map of containers to the cgroup paths being watched.
-func (iw *FsnotifyWatcher) GetWatches() map[string][]string {
+func (iw *InotifyWatcher) GetWatches() map[string][]string {
 out := make(map[string][]string, len(iw.containersWatched))
 for k, v := range iw.containersWatched {
 out[k] = mapToSlice(v)
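The hunks above swap cadvisor's fsnotify-based cgroup watcher back to an inotify-based one. As a minimal sketch of how a watcher with the surface shown here (NewInotifyWatcher, AddWatch, Event, Error, Close) might be driven; the cgroup path and container name are placeholders, not values from this diff.

```go
package main

import (
	"log"

	"github.com/google/cadvisor/container/common"
)

func main() {
	// Sketch only: exercises the InotifyWatcher API visible in the hunks above.
	iw, err := common.NewInotifyWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer iw.Close()

	// Watch a cgroup directory for the root container (placeholder path).
	if _, err := iw.AddWatch("/", "/sys/fs/cgroup/cpu"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-iw.Event():
			log.Printf("cgroup event: mask=%#x name=%s", ev.Mask, ev.Name)
		case err := <-iw.Error():
			log.Printf("watch error: %v", err)
		}
	}
}
```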
@@ -40,8 +40,8 @@ type rawFactory struct {
 // Information about mounted filesystems.
 fsInfo fs.FsInfo

-// Watcher for fsnotify events.
-watcher *common.FsnotifyWatcher
+// Watcher for inotify events.
+watcher *common.InotifyWatcher

 // List of metrics to be ignored.
 ignoreMetrics map[container.MetricKind]struct{}
@@ -78,7 +78,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno
 return fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
 }

-watcher, err := common.NewFsnotifyWatcher()
+watcher, err := common.NewInotifyWatcher()
 if err != nil {
 return err
 }
@@ -49,7 +49,7 @@ func isRootCgroup(name string) bool {
 return name == "/"
 }

-func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.FsnotifyWatcher, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
+func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
 cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)

 cHints, err := common.GetContainerHintsFromFile(*common.ArgContainerHints)
cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go (generated, vendored)

@@ -27,8 +27,8 @@ import (
 "github.com/google/cadvisor/container/libcontainer"
 "github.com/google/cadvisor/manager/watcher"

-"github.com/fsnotify/fsnotify"
 "github.com/golang/glog"
+"golang.org/x/exp/inotify"
 )

 type rawContainerWatcher struct {
@@ -37,8 +37,8 @@ type rawContainerWatcher struct {

 cgroupSubsystems *libcontainer.CgroupSubsystems

-// Fsnotify event watcher.
-watcher *common.FsnotifyWatcher
+// Inotify event watcher.
+watcher *common.InotifyWatcher

 // Signal for watcher thread to stop.
 stopWatcher chan error
@@ -53,7 +53,7 @@ func NewRawContainerWatcher() (watcher.ContainerWatcher, error) {
 return nil, fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
 }

-watcher, err := common.NewFsnotifyWatcher()
+watcher, err := common.NewInotifyWatcher()
 if err != nil {
 return nil, err
 }
@@ -121,7 +121,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
 if cleanup {
 _, err := self.watcher.RemoveWatch(containerName, dir)
 if err != nil {
-glog.Warningf("Failed to remove fsnotify watch for %q: %v", dir, err)
+glog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
 }
 }
 }()
@@ -163,16 +163,18 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
 return alreadyWatching, nil
 }

-func (self *rawContainerWatcher) processEvent(event fsnotify.Event, events chan watcher.ContainerEvent) error {
-// Convert the fsnotify event type to a container create or delete.
+func (self *rawContainerWatcher) processEvent(event *inotify.Event, events chan watcher.ContainerEvent) error {
+// Convert the inotify event type to a container create or delete.
 var eventType watcher.ContainerEventType
 switch {
-case event.Op == fsnotify.Create:
+case (event.Mask & inotify.IN_CREATE) > 0:
 eventType = watcher.ContainerAdd
-case event.Op == fsnotify.Remove:
+case (event.Mask & inotify.IN_DELETE) > 0:
 eventType = watcher.ContainerDelete
-case event.Op == fsnotify.Rename:
+case (event.Mask & inotify.IN_MOVED_FROM) > 0:
 eventType = watcher.ContainerDelete
+case (event.Mask & inotify.IN_MOVED_TO) > 0:
+eventType = watcher.ContainerAdd
 default:
 // Ignore other events.
 return nil
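The processEvent change above maps raw inotify mask bits onto container add/delete events. Here is a small standalone sketch of that mask dispatch using golang.org/x/exp/inotify directly; the returned strings are illustrative labels, not cadvisor's ContainerEventType values.

```go
package main

import (
	"fmt"

	"golang.org/x/exp/inotify"
)

// classify mirrors the switch in processEvent above: creations and moves into
// the watched directory become an "add", deletions and moves out of it a
// "delete"; everything else is ignored.
func classify(ev *inotify.Event) (string, bool) {
	switch {
	case ev.Mask&inotify.IN_CREATE > 0, ev.Mask&inotify.IN_MOVED_TO > 0:
		return "add", true
	case ev.Mask&inotify.IN_DELETE > 0, ev.Mask&inotify.IN_MOVED_FROM > 0:
		return "delete", true
	default:
		return "", false
	}
}

func main() {
	ev := &inotify.Event{Mask: inotify.IN_CREATE, Name: "/sys/fs/cgroup/cpu/newpod"}
	fmt.Println(classify(ev)) // add true
}
```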
@@ -18,7 +18,6 @@ go_library(
 importpath = "k8s.io/api/autoscaling/v2beta1",
 deps = [
 "//vendor/github.com/gogo/protobuf/proto:go_default_library",
 "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library",
-"//vendor/k8s.io/api/core/v1:go_default_library",
 "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
File diff suppressed because it is too large

@@ -31,29 +31,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
|
|||
// Package-wide variables from generator "generated".
|
||||
option go_package = "v2beta1";
|
||||
|
||||
// ContainerResourcePolicy controls how autoscaler computes the recommended
|
||||
// resources for a specific container.
|
||||
message ContainerResourcePolicy {
|
||||
// Name of the container or DefaultContainerResourcePolicy, in which
|
||||
// case the policy is used by the containers that don't have their own
|
||||
// policy specified.
|
||||
optional string containerName = 1;
|
||||
|
||||
// Whether autoscaler is enabled for the container. The default is "Auto".
|
||||
// +optional
|
||||
optional string mode = 2;
|
||||
|
||||
// Specifies the minimal amount of resources that will be recommended
|
||||
// for the container. The default is no minimum.
|
||||
// +optional
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> minAllowed = 3;
|
||||
|
||||
// Specifies the maximum amount of resources that will be recommended
|
||||
// for the container. The default is no maximum.
|
||||
// +optional
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxAllowed = 4;
|
||||
}
|
||||
|
||||
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
|
||||
message CrossVersionObjectReference {
|
||||
// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
|
||||
|
|
@@ -312,26 +289,6 @@ message ObjectMetricStatus {
|
|||
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
|
||||
}
|
||||
|
||||
// PodResourcePolicy controls how autoscaler computes the recommended resources
|
||||
// for containers belonging to the pod. There can be at most one entry for every
|
||||
// named container and optionally a single wildcard entry with `containerName` = '*',
|
||||
// which handles all containers that don't have individual policies.
|
||||
message PodResourcePolicy {
|
||||
// Per-container resource policies.
|
||||
// +optional
|
||||
// +patchMergeKey=containerName
|
||||
// +patchStrategy=merge
|
||||
repeated ContainerResourcePolicy containerPolicies = 1;
|
||||
}
|
||||
|
||||
// PodUpdatePolicy describes the rules on how changes are applied to the pods.
|
||||
message PodUpdatePolicy {
|
||||
// Controls when autoscaler applies changes to the pod resources.
|
||||
// The default is 'Auto'.
|
||||
// +optional
|
||||
optional string updateMode = 1;
|
||||
}
|
||||
|
||||
// PodsMetricSource indicates how to scale on a metric describing each pod in
|
||||
// the current scale target (for example, transactions-processed-per-second).
|
||||
// The values will be averaged together before being compared to the target
|
||||
|
|
@@ -356,39 +313,6 @@ message PodsMetricStatus {
|
|||
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
|
||||
}
|
||||
|
||||
// RecommendedContainerResources is the recommendation of resources computed by
|
||||
// autoscaler for a specific container. Respects the container resource policy
|
||||
// if present in the spec. In particular the recommendation is not produced for
|
||||
// containers with `ContainerScalingMode` set to 'Off'.
|
||||
message RecommendedContainerResources {
|
||||
// Name of the container.
|
||||
optional string containerName = 1;
|
||||
|
||||
// Recommended amount of resources.
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> target = 2;
|
||||
|
||||
// Minimum recommended amount of resources.
|
||||
// This amount is not guaranteed to be sufficient for the application to operate in a stable way, however
|
||||
// running with less resources is likely to have significant impact on performance/availability.
|
||||
// +optional
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> lowerBound = 3;
|
||||
|
||||
// Maximum recommended amount of resources.
|
||||
// Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum
|
||||
// amount of application is actually capable of consuming.
|
||||
// +optional
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> upperBound = 4;
|
||||
}
|
||||
|
||||
// RecommendedPodResources is the recommendation of resources computed by
|
||||
// autoscaler. It contains a recommendation for each container in the pod
|
||||
// (except for those with `ContainerScalingMode` set to 'Off').
|
||||
message RecommendedPodResources {
|
||||
// Resources recommended by the autoscaler for each container.
|
||||
// +optional
|
||||
repeated RecommendedContainerResources containerRecommendations = 1;
|
||||
}
|
||||
|
||||
// ResourceMetricSource indicates how to scale on a resource metric known to
|
||||
// Kubernetes, as specified in requests and limits, describing each pod in the
|
||||
// current scale target (e.g. CPU or memory). The values will be averaged
|
||||
|
|
@@ -437,89 +361,3 @@ message ResourceMetricStatus {
|
|||
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
|
||||
}
|
||||
|
||||
// VerticalPodAutoscaler is the configuration for a vertical pod
|
||||
// autoscaler, which automatically manages pod resources based on historical and
|
||||
// real time resource utilization.
|
||||
message VerticalPodAutoscaler {
|
||||
// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Specification of the behavior of the autoscaler.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
|
||||
optional VerticalPodAutoscalerSpec spec = 2;
|
||||
|
||||
// Current information about the autoscaler.
|
||||
// +optional
|
||||
optional VerticalPodAutoscalerStatus status = 3;
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalerCondition describes the state of
|
||||
// a VerticalPodAutoscaler at a certain point.
|
||||
message VerticalPodAutoscalerCondition {
|
||||
// type describes the current condition
|
||||
optional string type = 1;
|
||||
|
||||
// status is the status of the condition (True, False, Unknown)
|
||||
optional string status = 2;
|
||||
|
||||
// lastTransitionTime is the last time the condition transitioned from
|
||||
// one status to another
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
|
||||
|
||||
// reason is the reason for the condition's last transition.
|
||||
// +optional
|
||||
optional string reason = 4;
|
||||
|
||||
// message is a human-readable explanation containing details about
|
||||
// the transition
|
||||
// +optional
|
||||
optional string message = 5;
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects.
|
||||
message VerticalPodAutoscalerList {
|
||||
// metadata is the standard list metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// items is the list of vertical pod autoscaler objects.
|
||||
repeated VerticalPodAutoscaler items = 2;
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler.
|
||||
message VerticalPodAutoscalerSpec {
|
||||
// A label query that determines the set of pods controlled by the Autoscaler.
|
||||
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
|
||||
|
||||
// Describes the rules on how changes are applied to the pods.
|
||||
// If not specified, all fields in the `PodUpdatePolicy` are set to their
|
||||
// default values.
|
||||
// +optional
|
||||
optional PodUpdatePolicy updatePolicy = 2;
|
||||
|
||||
// Controls how the autoscaler computes recommended resources.
|
||||
// The resource policy may be used to set constraints on the recommendations
|
||||
// for individual containers. If not specified, the autoscaler computes recommended
|
||||
// resources for all containers in the pod, without additional constraints.
|
||||
// +optional
|
||||
optional PodResourcePolicy resourcePolicy = 3;
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalerStatus describes the runtime state of the autoscaler.
|
||||
message VerticalPodAutoscalerStatus {
|
||||
// The most recently computed amount of resources recommended by the
|
||||
// autoscaler for the controlled pods.
|
||||
// +optional
|
||||
optional RecommendedPodResources recommendation = 1;
|
||||
|
||||
// Conditions is the set of conditions required for this autoscaler to scale its target,
|
||||
// and indicates whether or not those conditions are met.
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
repeated VerticalPodAutoscalerCondition conditions = 2;
|
||||
}
|
||||
|
||||
|
|
|
@@ -22,8 +22,6 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-// ** Horizontal Pod Autoscaler types start here **
-
 // CrossVersionObjectReference contains enough information to let you identify the referred resource.
 type CrossVersionObjectReference struct {
 // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
@@ -372,212 +370,3 @@ type HorizontalPodAutoscalerList struct {
|
|||
// items is the list of horizontal pod autoscaler objects.
|
||||
Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// ** Vertical Pod Autoscaler types start here **
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects.
|
||||
type VerticalPodAutoscalerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// metadata is the standard list metadata.
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// items is the list of vertical pod autoscaler objects.
|
||||
Items []VerticalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// VerticalPodAutoscaler is the configuration for a vertical pod
|
||||
// autoscaler, which automatically manages pod resources based on historical and
|
||||
// real time resource utilization.
|
||||
type VerticalPodAutoscaler struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Specification of the behavior of the autoscaler.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
|
||||
Spec VerticalPodAutoscalerSpec `json:"spec" protobuf:"bytes,2,name=spec"`
|
||||
|
||||
// Current information about the autoscaler.
|
||||
// +optional
|
||||
Status VerticalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler.
|
||||
type VerticalPodAutoscalerSpec struct {
|
||||
// A label query that determines the set of pods controlled by the Autoscaler.
|
||||
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
|
||||
Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,name=selector"`
|
||||
|
||||
// Describes the rules on how changes are applied to the pods.
|
||||
// If not specified, all fields in the `PodUpdatePolicy` are set to their
|
||||
// default values.
|
||||
// +optional
|
||||
UpdatePolicy *PodUpdatePolicy `json:"updatePolicy,omitempty" protobuf:"bytes,2,opt,name=updatePolicy"`
|
||||
|
||||
// Controls how the autoscaler computes recommended resources.
|
||||
// The resource policy may be used to set constraints on the recommendations
|
||||
// for individual containers. If not specified, the autoscaler computes recommended
|
||||
// resources for all containers in the pod, without additional constraints.
|
||||
// +optional
|
||||
ResourcePolicy *PodResourcePolicy `json:"resourcePolicy,omitempty" protobuf:"bytes,3,opt,name=resourcePolicy"`
|
||||
}
|
||||
|
||||
// PodUpdatePolicy describes the rules on how changes are applied to the pods.
|
||||
type PodUpdatePolicy struct {
|
||||
// Controls when autoscaler applies changes to the pod resources.
|
||||
// The default is 'Auto'.
|
||||
// +optional
|
||||
UpdateMode *UpdateMode `json:"updateMode,omitempty" protobuf:"bytes,1,opt,name=updateMode"`
|
||||
}
|
||||
|
||||
// UpdateMode controls when autoscaler applies changes to the pod resoures.
|
||||
type UpdateMode string
|
||||
|
||||
const (
|
||||
// UpdateModeOff means that autoscaler never changes Pod resources.
|
||||
// The recommender still sets the recommended resources in the
|
||||
// VerticalPodAutoscaler object. This can be used for a "dry run".
|
||||
UpdateModeOff UpdateMode = "Off"
|
||||
// UpdateModeInitial means that autoscaler only assigns resources on pod
|
||||
// creation and does not change them during the lifetime of the pod.
|
||||
UpdateModeInitial UpdateMode = "Initial"
|
||||
// UpdateModeRecreate means that autoscaler assigns resources on pod
|
||||
// creation and additionally can update them during the lifetime of the
|
||||
// pod by deleting and recreating the pod.
|
||||
UpdateModeRecreate UpdateMode = "Recreate"
|
||||
// UpdateModeAuto means that autoscaler assigns resources on pod creation
|
||||
// and additionally can update them during the lifetime of the pod,
|
||||
// using any available update method. Currently this is equivalent to
|
||||
// Recreate, which is the only available update method.
|
||||
UpdateModeAuto UpdateMode = "Auto"
|
||||
)
|
||||
|
||||
// PodResourcePolicy controls how autoscaler computes the recommended resources
|
||||
// for containers belonging to the pod. There can be at most one entry for every
|
||||
// named container and optionally a single wildcard entry with `containerName` = '*',
|
||||
// which handles all containers that don't have individual policies.
|
||||
type PodResourcePolicy struct {
|
||||
// Per-container resource policies.
|
||||
// +optional
|
||||
// +patchMergeKey=containerName
|
||||
// +patchStrategy=merge
|
||||
ContainerPolicies []ContainerResourcePolicy `json:"containerPolicies,omitempty" patchStrategy:"merge" patchMergeKey:"containerName" protobuf:"bytes,1,rep,name=containerPolicies"`
|
||||
}
|
||||
|
||||
// ContainerResourcePolicy controls how autoscaler computes the recommended
|
||||
// resources for a specific container.
|
||||
type ContainerResourcePolicy struct {
|
||||
// Name of the container or DefaultContainerResourcePolicy, in which
|
||||
// case the policy is used by the containers that don't have their own
|
||||
// policy specified.
|
||||
ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
|
||||
// Whether autoscaler is enabled for the container. The default is "Auto".
|
||||
// +optional
|
||||
Mode *ContainerScalingMode `json:"mode,omitempty" protobuf:"bytes,2,opt,name=mode"`
|
||||
// Specifies the minimal amount of resources that will be recommended
|
||||
// for the container. The default is no minimum.
|
||||
// +optional
|
||||
MinAllowed v1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"`
|
||||
// Specifies the maximum amount of resources that will be recommended
|
||||
// for the container. The default is no maximum.
|
||||
// +optional
|
||||
MaxAllowed v1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"`
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultContainerResourcePolicy can be passed as
|
||||
// ContainerResourcePolicy.ContainerName to specify the default policy.
|
||||
DefaultContainerResourcePolicy = "*"
|
||||
)
|
||||
|
||||
// ContainerScalingMode controls whether autoscaler is enabled for a specific
|
||||
// container.
|
||||
type ContainerScalingMode string
|
||||
|
||||
const (
|
||||
// ContainerScalingModeAuto means autoscaling is enabled for a container.
|
||||
ContainerScalingModeAuto ContainerScalingMode = "Auto"
|
||||
// ContainerScalingModeOff means autoscaling is disabled for a container.
|
||||
ContainerScalingModeOff ContainerScalingMode = "Off"
|
||||
)
|
||||
|
||||
// VerticalPodAutoscalerStatus describes the runtime state of the autoscaler.
|
||||
type VerticalPodAutoscalerStatus struct {
|
||||
// The most recently computed amount of resources recommended by the
|
||||
// autoscaler for the controlled pods.
|
||||
// +optional
|
||||
Recommendation *RecommendedPodResources `json:"recommendation,omitempty" protobuf:"bytes,1,opt,name=recommendation"`
|
||||
|
||||
// Conditions is the set of conditions required for this autoscaler to scale its target,
|
||||
// and indicates whether or not those conditions are met.
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
Conditions []VerticalPodAutoscalerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
|
||||
}
|
||||
|
||||
// RecommendedPodResources is the recommendation of resources computed by
|
||||
// autoscaler. It contains a recommendation for each container in the pod
|
||||
// (except for those with `ContainerScalingMode` set to 'Off').
|
||||
type RecommendedPodResources struct {
|
||||
// Resources recommended by the autoscaler for each container.
|
||||
// +optional
|
||||
ContainerRecommendations []RecommendedContainerResources `json:"containerRecommendations,omitempty" protobuf:"bytes,1,rep,name=containerRecommendations"`
|
||||
}
|
||||
|
||||
// RecommendedContainerResources is the recommendation of resources computed by
|
||||
// autoscaler for a specific container. Respects the container resource policy
|
||||
// if present in the spec. In particular the recommendation is not produced for
|
||||
// containers with `ContainerScalingMode` set to 'Off'.
|
||||
type RecommendedContainerResources struct {
|
||||
// Name of the container.
|
||||
ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
|
||||
// Recommended amount of resources.
|
||||
Target v1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"`
|
||||
// Minimum recommended amount of resources.
|
||||
// This amount is not guaranteed to be sufficient for the application to operate in a stable way, however
|
||||
// running with less resources is likely to have significant impact on performance/availability.
|
||||
// +optional
|
||||
LowerBound v1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"`
|
||||
// Maximum recommended amount of resources.
|
||||
// Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum
|
||||
// amount of application is actually capable of consuming.
|
||||
// +optional
|
||||
UpperBound v1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"`
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalerConditionType are the valid conditions of
|
||||
// a VerticalPodAutoscaler.
|
||||
type VerticalPodAutoscalerConditionType string
|
||||
|
||||
var (
|
||||
// RecommendationProvided indicates whether the VPA recommender was able to calculate a recommendation.
|
||||
RecommendationProvided VerticalPodAutoscalerConditionType = "RecommendationProvided"
|
||||
)
|
||||
|
||||
// VerticalPodAutoscalerCondition describes the state of
|
||||
// a VerticalPodAutoscaler at a certain point.
|
||||
type VerticalPodAutoscalerCondition struct {
|
||||
// type describes the current condition
|
||||
Type VerticalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"`
|
||||
// status is the status of the condition (True, False, Unknown)
|
||||
Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"`
|
||||
// lastTransitionTime is the last time the condition transitioned from
|
||||
// one status to another
|
||||
// +optional
|
||||
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
|
||||
// reason is the reason for the condition's last transition.
|
||||
// +optional
|
||||
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
|
||||
// message is a human-readable explanation containing details about
|
||||
// the transition
|
||||
// +optional
|
||||
Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
|
||||
}
|
||||
|
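The large hunk above strips the VerticalPodAutoscaler API that this fork had carried in k8s.io/api/autoscaling/v2beta1. For orientation, here is a sketch of how those removed Go types fit together, using only fields visible in the removed lines; the object name, namespace and labels are placeholders, and this would only compile against the old vendored copy.

```go
package main

import (
	"fmt"

	autoscaling "k8s.io/api/autoscaling/v2beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Sketch only: builds a VerticalPodAutoscaler with the removed v2beta1 types.
	mode := autoscaling.UpdateModeOff // "dry run": recommendations only, no pod updates
	vpa := autoscaling.VerticalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "example-vpa", Namespace: "default"},
		Spec: autoscaling.VerticalPodAutoscalerSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "example"},
			},
			UpdatePolicy: &autoscaling.PodUpdatePolicy{UpdateMode: &mode},
		},
	}
	fmt.Println(vpa.Name, *vpa.Spec.UpdatePolicy.UpdateMode)
}
```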
cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go (generated, vendored)

@@ -27,18 +27,6 @@ package v2beta1
|
|||
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||
var map_ContainerResourcePolicy = map[string]string{
|
||||
"": "ContainerResourcePolicy controls how autoscaler computes the recommended resources for a specific container.",
|
||||
"containerName": "Name of the container or DefaultContainerResourcePolicy, in which case the policy is used by the containers that don't have their own policy specified.",
|
||||
"mode": "Whether autoscaler is enabled for the container. The default is \"Auto\".",
|
||||
"minAllowed": "Specifies the minimal amount of resources that will be recommended for the container. The default is no minimum.",
|
||||
"maxAllowed": "Specifies the maximum amount of resources that will be recommended for the container. The default is no maximum.",
|
||||
}
|
||||
|
||||
func (ContainerResourcePolicy) SwaggerDoc() map[string]string {
|
||||
return map_ContainerResourcePolicy
|
||||
}
|
||||
|
||||
var map_CrossVersionObjectReference = map[string]string{
|
||||
"": "CrossVersionObjectReference contains enough information to let you identify the referred resource.",
|
||||
"kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"",
|
||||
|
|
@@ -182,24 +170,6 @@ func (ObjectMetricStatus) SwaggerDoc() map[string]string {
|
|||
return map_ObjectMetricStatus
|
||||
}
|
||||
|
||||
var map_PodResourcePolicy = map[string]string{
|
||||
"": "PodResourcePolicy controls how autoscaler computes the recommended resources for containers belonging to the pod. There can be at most one entry for every named container and optionally a single wildcard entry with `containerName` = '*', which handles all containers that don't have individual policies.",
|
||||
"containerPolicies": "Per-container resource policies.",
|
||||
}
|
||||
|
||||
func (PodResourcePolicy) SwaggerDoc() map[string]string {
|
||||
return map_PodResourcePolicy
|
||||
}
|
||||
|
||||
var map_PodUpdatePolicy = map[string]string{
|
||||
"": "PodUpdatePolicy describes the rules on how changes are applied to the pods.",
|
||||
"updateMode": "Controls when autoscaler applies changes to the pod resources. The default is 'Auto'.",
|
||||
}
|
||||
|
||||
func (PodUpdatePolicy) SwaggerDoc() map[string]string {
|
||||
return map_PodUpdatePolicy
|
||||
}
|
||||
|
||||
var map_PodsMetricSource = map[string]string{
|
||||
"": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
|
||||
"metricName": "metricName is the name of the metric in question",
|
||||
|
|
@@ -220,27 +190,6 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string {
|
|||
return map_PodsMetricStatus
|
||||
}
|
||||
|
||||
var map_RecommendedContainerResources = map[string]string{
|
||||
"": "RecommendedContainerResources is the recommendation of resources computed by autoscaler for a specific container. Respects the container resource policy if present in the spec. In particular the recommendation is not produced for containers with `ContainerScalingMode` set to 'Off'.",
|
||||
"containerName": "Name of the container.",
|
||||
"target": "Recommended amount of resources.",
|
||||
"lowerBound": "Minimum recommended amount of resources. This amount is not guaranteed to be sufficient for the application to operate in a stable way, however running with less resources is likely to have significant impact on performance/availability.",
|
||||
"upperBound": "Maximum recommended amount of resources. Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum amount of application is actually capable of consuming.",
|
||||
}
|
||||
|
||||
func (RecommendedContainerResources) SwaggerDoc() map[string]string {
|
||||
return map_RecommendedContainerResources
|
||||
}
|
||||
|
||||
var map_RecommendedPodResources = map[string]string{
|
||||
"": "RecommendedPodResources is the recommendation of resources computed by autoscaler. It contains a recommendation for each container in the pod (except for those with `ContainerScalingMode` set to 'Off').",
|
||||
"containerRecommendations": "Resources recommended by the autoscaler for each container.",
|
||||
}
|
||||
|
||||
func (RecommendedPodResources) SwaggerDoc() map[string]string {
|
||||
return map_RecommendedPodResources
|
||||
}
|
||||
|
||||
var map_ResourceMetricSource = map[string]string{
|
||||
"": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
|
|
@@ -263,59 +212,4 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string {
|
|||
return map_ResourceMetricStatus
|
||||
}
|
||||
|
||||
var map_VerticalPodAutoscaler = map[string]string{
|
||||
"": "VerticalPodAutoscaler is the configuration for a vertical pod autoscaler, which automatically manages pod resources based on historical and real time resource utilization.",
|
||||
"metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"spec": "Specification of the behavior of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
|
||||
"status": "Current information about the autoscaler.",
|
||||
}
|
||||
|
||||
func (VerticalPodAutoscaler) SwaggerDoc() map[string]string {
|
||||
return map_VerticalPodAutoscaler
|
||||
}
|
||||
|
||||
var map_VerticalPodAutoscalerCondition = map[string]string{
|
||||
"": "VerticalPodAutoscalerCondition describes the state of a VerticalPodAutoscaler at a certain point.",
|
||||
"type": "type describes the current condition",
|
||||
"status": "status is the status of the condition (True, False, Unknown)",
|
||||
"lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another",
|
||||
"reason": "reason is the reason for the condition's last transition.",
|
||||
"message": "message is a human-readable explanation containing details about the transition",
|
||||
}
|
||||
|
||||
func (VerticalPodAutoscalerCondition) SwaggerDoc() map[string]string {
|
||||
return map_VerticalPodAutoscalerCondition
|
||||
}
|
||||
|
||||
var map_VerticalPodAutoscalerList = map[string]string{
|
||||
"": "VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects.",
|
||||
"metadata": "metadata is the standard list metadata.",
|
||||
"items": "items is the list of vertical pod autoscaler objects.",
|
||||
}
|
||||
|
||||
func (VerticalPodAutoscalerList) SwaggerDoc() map[string]string {
|
||||
return map_VerticalPodAutoscalerList
|
||||
}
|
||||
|
||||
var map_VerticalPodAutoscalerSpec = map[string]string{
|
||||
"": "VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler.",
|
||||
"selector": "A label query that determines the set of pods controlled by the Autoscaler. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
|
||||
"updatePolicy": "Describes the rules on how changes are applied to the pods. If not specified, all fields in the `PodUpdatePolicy` are set to their default values.",
|
||||
"resourcePolicy": "Controls how the autoscaler computes recommended resources. The resource policy may be used to set constraints on the recommendations for individual containers. If not specified, the autoscaler computes recommended resources for all containers in the pod, without additional constraints.",
|
||||
}
|
||||
|
||||
func (VerticalPodAutoscalerSpec) SwaggerDoc() map[string]string {
|
||||
return map_VerticalPodAutoscalerSpec
|
||||
}
|
||||
|
||||
var map_VerticalPodAutoscalerStatus = map[string]string{
|
||||
"": "VerticalPodAutoscalerStatus describes the runtime state of the autoscaler.",
|
||||
"recommendation": "The most recently computed amount of resources recommended by the autoscaler for the controlled pods.",
|
||||
"conditions": "Conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.",
|
||||
}
|
||||
|
||||
func (VerticalPodAutoscalerStatus) SwaggerDoc() map[string]string {
|
||||
return map_VerticalPodAutoscalerStatus
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
|
||||
|
cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go (generated, vendored)

@@ -21,50 +21,10 @@ limitations under the License.
|
|||
package v2beta1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ContainerResourcePolicy) DeepCopyInto(out *ContainerResourcePolicy) {
|
||||
*out = *in
|
||||
if in.Mode != nil {
|
||||
in, out := &in.Mode, &out.Mode
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ContainerScalingMode)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
if in.MinAllowed != nil {
|
||||
in, out := &in.MinAllowed, &out.MinAllowed
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.MaxAllowed != nil {
|
||||
in, out := &in.MaxAllowed, &out.MaxAllowed
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourcePolicy.
|
||||
func (in *ContainerResourcePolicy) DeepCopy() *ContainerResourcePolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ContainerResourcePolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
|
||||
*out = *in
|
||||
|
|
@@ -89,7 +49,7 @@ func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
|
|||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(meta_v1.LabelSelector)
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
|
@@ -132,7 +92,7 @@ func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
|
|||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(meta_v1.LabelSelector)
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
|
@@ -457,54 +417,6 @@ func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodResourcePolicy) DeepCopyInto(out *PodResourcePolicy) {
|
||||
*out = *in
|
||||
if in.ContainerPolicies != nil {
|
||||
in, out := &in.ContainerPolicies, &out.ContainerPolicies
|
||||
*out = make([]ContainerResourcePolicy, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourcePolicy.
|
||||
func (in *PodResourcePolicy) DeepCopy() *PodResourcePolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodResourcePolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodUpdatePolicy) DeepCopyInto(out *PodUpdatePolicy) {
|
||||
*out = *in
|
||||
if in.UpdateMode != nil {
|
||||
in, out := &in.UpdateMode, &out.UpdateMode
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(UpdateMode)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodUpdatePolicy.
|
||||
func (in *PodUpdatePolicy) DeepCopy() *PodUpdatePolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodUpdatePolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
|
||||
*out = *in
|
||||
|
|
@@ -539,66 +451,6 @@ func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RecommendedContainerResources) DeepCopyInto(out *RecommendedContainerResources) {
|
||||
*out = *in
|
||||
if in.Target != nil {
|
||||
in, out := &in.Target, &out.Target
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.LowerBound != nil {
|
||||
in, out := &in.LowerBound, &out.LowerBound
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.UpperBound != nil {
|
||||
in, out := &in.UpperBound, &out.UpperBound
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedContainerResources.
|
||||
func (in *RecommendedContainerResources) DeepCopy() *RecommendedContainerResources {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RecommendedContainerResources)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RecommendedPodResources) DeepCopyInto(out *RecommendedPodResources) {
|
||||
*out = *in
|
||||
if in.ContainerRecommendations != nil {
|
||||
in, out := &in.ContainerRecommendations, &out.ContainerRecommendations
|
||||
*out = make([]RecommendedContainerResources, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedPodResources.
|
||||
func (in *RecommendedPodResources) DeepCopy() *RecommendedPodResources {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RecommendedPodResources)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
|
||||
*out = *in
|
||||
|
|
@@ -658,156 +510,3 @@ func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
|
|||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler.
|
||||
func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscaler)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *VerticalPodAutoscaler) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscalerCondition) DeepCopyInto(out *VerticalPodAutoscalerCondition) {
|
||||
*out = *in
|
||||
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCondition.
|
||||
func (in *VerticalPodAutoscalerCondition) DeepCopy() *VerticalPodAutoscalerCondition {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscalerCondition)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscalerList) DeepCopyInto(out *VerticalPodAutoscalerList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]VerticalPodAutoscaler, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerList.
|
||||
func (in *VerticalPodAutoscalerList) DeepCopy() *VerticalPodAutoscalerList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscalerList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *VerticalPodAutoscalerList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscalerSpec) DeepCopyInto(out *VerticalPodAutoscalerSpec) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(meta_v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.UpdatePolicy != nil {
|
||||
in, out := &in.UpdatePolicy, &out.UpdatePolicy
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PodUpdatePolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.ResourcePolicy != nil {
|
||||
in, out := &in.ResourcePolicy, &out.ResourcePolicy
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PodResourcePolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerSpec.
|
||||
func (in *VerticalPodAutoscalerSpec) DeepCopy() *VerticalPodAutoscalerSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscalerSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscalerStatus) DeepCopyInto(out *VerticalPodAutoscalerStatus) {
|
||||
*out = *in
|
||||
if in.Recommendation != nil {
|
||||
in, out := &in.Recommendation, &out.Recommendation
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(RecommendedPodResources)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]VerticalPodAutoscalerCondition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerStatus.
|
||||
func (in *VerticalPodAutoscalerStatus) DeepCopy() *VerticalPodAutoscalerStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscalerStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
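The zz_generated.deepcopy.go hunks above drop the generated deep-copy helpers for the removed VPA types. As a reminder of what those helpers guarantee, here is a short sketch; it is again only meaningful against the old vendored types.

```go
package main

import (
	"fmt"

	autoscaling "k8s.io/api/autoscaling/v2beta1"
)

func main() {
	// Sketch only: DeepCopy returns a value that shares no mutable state, so
	// editing the copy's condition slice leaves the original untouched.
	orig := autoscaling.VerticalPodAutoscalerStatus{
		Conditions: []autoscaling.VerticalPodAutoscalerCondition{
			{Type: autoscaling.RecommendationProvided, Reason: "initial"},
		},
	}
	cp := orig.DeepCopy()
	cp.Conditions[0].Reason = "changed-only-in-copy"
	fmt.Println(orig.Conditions[0].Reason, cp.Conditions[0].Reason) // initial changed-only-in-copy
}
```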
@@ -5,7 +5,6 @@ go_library(
 srcs = [
 "horizontalpodautoscaler.go",
 "interface.go",
-"verticalpodautoscaler.go",
 ],
 importpath = "k8s.io/client-go/informers/autoscaling/v2beta1",
 visibility = ["//visibility:public"],
@@ -26,8 +26,6 @@ import (
 type Interface interface {
 // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
 HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer
-// VerticalPodAutoscalers returns a VerticalPodAutoscalerInformer.
-VerticalPodAutoscalers() VerticalPodAutoscalerInformer
 }

 type version struct {
@@ -45,8 +43,3 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
 func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer {
 return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
 }
-
-// VerticalPodAutoscalers returns a VerticalPodAutoscalerInformer.
-func (v *version) VerticalPodAutoscalers() VerticalPodAutoscalerInformer {
-return &verticalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
@@ -1,89 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v2beta1
-
-import (
-time "time"
-
-autoscaling_v2beta1 "k8s.io/api/autoscaling/v2beta1"
-v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-runtime "k8s.io/apimachinery/pkg/runtime"
-watch "k8s.io/apimachinery/pkg/watch"
-internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
-kubernetes "k8s.io/client-go/kubernetes"
-v2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1"
-cache "k8s.io/client-go/tools/cache"
-)
-
-// VerticalPodAutoscalerInformer provides access to a shared informer and lister for
-// VerticalPodAutoscalers.
-type VerticalPodAutoscalerInformer interface {
-Informer() cache.SharedIndexInformer
-Lister() v2beta1.VerticalPodAutoscalerLister
-}
-
-type verticalPodAutoscalerInformer struct {
-factory internalinterfaces.SharedInformerFactory
-tweakListOptions internalinterfaces.TweakListOptionsFunc
-namespace string
-}
-
-// NewVerticalPodAutoscalerInformer constructs a new informer for VerticalPodAutoscaler type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewVerticalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
-return NewFilteredVerticalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil)
-}
-
-// NewFilteredVerticalPodAutoscalerInformer constructs a new informer for VerticalPodAutoscaler type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredVerticalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
-return cache.NewSharedIndexInformer(
-&cache.ListWatch{
-ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-if tweakListOptions != nil {
-tweakListOptions(&options)
-}
-return client.AutoscalingV2beta1().VerticalPodAutoscalers(namespace).List(options)
-},
-WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-if tweakListOptions != nil {
-tweakListOptions(&options)
-}
-return client.AutoscalingV2beta1().VerticalPodAutoscalers(namespace).Watch(options)
-},
-},
-&autoscaling_v2beta1.VerticalPodAutoscaler{},
-resyncPeriod,
-indexers,
-)
-}
-
-func (f *verticalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-return NewFilteredVerticalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *verticalPodAutoscalerInformer) Informer() cache.SharedIndexInformer {
-return f.factory.InformerFor(&autoscaling_v2beta1.VerticalPodAutoscaler{}, f.defaultInformer)
-}
-
-func (f *verticalPodAutoscalerInformer) Lister() v2beta1.VerticalPodAutoscalerLister {
-return v2beta1.NewVerticalPodAutoscalerLister(f.Informer().GetIndexer())
-}
@@ -125,8 +125,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
	// Group=autoscaling, Version=v2beta1
	case v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta1().HorizontalPodAutoscalers().Informer()}, nil
	case v2beta1.SchemeGroupVersion.WithResource("verticalpodautoscalers"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta1().VerticalPodAutoscalers().Informer()}, nil

	// Group=batch, Version=v1
	case batch_v1.SchemeGroupVersion.WithResource("jobs"):
@@ -7,7 +7,6 @@ go_library(
        "doc.go",
        "generated_expansion.go",
        "horizontalpodautoscaler.go",
        "verticalpodautoscaler.go",
    ],
    importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1",
    visibility = ["//visibility:public"],

@@ -28,7 +28,6 @@ import (
type AutoscalingV2beta1Interface interface {
	RESTClient() rest.Interface
	HorizontalPodAutoscalersGetter
	VerticalPodAutoscalersGetter
}

// AutoscalingV2beta1Client is used to interact with features provided by the autoscaling group.

@@ -40,10 +39,6 @@ func (c *AutoscalingV2beta1Client) HorizontalPodAutoscalers(namespace string) Ho
	return newHorizontalPodAutoscalers(c, namespace)
}

func (c *AutoscalingV2beta1Client) VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerInterface {
	return newVerticalPodAutoscalers(c, namespace)
}

// NewForConfig creates a new AutoscalingV2beta1Client for the given config.
func NewForConfig(c *rest.Config) (*AutoscalingV2beta1Client, error) {
	config := *c
@@ -6,7 +6,6 @@ go_library(
        "doc.go",
        "fake_autoscaling_client.go",
        "fake_horizontalpodautoscaler.go",
        "fake_verticalpodautoscaler.go",
    ],
    importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake",
    visibility = ["//visibility:public"],

@@ -32,10 +32,6 @@ func (c *FakeAutoscalingV2beta1) HorizontalPodAutoscalers(namespace string) v2be
	return &FakeHorizontalPodAutoscalers{c, namespace}
}

func (c *FakeAutoscalingV2beta1) VerticalPodAutoscalers(namespace string) v2beta1.VerticalPodAutoscalerInterface {
	return &FakeVerticalPodAutoscalers{c, namespace}
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeAutoscalingV2beta1) RESTClient() rest.Interface {
@@ -1,140 +0,0 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package fake

import (
	v2beta1 "k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	labels "k8s.io/apimachinery/pkg/labels"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	types "k8s.io/apimachinery/pkg/types"
	watch "k8s.io/apimachinery/pkg/watch"
	testing "k8s.io/client-go/testing"
)

// FakeVerticalPodAutoscalers implements VerticalPodAutoscalerInterface
type FakeVerticalPodAutoscalers struct {
	Fake *FakeAutoscalingV2beta1
	ns   string
}

var verticalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2beta1", Resource: "verticalpodautoscalers"}

var verticalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta1", Kind: "VerticalPodAutoscaler"}

// Get takes name of the verticalPodAutoscaler, and returns the corresponding verticalPodAutoscaler object, and an error if there is any.
func (c *FakeVerticalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.VerticalPodAutoscaler, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(verticalpodautoscalersResource, c.ns, name), &v2beta1.VerticalPodAutoscaler{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v2beta1.VerticalPodAutoscaler), err
}

// List takes label and field selectors, and returns the list of VerticalPodAutoscalers that match those selectors.
func (c *FakeVerticalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.VerticalPodAutoscalerList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewListAction(verticalpodautoscalersResource, verticalpodautoscalersKind, c.ns, opts), &v2beta1.VerticalPodAutoscalerList{})

	if obj == nil {
		return nil, err
	}

	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &v2beta1.VerticalPodAutoscalerList{ListMeta: obj.(*v2beta1.VerticalPodAutoscalerList).ListMeta}
	for _, item := range obj.(*v2beta1.VerticalPodAutoscalerList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}

// Watch returns a watch.Interface that watches the requested verticalPodAutoscalers.
func (c *FakeVerticalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewWatchAction(verticalpodautoscalersResource, c.ns, opts))

}

// Create takes the representation of a verticalPodAutoscaler and creates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any.
func (c *FakeVerticalPodAutoscalers) Create(verticalPodAutoscaler *v2beta1.VerticalPodAutoscaler) (result *v2beta1.VerticalPodAutoscaler, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewCreateAction(verticalpodautoscalersResource, c.ns, verticalPodAutoscaler), &v2beta1.VerticalPodAutoscaler{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v2beta1.VerticalPodAutoscaler), err
}

// Update takes the representation of a verticalPodAutoscaler and updates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any.
func (c *FakeVerticalPodAutoscalers) Update(verticalPodAutoscaler *v2beta1.VerticalPodAutoscaler) (result *v2beta1.VerticalPodAutoscaler, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateAction(verticalpodautoscalersResource, c.ns, verticalPodAutoscaler), &v2beta1.VerticalPodAutoscaler{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v2beta1.VerticalPodAutoscaler), err
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeVerticalPodAutoscalers) UpdateStatus(verticalPodAutoscaler *v2beta1.VerticalPodAutoscaler) (*v2beta1.VerticalPodAutoscaler, error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateSubresourceAction(verticalpodautoscalersResource, "status", c.ns, verticalPodAutoscaler), &v2beta1.VerticalPodAutoscaler{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v2beta1.VerticalPodAutoscaler), err
}

// Delete takes name of the verticalPodAutoscaler and deletes it. Returns an error if one occurs.
func (c *FakeVerticalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewDeleteAction(verticalpodautoscalersResource, c.ns, name), &v2beta1.VerticalPodAutoscaler{})

	return err
}

// DeleteCollection deletes a collection of objects.
func (c *FakeVerticalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	action := testing.NewDeleteCollectionAction(verticalpodautoscalersResource, c.ns, listOptions)

	_, err := c.Fake.Invokes(action, &v2beta1.VerticalPodAutoscalerList{})
	return err
}

// Patch applies the patch and returns the patched verticalPodAutoscaler.
func (c *FakeVerticalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.VerticalPodAutoscaler, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(verticalpodautoscalersResource, c.ns, name, data, subresources...), &v2beta1.VerticalPodAutoscaler{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v2beta1.VerticalPodAutoscaler), err
}
@@ -19,5 +19,3 @@ limitations under the License.
package v2beta1

type HorizontalPodAutoscalerExpansion interface{}

type VerticalPodAutoscalerExpansion interface{}
@@ -1,174 +0,0 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package v2beta1

import (
	v2beta1 "k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	types "k8s.io/apimachinery/pkg/types"
	watch "k8s.io/apimachinery/pkg/watch"
	scheme "k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

// VerticalPodAutoscalersGetter has a method to return a VerticalPodAutoscalerInterface.
// A group's client should implement this interface.
type VerticalPodAutoscalersGetter interface {
	VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerInterface
}

// VerticalPodAutoscalerInterface has methods to work with VerticalPodAutoscaler resources.
type VerticalPodAutoscalerInterface interface {
	Create(*v2beta1.VerticalPodAutoscaler) (*v2beta1.VerticalPodAutoscaler, error)
	Update(*v2beta1.VerticalPodAutoscaler) (*v2beta1.VerticalPodAutoscaler, error)
	UpdateStatus(*v2beta1.VerticalPodAutoscaler) (*v2beta1.VerticalPodAutoscaler, error)
	Delete(name string, options *v1.DeleteOptions) error
	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
	Get(name string, options v1.GetOptions) (*v2beta1.VerticalPodAutoscaler, error)
	List(opts v1.ListOptions) (*v2beta1.VerticalPodAutoscalerList, error)
	Watch(opts v1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.VerticalPodAutoscaler, err error)
	VerticalPodAutoscalerExpansion
}

// verticalPodAutoscalers implements VerticalPodAutoscalerInterface
type verticalPodAutoscalers struct {
	client rest.Interface
	ns     string
}

// newVerticalPodAutoscalers returns a VerticalPodAutoscalers
func newVerticalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *verticalPodAutoscalers {
	return &verticalPodAutoscalers{
		client: c.RESTClient(),
		ns:     namespace,
	}
}

// Get takes name of the verticalPodAutoscaler, and returns the corresponding verticalPodAutoscaler object, and an error if there is any.
func (c *verticalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.VerticalPodAutoscaler, err error) {
	result = &v2beta1.VerticalPodAutoscaler{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// List takes label and field selectors, and returns the list of VerticalPodAutoscalers that match those selectors.
func (c *verticalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.VerticalPodAutoscalerList, err error) {
	result = &v2beta1.VerticalPodAutoscalerList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		VersionedParams(&opts, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// Watch returns a watch.Interface that watches the requested verticalPodAutoscalers.
func (c *verticalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		VersionedParams(&opts, scheme.ParameterCodec).
		Watch()
}

// Create takes the representation of a verticalPodAutoscaler and creates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any.
func (c *verticalPodAutoscalers) Create(verticalPodAutoscaler *v2beta1.VerticalPodAutoscaler) (result *v2beta1.VerticalPodAutoscaler, err error) {
	result = &v2beta1.VerticalPodAutoscaler{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		Body(verticalPodAutoscaler).
		Do().
		Into(result)
	return
}

// Update takes the representation of a verticalPodAutoscaler and updates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any.
func (c *verticalPodAutoscalers) Update(verticalPodAutoscaler *v2beta1.VerticalPodAutoscaler) (result *v2beta1.VerticalPodAutoscaler, err error) {
	result = &v2beta1.VerticalPodAutoscaler{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		Name(verticalPodAutoscaler.Name).
		Body(verticalPodAutoscaler).
		Do().
		Into(result)
	return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().

func (c *verticalPodAutoscalers) UpdateStatus(verticalPodAutoscaler *v2beta1.VerticalPodAutoscaler) (result *v2beta1.VerticalPodAutoscaler, err error) {
	result = &v2beta1.VerticalPodAutoscaler{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		Name(verticalPodAutoscaler.Name).
		SubResource("status").
		Body(verticalPodAutoscaler).
		Do().
		Into(result)
	return
}

// Delete takes name of the verticalPodAutoscaler and deletes it. Returns an error if one occurs.
func (c *verticalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		Name(name).
		Body(options).
		Do().
		Error()
}

// DeleteCollection deletes a collection of objects.
func (c *verticalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		VersionedParams(&listOptions, scheme.ParameterCodec).
		Body(options).
		Do().
		Error()
}

// Patch applies the patch and returns the patched verticalPodAutoscaler.
func (c *verticalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.VerticalPodAutoscaler, err error) {
	result = &v2beta1.VerticalPodAutoscaler{}
	err = c.client.Patch(pt).
		Namespace(c.ns).
		Resource("verticalpodautoscalers").
		SubResource(subresources...).
		Name(name).
		Body(data).
		Do().
		Into(result)
	return
}
@@ -5,7 +5,6 @@ go_library(
    srcs = [
        "expansion_generated.go",
        "horizontalpodautoscaler.go",
        "verticalpodautoscaler.go",
    ],
    importpath = "k8s.io/client-go/listers/autoscaling/v2beta1",
    visibility = ["//visibility:public"],

@@ -25,11 +25,3 @@ type HorizontalPodAutoscalerListerExpansion interface{}
// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to
// HorizontalPodAutoscalerNamespaceLister.
type HorizontalPodAutoscalerNamespaceListerExpansion interface{}

// VerticalPodAutoscalerListerExpansion allows custom methods to be added to
// VerticalPodAutoscalerLister.
type VerticalPodAutoscalerListerExpansion interface{}

// VerticalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to
// VerticalPodAutoscalerNamespaceLister.
type VerticalPodAutoscalerNamespaceListerExpansion interface{}
@@ -1,94 +0,0 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by lister-gen. DO NOT EDIT.

package v2beta1

import (
	v2beta1 "k8s.io/api/autoscaling/v2beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// VerticalPodAutoscalerLister helps list VerticalPodAutoscalers.
type VerticalPodAutoscalerLister interface {
	// List lists all VerticalPodAutoscalers in the indexer.
	List(selector labels.Selector) (ret []*v2beta1.VerticalPodAutoscaler, err error)
	// VerticalPodAutoscalers returns an object that can list and get VerticalPodAutoscalers.
	VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerNamespaceLister
	VerticalPodAutoscalerListerExpansion
}

// verticalPodAutoscalerLister implements the VerticalPodAutoscalerLister interface.
type verticalPodAutoscalerLister struct {
	indexer cache.Indexer
}

// NewVerticalPodAutoscalerLister returns a new VerticalPodAutoscalerLister.
func NewVerticalPodAutoscalerLister(indexer cache.Indexer) VerticalPodAutoscalerLister {
	return &verticalPodAutoscalerLister{indexer: indexer}
}

// List lists all VerticalPodAutoscalers in the indexer.
func (s *verticalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta1.VerticalPodAutoscaler, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v2beta1.VerticalPodAutoscaler))
	})
	return ret, err
}

// VerticalPodAutoscalers returns an object that can list and get VerticalPodAutoscalers.
func (s *verticalPodAutoscalerLister) VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerNamespaceLister {
	return verticalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace}
}

// VerticalPodAutoscalerNamespaceLister helps list and get VerticalPodAutoscalers.
type VerticalPodAutoscalerNamespaceLister interface {
	// List lists all VerticalPodAutoscalers in the indexer for a given namespace.
	List(selector labels.Selector) (ret []*v2beta1.VerticalPodAutoscaler, err error)
	// Get retrieves the VerticalPodAutoscaler from the indexer for a given namespace and name.
	Get(name string) (*v2beta1.VerticalPodAutoscaler, error)
	VerticalPodAutoscalerNamespaceListerExpansion
}

// verticalPodAutoscalerNamespaceLister implements the VerticalPodAutoscalerNamespaceLister
// interface.
type verticalPodAutoscalerNamespaceLister struct {
	indexer   cache.Indexer
	namespace string
}

// List lists all VerticalPodAutoscalers in the indexer for a given namespace.
func (s verticalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta1.VerticalPodAutoscaler, err error) {
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*v2beta1.VerticalPodAutoscaler))
	})
	return ret, err
}

// Get retrieves the VerticalPodAutoscaler from the indexer for a given namespace and name.
func (s verticalPodAutoscalerNamespaceLister) Get(name string) (*v2beta1.VerticalPodAutoscaler, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v2beta1.Resource("verticalpodautoscaler"), name)
	}
	return obj.(*v2beta1.VerticalPodAutoscaler), nil
}
@@ -88,7 +88,8 @@ func WriteKey(keyPath string, data []byte) error {
// can't find one, it will generate a new key and store it there.
func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
	loadedData, err := ioutil.ReadFile(keyPath)
	if err == nil {
	// Call verifyKeyData to ensure the file wasn't empty/corrupt.
	if err == nil && verifyKeyData(loadedData) {
		return loadedData, false, err
	}
	if !os.IsNotExist(err) {
@@ -181,3 +182,12 @@ func PublicKeysFromFile(file string) ([]interface{}, error) {
	}
	return keys, nil
}

// verifyKeyData returns true if the provided data appears to be a valid private key.
func verifyKeyData(data []byte) bool {
	if len(data) == 0 {
		return false
	}
	_, err := ParsePrivateKeyPEM(data)
	return err == nil
}
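For context on the hunk above, here is a minimal, hypothetical sketch of how a caller might exercise LoadOrGenerateKeyFile after this change. The import path and the key file path below are assumptions made for illustration only; they are not part of this diff.

```go
package main

// Illustrative sketch only, not part of the vendored diff above. The import
// path is an assumption about where LoadOrGenerateKeyFile lives in this
// vendor tree, and the key path is hypothetical.
import (
	"fmt"
	"log"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	// With the verifyKeyData guard, an empty or unparseable key file on disk is
	// no longer returned as-is; a fresh key is generated in its place.
	data, generated, err := certutil.LoadOrGenerateKeyFile("/tmp/example-serving.key")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("key size: %d bytes, newly generated: %v\n", len(data), generated)
}
```

The practical effect of the guard is that callers no longer need to pre-validate the file themselves before trusting the returned bytes.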
21 cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go generated vendored
@@ -195,8 +195,6 @@ type KubeletFlags struct {
	// This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node.
	// This can be useful for debugging volume related issues.
	KeepTerminatedPodVolumes bool
	// enable gathering custom metrics.
	EnableCustomMetrics bool
	// allowPrivileged enables containers to request privileged mode.
	// Defaults to true.
	AllowPrivileged bool

@@ -234,15 +232,13 @@ func NewKubeletFlags() *KubeletFlags {
		RegisterSchedulable:                 true,
		ExperimentalKernelMemcgNotification: false,
		RemoteRuntimeEndpoint:               remoteRuntimeEndpoint,
		// TODO(#54161:v1.11.0): Remove --enable-custom-metrics flag, it is deprecated.
		EnableCustomMetrics: false,
		NodeLabels:          make(map[string]string),
		VolumePluginDir:     "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
		RegisterNode:        true,
		SeccompProfileRoot:  filepath.Join(defaultRootDir, "seccomp"),
		HostNetworkSources:  []string{kubetypes.AllSource},
		HostPIDSources:      []string{kubetypes.AllSource},
		HostIPCSources:      []string{kubetypes.AllSource},
		NodeLabels:          make(map[string]string),
		VolumePluginDir:     "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
		RegisterNode:        true,
		SeccompProfileRoot:  filepath.Join(defaultRootDir, "seccomp"),
		HostNetworkSources:  []string{kubetypes.AllSource},
		HostPIDSources:      []string{kubetypes.AllSource},
		HostIPCSources:      []string{kubetypes.AllSource},
		// TODO(#56523:v1.12.0): Remove --cadvisor-port, it has been deprecated since v1.10
		CAdvisorPort: 0,
		// TODO(#58010:v1.13.0): Remove --allow-privileged, it is deprecated

@@ -421,9 +417,6 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) {
	fs.MarkDeprecated("non-masquerade-cidr", "will be removed in a future version")
	fs.BoolVar(&f.KeepTerminatedPodVolumes, "keep-terminated-pod-volumes", f.KeepTerminatedPodVolumes, "Keep terminated pod volumes mounted to the node after the pod terminates. Can be useful for debugging volume related issues.")
	fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version")
	// TODO(#54161:v1.11.0): Remove --enable-custom-metrics flag, it is deprecated.
	fs.BoolVar(&f.EnableCustomMetrics, "enable-custom-metrics", f.EnableCustomMetrics, "Support for gathering custom metrics.")
	fs.MarkDeprecated("enable-custom-metrics", "will be removed in a future version")
	// TODO(#58010:v1.13.0): Remove --allow-privileged, it is deprecated
	fs.BoolVar(&f.AllowPrivileged, "allow-privileged", f.AllowPrivileged, "If true, allow containers to request privileged mode. Default: true")
	fs.MarkDeprecated("allow-privileged", "will be removed in a future version")
@@ -420,210 +420,3 @@ type HorizontalPodAutoscalerList struct {
	// Items is the list of horizontal pod autoscaler objects.
	Items []HorizontalPodAutoscaler
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects.
type VerticalPodAutoscalerList struct {
	metav1.TypeMeta
	// metadata is the standard list metadata.
	// +optional
	metav1.ListMeta

	// items is the list of vertical pod autoscaler objects.
	Items []VerticalPodAutoscaler
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// VerticalPodAutoscaler is the configuration for a vertical pod
// autoscaler, which automatically manages pod resources based on historical and
// real time resource utilization.
type VerticalPodAutoscaler struct {
	metav1.TypeMeta
	// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta

	// Specification of the behavior of the autoscaler.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
	Spec VerticalPodAutoscalerSpec

	// Current information about the autoscaler.
	// +optional
	Status VerticalPodAutoscalerStatus
}

// VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler.
type VerticalPodAutoscalerSpec struct {
	// A label query that determines the set of pods controlled by the Autoscaler.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
	Selector *metav1.LabelSelector

	// Describes the rules on how changes are applied to the pods.
	// If not specified, all fields in the `PodUpdatePolicy` are set to their
	// default values.
	// +optional
	UpdatePolicy *PodUpdatePolicy

	// Controls how the autoscaler computes recommended resources.
	// The resource policy may be used to set constraints on the recommendations
	// for individual containers. If not specified, the autoscaler computes recommended
	// resources for all containers in the pod, without additional constraints.
	// +optional
	ResourcePolicy *PodResourcePolicy
}

// PodUpdatePolicy describes the rules on how changes are applied to the pods.
type PodUpdatePolicy struct {
	// Controls when autoscaler applies changes to the pod resources.
	// The default is 'Auto'.
	// +optional
	UpdateMode *UpdateMode
}

// UpdateMode controls when autoscaler applies changes to the pod resoures.
type UpdateMode string

const (
	// UpdateModeOff means that autoscaler never changes Pod resources.
	// The recommender still sets the recommended resources in the
	// VerticalPodAutoscaler object. This can be used for a "dry run".
	UpdateModeOff UpdateMode = "Off"
	// UpdateModeInitial means that autoscaler only assigns resources on pod
	// creation and does not change them during the lifetime of the pod.
	UpdateModeInitial UpdateMode = "Initial"
	// UpdateModeRecreate means that autoscaler assigns resources on pod
	// creation and additionally can update them during the lifetime of the
	// pod by deleting and recreating the pod.
	UpdateModeRecreate UpdateMode = "Recreate"
	// UpdateModeAuto means that autoscaler assigns resources on pod creation
	// and additionally can update them during the lifetime of the pod,
	// using any available update method. Currently this is equivalent to
	// Recreate, which is the only available update method.
	UpdateModeAuto UpdateMode = "Auto"
)

// PodResourcePolicy controls how autoscaler computes the recommended resources
// for containers belonging to the pod. There can be at most one entry for every
// named container and optionally a single wildcard entry with `containerName` = '*',
// which handles all containers that don't have individual policies.
type PodResourcePolicy struct {
	// Per-container resource policies.
	// +optional
	// +patchMergeKey=containerName
	// +patchStrategy=merge
	ContainerPolicies []ContainerResourcePolicy
}

// ContainerResourcePolicy controls how autoscaler computes the recommended
// resources for a specific container.
type ContainerResourcePolicy struct {
	// Name of the container or DefaultContainerResourcePolicy, in which
	// case the policy is used by the containers that don't have their own
	// policy specified.
	ContainerName string
	// Whether autoscaler is enabled for the container. The default is "Auto".
	// +optional
	Mode *ContainerScalingMode
	// Specifies the minimal amount of resources that will be recommended
	// for the container. The default is no minimum.
	// +optional
	MinAllowed api.ResourceList
	// Specifies the maximum amount of resources that will be recommended
	// for the container. The default is no maximum.
	// +optional
	MaxAllowed api.ResourceList
}

const (
	// DefaultContainerResourcePolicy can be passed as
	// ContainerResourcePolicy.ContainerName to specify the default policy.
	DefaultContainerResourcePolicy = "*"
)

// ContainerScalingMode controls whether autoscaler is enabled for a specific
// container.
type ContainerScalingMode string

const (
	// ContainerScalingModeAuto means autoscaling is enabled for a container.
	ContainerScalingModeAuto ContainerScalingMode = "Auto"
	// ContainerScalingModeOff means autoscaling is disabled for a container.
	ContainerScalingModeOff ContainerScalingMode = "Off"
)

// VerticalPodAutoscalerStatus describes the runtime state of the autoscaler.
type VerticalPodAutoscalerStatus struct {
	// The most recently computed amount of resources recommended by the
	// autoscaler for the controlled pods.
	// +optional
	Recommendation *RecommendedPodResources

	// Conditions is the set of conditions required for this autoscaler to scale its target,
	// and indicates whether or not those conditions are met.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []VerticalPodAutoscalerCondition
}

// RecommendedPodResources is the recommendation of resources computed by
// autoscaler. It contains a recommendation for each container in the pod
// (except for those with `ContainerScalingMode` set to 'Off').
type RecommendedPodResources struct {
	// Resources recommended by the autoscaler for each container.
	// +optional
	ContainerRecommendations []RecommendedContainerResources
}

// RecommendedContainerResources is the recommendation of resources computed by
// autoscaler for a specific container. Respects the container resource policy
// if present in the spec. In particular the recommendation is not produced for
// containers with `ContainerScalingMode` set to 'Off'.
type RecommendedContainerResources struct {
	// Name of the container.
	ContainerName string
	// Recommended amount of resources.
	Target api.ResourceList
	// Minimum recommended amount of resources.
	// This amount is not guaranteed to be sufficient for the application to operate in a stable way, however
	// running with less resources is likely to have significant impact on performance/availability.
	// +optional
	LowerBound api.ResourceList
	// Maximum recommended amount of resources.
	// Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum
	// amount of application is actually capable of consuming.
	// +optional
	UpperBound api.ResourceList
}

// VerticalPodAutoscalerConditionType are the valid conditions of
// a VerticalPodAutoscaler.
type VerticalPodAutoscalerConditionType string

var (
	// RecommendationProvided indicates whether the VPA recommender was able to calculate a recommendation.
	RecommendationProvided VerticalPodAutoscalerConditionType = "RecommendationProvided"
)

// VerticalPodAutoscalerCondition describes the state of
// a VerticalPodAutoscaler at a certain point.
type VerticalPodAutoscalerCondition struct {
	// type describes the current condition
	Type VerticalPodAutoscalerConditionType
	// status is the status of the condition (True, False, Unknown)
	Status api.ConditionStatus
	// lastTransitionTime is the last time the condition transitioned from
	// one status to another
	// +optional
	LastTransitionTime metav1.Time
	// reason is the reason for the condition's last transition.
	// +optional
	Reason string
	// message is a human-readable explanation containing details about
	// the transition
	// +optional
	Message string
}
@@ -19,7 +19,6 @@ go_library(
    deps = [
        "//pkg/apis/autoscaling:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/v1:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -24,9 +24,9 @@ import (
	unsafe "unsafe"

	v2beta1 "k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/api/core/v1"
	core_v1 "k8s.io/api/core/v1"
	resource "k8s.io/apimachinery/pkg/api/resource"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
	autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
@@ -41,8 +41,6 @@ func init() {
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
	return scheme.AddGeneratedConversionFuncs(
		Convert_v2beta1_ContainerResourcePolicy_To_autoscaling_ContainerResourcePolicy,
		Convert_autoscaling_ContainerResourcePolicy_To_v2beta1_ContainerResourcePolicy,
		Convert_v2beta1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference,
		Convert_autoscaling_CrossVersionObjectReference_To_v2beta1_CrossVersionObjectReference,
		Convert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource,
@@ -67,61 +65,17 @@ func RegisterConversions(scheme *runtime.Scheme) error {
		Convert_autoscaling_ObjectMetricSource_To_v2beta1_ObjectMetricSource,
		Convert_v2beta1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus,
		Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus,
		Convert_v2beta1_PodResourcePolicy_To_autoscaling_PodResourcePolicy,
		Convert_autoscaling_PodResourcePolicy_To_v2beta1_PodResourcePolicy,
		Convert_v2beta1_PodUpdatePolicy_To_autoscaling_PodUpdatePolicy,
		Convert_autoscaling_PodUpdatePolicy_To_v2beta1_PodUpdatePolicy,
		Convert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource,
		Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource,
		Convert_v2beta1_PodsMetricStatus_To_autoscaling_PodsMetricStatus,
		Convert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus,
		Convert_v2beta1_RecommendedContainerResources_To_autoscaling_RecommendedContainerResources,
		Convert_autoscaling_RecommendedContainerResources_To_v2beta1_RecommendedContainerResources,
		Convert_v2beta1_RecommendedPodResources_To_autoscaling_RecommendedPodResources,
		Convert_autoscaling_RecommendedPodResources_To_v2beta1_RecommendedPodResources,
		Convert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource,
		Convert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource,
		Convert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus,
		Convert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus,
		Convert_v2beta1_VerticalPodAutoscaler_To_autoscaling_VerticalPodAutoscaler,
		Convert_autoscaling_VerticalPodAutoscaler_To_v2beta1_VerticalPodAutoscaler,
		Convert_v2beta1_VerticalPodAutoscalerCondition_To_autoscaling_VerticalPodAutoscalerCondition,
		Convert_autoscaling_VerticalPodAutoscalerCondition_To_v2beta1_VerticalPodAutoscalerCondition,
		Convert_v2beta1_VerticalPodAutoscalerList_To_autoscaling_VerticalPodAutoscalerList,
		Convert_autoscaling_VerticalPodAutoscalerList_To_v2beta1_VerticalPodAutoscalerList,
		Convert_v2beta1_VerticalPodAutoscalerSpec_To_autoscaling_VerticalPodAutoscalerSpec,
		Convert_autoscaling_VerticalPodAutoscalerSpec_To_v2beta1_VerticalPodAutoscalerSpec,
		Convert_v2beta1_VerticalPodAutoscalerStatus_To_autoscaling_VerticalPodAutoscalerStatus,
		Convert_autoscaling_VerticalPodAutoscalerStatus_To_v2beta1_VerticalPodAutoscalerStatus,
	)
}

func autoConvert_v2beta1_ContainerResourcePolicy_To_autoscaling_ContainerResourcePolicy(in *v2beta1.ContainerResourcePolicy, out *autoscaling.ContainerResourcePolicy, s conversion.Scope) error {
	out.ContainerName = in.ContainerName
	out.Mode = (*autoscaling.ContainerScalingMode)(unsafe.Pointer(in.Mode))
	out.MinAllowed = *(*core.ResourceList)(unsafe.Pointer(&in.MinAllowed))
	out.MaxAllowed = *(*core.ResourceList)(unsafe.Pointer(&in.MaxAllowed))
	return nil
}

// Convert_v2beta1_ContainerResourcePolicy_To_autoscaling_ContainerResourcePolicy is an autogenerated conversion function.
func Convert_v2beta1_ContainerResourcePolicy_To_autoscaling_ContainerResourcePolicy(in *v2beta1.ContainerResourcePolicy, out *autoscaling.ContainerResourcePolicy, s conversion.Scope) error {
	return autoConvert_v2beta1_ContainerResourcePolicy_To_autoscaling_ContainerResourcePolicy(in, out, s)
}

func autoConvert_autoscaling_ContainerResourcePolicy_To_v2beta1_ContainerResourcePolicy(in *autoscaling.ContainerResourcePolicy, out *v2beta1.ContainerResourcePolicy, s conversion.Scope) error {
	out.ContainerName = in.ContainerName
	out.Mode = (*v2beta1.ContainerScalingMode)(unsafe.Pointer(in.Mode))
	out.MinAllowed = *(*v1.ResourceList)(unsafe.Pointer(&in.MinAllowed))
	out.MaxAllowed = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxAllowed))
	return nil
}

// Convert_autoscaling_ContainerResourcePolicy_To_v2beta1_ContainerResourcePolicy is an autogenerated conversion function.
func Convert_autoscaling_ContainerResourcePolicy_To_v2beta1_ContainerResourcePolicy(in *autoscaling.ContainerResourcePolicy, out *v2beta1.ContainerResourcePolicy, s conversion.Scope) error {
	return autoConvert_autoscaling_ContainerResourcePolicy_To_v2beta1_ContainerResourcePolicy(in, out, s)
}

func autoConvert_v2beta1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *v2beta1.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
	out.Kind = in.Kind
	out.Name = in.Name
@@ -148,7 +102,7 @@ func Convert_autoscaling_CrossVersionObjectReference_To_v2beta1_CrossVersionObje

func autoConvert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *v2beta1.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
	out.MetricName = in.MetricName
	out.MetricSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.MetricSelector))
	out.MetricSelector = (*v1.LabelSelector)(unsafe.Pointer(in.MetricSelector))
	out.TargetValue = (*resource.Quantity)(unsafe.Pointer(in.TargetValue))
	out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue))
	return nil

@@ -161,7 +115,7 @@ func Convert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in

func autoConvert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *v2beta1.ExternalMetricSource, s conversion.Scope) error {
	out.MetricName = in.MetricName
	out.MetricSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.MetricSelector))
	out.MetricSelector = (*v1.LabelSelector)(unsafe.Pointer(in.MetricSelector))
	out.TargetValue = (*resource.Quantity)(unsafe.Pointer(in.TargetValue))
	out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue))
	return nil

@@ -174,7 +128,7 @@ func Convert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource(in

func autoConvert_v2beta1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *v2beta1.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
	out.MetricName = in.MetricName
	out.MetricSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.MetricSelector))
	out.MetricSelector = (*v1.LabelSelector)(unsafe.Pointer(in.MetricSelector))
	out.CurrentValue = in.CurrentValue
	out.CurrentAverageValue = (*resource.Quantity)(unsafe.Pointer(in.CurrentAverageValue))
	return nil

@@ -187,7 +141,7 @@ func Convert_v2beta1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in

func autoConvert_autoscaling_ExternalMetricStatus_To_v2beta1_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *v2beta1.ExternalMetricStatus, s conversion.Scope) error {
	out.MetricName = in.MetricName
	out.MetricSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.MetricSelector))
	out.MetricSelector = (*v1.LabelSelector)(unsafe.Pointer(in.MetricSelector))
	out.CurrentValue = in.CurrentValue
	out.CurrentAverageValue = (*resource.Quantity)(unsafe.Pointer(in.CurrentAverageValue))
	return nil

@@ -246,7 +200,7 @@ func Convert_v2beta1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalP

func autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta1_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *v2beta1.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
	out.Type = v2beta1.HorizontalPodAutoscalerConditionType(in.Type)
	out.Status = v1.ConditionStatus(in.Status)
	out.Status = core_v1.ConditionStatus(in.Status)
	out.LastTransitionTime = in.LastTransitionTime
	out.Reason = in.Reason
	out.Message = in.Message

@@ -312,7 +266,7 @@ func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta1_HorizontalPodAut

func autoConvert_v2beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *v2beta1.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
	out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
	out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime))
	out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime))
	out.CurrentReplicas = in.CurrentReplicas
	out.DesiredReplicas = in.DesiredReplicas
	out.CurrentMetrics = *(*[]autoscaling.MetricStatus)(unsafe.Pointer(&in.CurrentMetrics))

@@ -327,7 +281,7 @@ func Convert_v2beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodA

func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *v2beta1.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
	out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
	out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime))
	out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime))
	out.CurrentReplicas = in.CurrentReplicas
	out.DesiredReplicas = in.DesiredReplicas
	out.CurrentMetrics = *(*[]v2beta1.MetricStatus)(unsafe.Pointer(&in.CurrentMetrics))
@@ -452,46 +406,6 @@ func Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(in *au
	return autoConvert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(in, out, s)
}

func autoConvert_v2beta1_PodResourcePolicy_To_autoscaling_PodResourcePolicy(in *v2beta1.PodResourcePolicy, out *autoscaling.PodResourcePolicy, s conversion.Scope) error {
	out.ContainerPolicies = *(*[]autoscaling.ContainerResourcePolicy)(unsafe.Pointer(&in.ContainerPolicies))
	return nil
}

// Convert_v2beta1_PodResourcePolicy_To_autoscaling_PodResourcePolicy is an autogenerated conversion function.
func Convert_v2beta1_PodResourcePolicy_To_autoscaling_PodResourcePolicy(in *v2beta1.PodResourcePolicy, out *autoscaling.PodResourcePolicy, s conversion.Scope) error {
	return autoConvert_v2beta1_PodResourcePolicy_To_autoscaling_PodResourcePolicy(in, out, s)
}

func autoConvert_autoscaling_PodResourcePolicy_To_v2beta1_PodResourcePolicy(in *autoscaling.PodResourcePolicy, out *v2beta1.PodResourcePolicy, s conversion.Scope) error {
	out.ContainerPolicies = *(*[]v2beta1.ContainerResourcePolicy)(unsafe.Pointer(&in.ContainerPolicies))
	return nil
}

// Convert_autoscaling_PodResourcePolicy_To_v2beta1_PodResourcePolicy is an autogenerated conversion function.
func Convert_autoscaling_PodResourcePolicy_To_v2beta1_PodResourcePolicy(in *autoscaling.PodResourcePolicy, out *v2beta1.PodResourcePolicy, s conversion.Scope) error {
	return autoConvert_autoscaling_PodResourcePolicy_To_v2beta1_PodResourcePolicy(in, out, s)
}

func autoConvert_v2beta1_PodUpdatePolicy_To_autoscaling_PodUpdatePolicy(in *v2beta1.PodUpdatePolicy, out *autoscaling.PodUpdatePolicy, s conversion.Scope) error {
	out.UpdateMode = (*autoscaling.UpdateMode)(unsafe.Pointer(in.UpdateMode))
	return nil
}

// Convert_v2beta1_PodUpdatePolicy_To_autoscaling_PodUpdatePolicy is an autogenerated conversion function.
func Convert_v2beta1_PodUpdatePolicy_To_autoscaling_PodUpdatePolicy(in *v2beta1.PodUpdatePolicy, out *autoscaling.PodUpdatePolicy, s conversion.Scope) error {
	return autoConvert_v2beta1_PodUpdatePolicy_To_autoscaling_PodUpdatePolicy(in, out, s)
}

func autoConvert_autoscaling_PodUpdatePolicy_To_v2beta1_PodUpdatePolicy(in *autoscaling.PodUpdatePolicy, out *v2beta1.PodUpdatePolicy, s conversion.Scope) error {
	out.UpdateMode = (*v2beta1.UpdateMode)(unsafe.Pointer(in.UpdateMode))
	return nil
}

// Convert_autoscaling_PodUpdatePolicy_To_v2beta1_PodUpdatePolicy is an autogenerated conversion function.
func Convert_autoscaling_PodUpdatePolicy_To_v2beta1_PodUpdatePolicy(in *autoscaling.PodUpdatePolicy, out *v2beta1.PodUpdatePolicy, s conversion.Scope) error {
	return autoConvert_autoscaling_PodUpdatePolicy_To_v2beta1_PodUpdatePolicy(in, out, s)
}

func autoConvert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *v2beta1.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
	out.MetricName = in.MetricName
	out.TargetAverageValue = in.TargetAverageValue
@@ -536,52 +450,6 @@ func Convert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus(in *autosc
	return autoConvert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus(in, out, s)
}

func autoConvert_v2beta1_RecommendedContainerResources_To_autoscaling_RecommendedContainerResources(in *v2beta1.RecommendedContainerResources, out *autoscaling.RecommendedContainerResources, s conversion.Scope) error {
	out.ContainerName = in.ContainerName
	out.Target = *(*core.ResourceList)(unsafe.Pointer(&in.Target))
	out.LowerBound = *(*core.ResourceList)(unsafe.Pointer(&in.LowerBound))
	out.UpperBound = *(*core.ResourceList)(unsafe.Pointer(&in.UpperBound))
	return nil
}

// Convert_v2beta1_RecommendedContainerResources_To_autoscaling_RecommendedContainerResources is an autogenerated conversion function.
func Convert_v2beta1_RecommendedContainerResources_To_autoscaling_RecommendedContainerResources(in *v2beta1.RecommendedContainerResources, out *autoscaling.RecommendedContainerResources, s conversion.Scope) error {
	return autoConvert_v2beta1_RecommendedContainerResources_To_autoscaling_RecommendedContainerResources(in, out, s)
}

func autoConvert_autoscaling_RecommendedContainerResources_To_v2beta1_RecommendedContainerResources(in *autoscaling.RecommendedContainerResources, out *v2beta1.RecommendedContainerResources, s conversion.Scope) error {
	out.ContainerName = in.ContainerName
	out.Target = *(*v1.ResourceList)(unsafe.Pointer(&in.Target))
	out.LowerBound = *(*v1.ResourceList)(unsafe.Pointer(&in.LowerBound))
	out.UpperBound = *(*v1.ResourceList)(unsafe.Pointer(&in.UpperBound))
	return nil
}

// Convert_autoscaling_RecommendedContainerResources_To_v2beta1_RecommendedContainerResources is an autogenerated conversion function.
func Convert_autoscaling_RecommendedContainerResources_To_v2beta1_RecommendedContainerResources(in *autoscaling.RecommendedContainerResources, out *v2beta1.RecommendedContainerResources, s conversion.Scope) error {
	return autoConvert_autoscaling_RecommendedContainerResources_To_v2beta1_RecommendedContainerResources(in, out, s)
}

func autoConvert_v2beta1_RecommendedPodResources_To_autoscaling_RecommendedPodResources(in *v2beta1.RecommendedPodResources, out *autoscaling.RecommendedPodResources, s conversion.Scope) error {
	out.ContainerRecommendations = *(*[]autoscaling.RecommendedContainerResources)(unsafe.Pointer(&in.ContainerRecommendations))
	return nil
}

// Convert_v2beta1_RecommendedPodResources_To_autoscaling_RecommendedPodResources is an autogenerated conversion function.
func Convert_v2beta1_RecommendedPodResources_To_autoscaling_RecommendedPodResources(in *v2beta1.RecommendedPodResources, out *autoscaling.RecommendedPodResources, s conversion.Scope) error {
	return autoConvert_v2beta1_RecommendedPodResources_To_autoscaling_RecommendedPodResources(in, out, s)
}

func autoConvert_autoscaling_RecommendedPodResources_To_v2beta1_RecommendedPodResources(in *autoscaling.RecommendedPodResources, out *v2beta1.RecommendedPodResources, s conversion.Scope) error {
	out.ContainerRecommendations = *(*[]v2beta1.RecommendedContainerResources)(unsafe.Pointer(&in.ContainerRecommendations))
	return nil
}

// Convert_autoscaling_RecommendedPodResources_To_v2beta1_RecommendedPodResources is an autogenerated conversion function.
func Convert_autoscaling_RecommendedPodResources_To_v2beta1_RecommendedPodResources(in *autoscaling.RecommendedPodResources, out *v2beta1.RecommendedPodResources, s conversion.Scope) error {
	return autoConvert_autoscaling_RecommendedPodResources_To_v2beta1_RecommendedPodResources(in, out, s)
}

func autoConvert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *v2beta1.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
	out.Name = core.ResourceName(in.Name)
	out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization))
@ -595,7 +463,7 @@ func Convert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in
|
|||
}
|
||||
|
||||
func autoConvert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *v2beta1.ResourceMetricSource, s conversion.Scope) error {
|
||||
out.Name = v1.ResourceName(in.Name)
|
||||
out.Name = core_v1.ResourceName(in.Name)
|
||||
out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization))
|
||||
out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue))
|
||||
return nil
|
||||
|
|
@ -619,7 +487,7 @@ func Convert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in
|
|||
}
|
||||
|
||||
func autoConvert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *v2beta1.ResourceMetricStatus, s conversion.Scope) error {
|
||||
out.Name = v1.ResourceName(in.Name)
|
||||
out.Name = core_v1.ResourceName(in.Name)
|
||||
out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization))
|
||||
out.CurrentAverageValue = in.CurrentAverageValue
|
||||
return nil
|
||||
|
|
@ -629,131 +497,3 @@ func autoConvert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatu
|
|||
func Convert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *v2beta1.ResourceMetricStatus, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v2beta1_VerticalPodAutoscaler_To_autoscaling_VerticalPodAutoscaler(in *v2beta1.VerticalPodAutoscaler, out *autoscaling.VerticalPodAutoscaler, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_v2beta1_VerticalPodAutoscalerSpec_To_autoscaling_VerticalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_v2beta1_VerticalPodAutoscalerStatus_To_autoscaling_VerticalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v2beta1_VerticalPodAutoscaler_To_autoscaling_VerticalPodAutoscaler is an autogenerated conversion function.
|
||||
func Convert_v2beta1_VerticalPodAutoscaler_To_autoscaling_VerticalPodAutoscaler(in *v2beta1.VerticalPodAutoscaler, out *autoscaling.VerticalPodAutoscaler, s conversion.Scope) error {
|
||||
return autoConvert_v2beta1_VerticalPodAutoscaler_To_autoscaling_VerticalPodAutoscaler(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_VerticalPodAutoscaler_To_v2beta1_VerticalPodAutoscaler(in *autoscaling.VerticalPodAutoscaler, out *v2beta1.VerticalPodAutoscaler, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_autoscaling_VerticalPodAutoscalerSpec_To_v2beta1_VerticalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_autoscaling_VerticalPodAutoscalerStatus_To_v2beta1_VerticalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_autoscaling_VerticalPodAutoscaler_To_v2beta1_VerticalPodAutoscaler is an autogenerated conversion function.
|
||||
func Convert_autoscaling_VerticalPodAutoscaler_To_v2beta1_VerticalPodAutoscaler(in *autoscaling.VerticalPodAutoscaler, out *v2beta1.VerticalPodAutoscaler, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_VerticalPodAutoscaler_To_v2beta1_VerticalPodAutoscaler(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v2beta1_VerticalPodAutoscalerCondition_To_autoscaling_VerticalPodAutoscalerCondition(in *v2beta1.VerticalPodAutoscalerCondition, out *autoscaling.VerticalPodAutoscalerCondition, s conversion.Scope) error {
|
||||
out.Type = autoscaling.VerticalPodAutoscalerConditionType(in.Type)
|
||||
out.Status = core.ConditionStatus(in.Status)
|
||||
out.LastTransitionTime = in.LastTransitionTime
|
||||
out.Reason = in.Reason
|
||||
out.Message = in.Message
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v2beta1_VerticalPodAutoscalerCondition_To_autoscaling_VerticalPodAutoscalerCondition is an autogenerated conversion function.
|
||||
func Convert_v2beta1_VerticalPodAutoscalerCondition_To_autoscaling_VerticalPodAutoscalerCondition(in *v2beta1.VerticalPodAutoscalerCondition, out *autoscaling.VerticalPodAutoscalerCondition, s conversion.Scope) error {
|
||||
return autoConvert_v2beta1_VerticalPodAutoscalerCondition_To_autoscaling_VerticalPodAutoscalerCondition(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_VerticalPodAutoscalerCondition_To_v2beta1_VerticalPodAutoscalerCondition(in *autoscaling.VerticalPodAutoscalerCondition, out *v2beta1.VerticalPodAutoscalerCondition, s conversion.Scope) error {
|
||||
out.Type = v2beta1.VerticalPodAutoscalerConditionType(in.Type)
|
||||
out.Status = v1.ConditionStatus(in.Status)
|
||||
out.LastTransitionTime = in.LastTransitionTime
|
||||
out.Reason = in.Reason
|
||||
out.Message = in.Message
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_autoscaling_VerticalPodAutoscalerCondition_To_v2beta1_VerticalPodAutoscalerCondition is an autogenerated conversion function.
|
||||
func Convert_autoscaling_VerticalPodAutoscalerCondition_To_v2beta1_VerticalPodAutoscalerCondition(in *autoscaling.VerticalPodAutoscalerCondition, out *v2beta1.VerticalPodAutoscalerCondition, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_VerticalPodAutoscalerCondition_To_v2beta1_VerticalPodAutoscalerCondition(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v2beta1_VerticalPodAutoscalerList_To_autoscaling_VerticalPodAutoscalerList(in *v2beta1.VerticalPodAutoscalerList, out *autoscaling.VerticalPodAutoscalerList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]autoscaling.VerticalPodAutoscaler)(unsafe.Pointer(&in.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v2beta1_VerticalPodAutoscalerList_To_autoscaling_VerticalPodAutoscalerList is an autogenerated conversion function.
|
||||
func Convert_v2beta1_VerticalPodAutoscalerList_To_autoscaling_VerticalPodAutoscalerList(in *v2beta1.VerticalPodAutoscalerList, out *autoscaling.VerticalPodAutoscalerList, s conversion.Scope) error {
|
||||
return autoConvert_v2beta1_VerticalPodAutoscalerList_To_autoscaling_VerticalPodAutoscalerList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_VerticalPodAutoscalerList_To_v2beta1_VerticalPodAutoscalerList(in *autoscaling.VerticalPodAutoscalerList, out *v2beta1.VerticalPodAutoscalerList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]v2beta1.VerticalPodAutoscaler)(unsafe.Pointer(&in.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_autoscaling_VerticalPodAutoscalerList_To_v2beta1_VerticalPodAutoscalerList is an autogenerated conversion function.
|
||||
func Convert_autoscaling_VerticalPodAutoscalerList_To_v2beta1_VerticalPodAutoscalerList(in *autoscaling.VerticalPodAutoscalerList, out *v2beta1.VerticalPodAutoscalerList, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_VerticalPodAutoscalerList_To_v2beta1_VerticalPodAutoscalerList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v2beta1_VerticalPodAutoscalerSpec_To_autoscaling_VerticalPodAutoscalerSpec(in *v2beta1.VerticalPodAutoscalerSpec, out *autoscaling.VerticalPodAutoscalerSpec, s conversion.Scope) error {
|
||||
out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector))
|
||||
out.UpdatePolicy = (*autoscaling.PodUpdatePolicy)(unsafe.Pointer(in.UpdatePolicy))
|
||||
out.ResourcePolicy = (*autoscaling.PodResourcePolicy)(unsafe.Pointer(in.ResourcePolicy))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v2beta1_VerticalPodAutoscalerSpec_To_autoscaling_VerticalPodAutoscalerSpec is an autogenerated conversion function.
|
||||
func Convert_v2beta1_VerticalPodAutoscalerSpec_To_autoscaling_VerticalPodAutoscalerSpec(in *v2beta1.VerticalPodAutoscalerSpec, out *autoscaling.VerticalPodAutoscalerSpec, s conversion.Scope) error {
|
||||
return autoConvert_v2beta1_VerticalPodAutoscalerSpec_To_autoscaling_VerticalPodAutoscalerSpec(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_VerticalPodAutoscalerSpec_To_v2beta1_VerticalPodAutoscalerSpec(in *autoscaling.VerticalPodAutoscalerSpec, out *v2beta1.VerticalPodAutoscalerSpec, s conversion.Scope) error {
|
||||
out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector))
|
||||
out.UpdatePolicy = (*v2beta1.PodUpdatePolicy)(unsafe.Pointer(in.UpdatePolicy))
|
||||
out.ResourcePolicy = (*v2beta1.PodResourcePolicy)(unsafe.Pointer(in.ResourcePolicy))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_autoscaling_VerticalPodAutoscalerSpec_To_v2beta1_VerticalPodAutoscalerSpec is an autogenerated conversion function.
|
||||
func Convert_autoscaling_VerticalPodAutoscalerSpec_To_v2beta1_VerticalPodAutoscalerSpec(in *autoscaling.VerticalPodAutoscalerSpec, out *v2beta1.VerticalPodAutoscalerSpec, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_VerticalPodAutoscalerSpec_To_v2beta1_VerticalPodAutoscalerSpec(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v2beta1_VerticalPodAutoscalerStatus_To_autoscaling_VerticalPodAutoscalerStatus(in *v2beta1.VerticalPodAutoscalerStatus, out *autoscaling.VerticalPodAutoscalerStatus, s conversion.Scope) error {
|
||||
out.Recommendation = (*autoscaling.RecommendedPodResources)(unsafe.Pointer(in.Recommendation))
|
||||
out.Conditions = *(*[]autoscaling.VerticalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v2beta1_VerticalPodAutoscalerStatus_To_autoscaling_VerticalPodAutoscalerStatus is an autogenerated conversion function.
|
||||
func Convert_v2beta1_VerticalPodAutoscalerStatus_To_autoscaling_VerticalPodAutoscalerStatus(in *v2beta1.VerticalPodAutoscalerStatus, out *autoscaling.VerticalPodAutoscalerStatus, s conversion.Scope) error {
|
||||
return autoConvert_v2beta1_VerticalPodAutoscalerStatus_To_autoscaling_VerticalPodAutoscalerStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_VerticalPodAutoscalerStatus_To_v2beta1_VerticalPodAutoscalerStatus(in *autoscaling.VerticalPodAutoscalerStatus, out *v2beta1.VerticalPodAutoscalerStatus, s conversion.Scope) error {
|
||||
out.Recommendation = (*v2beta1.RecommendedPodResources)(unsafe.Pointer(in.Recommendation))
|
||||
out.Conditions = *(*[]v2beta1.VerticalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_autoscaling_VerticalPodAutoscalerStatus_To_v2beta1_VerticalPodAutoscalerStatus is an autogenerated conversion function.
|
||||
func Convert_autoscaling_VerticalPodAutoscalerStatus_To_v2beta1_VerticalPodAutoscalerStatus(in *autoscaling.VerticalPodAutoscalerStatus, out *v2beta1.VerticalPodAutoscalerStatus, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_VerticalPodAutoscalerStatus_To_v2beta1_VerticalPodAutoscalerStatus(in, out, s)
|
||||
}

@@ -23,7 +23,6 @@ package v2beta1
import (
	v2beta1 "k8s.io/api/autoscaling/v2beta1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)

// RegisterDefaults adds defaulters functions to the given scheme.
@@ -36,10 +35,6 @@ func RegisterDefaults(scheme *runtime.Scheme) error {
	scheme.AddTypeDefaultingFunc(&v2beta1.HorizontalPodAutoscalerList{}, func(obj interface{}) {
		SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*v2beta1.HorizontalPodAutoscalerList))
	})
	scheme.AddTypeDefaultingFunc(&v2beta1.VerticalPodAutoscaler{}, func(obj interface{}) { SetObjectDefaults_VerticalPodAutoscaler(obj.(*v2beta1.VerticalPodAutoscaler)) })
	scheme.AddTypeDefaultingFunc(&v2beta1.VerticalPodAutoscalerList{}, func(obj interface{}) {
		SetObjectDefaults_VerticalPodAutoscalerList(obj.(*v2beta1.VerticalPodAutoscalerList))
	})
	return nil
}

@@ -53,28 +48,3 @@ func SetObjectDefaults_HorizontalPodAutoscalerList(in *v2beta1.HorizontalPodAuto
		SetObjectDefaults_HorizontalPodAutoscaler(a)
	}
}

func SetObjectDefaults_VerticalPodAutoscaler(in *v2beta1.VerticalPodAutoscaler) {
	if in.Spec.ResourcePolicy != nil {
		for i := range in.Spec.ResourcePolicy.ContainerPolicies {
			a := &in.Spec.ResourcePolicy.ContainerPolicies[i]
			v1.SetDefaults_ResourceList(&a.MinAllowed)
			v1.SetDefaults_ResourceList(&a.MaxAllowed)
		}
	}
	if in.Status.Recommendation != nil {
		for i := range in.Status.Recommendation.ContainerRecommendations {
			a := &in.Status.Recommendation.ContainerRecommendations[i]
			v1.SetDefaults_ResourceList(&a.Target)
			v1.SetDefaults_ResourceList(&a.LowerBound)
			v1.SetDefaults_ResourceList(&a.UpperBound)
		}
	}
}

func SetObjectDefaults_VerticalPodAutoscalerList(in *v2beta1.VerticalPodAutoscalerList) {
	for i := range in.Items {
		a := &in.Items[i]
		SetObjectDefaults_VerticalPodAutoscaler(a)
	}
}

301 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go generated vendored
@ -23,48 +23,8 @@ package autoscaling
|
|||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
core "k8s.io/kubernetes/pkg/apis/core"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ContainerResourcePolicy) DeepCopyInto(out *ContainerResourcePolicy) {
|
||||
*out = *in
|
||||
if in.Mode != nil {
|
||||
in, out := &in.Mode, &out.Mode
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ContainerScalingMode)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
if in.MinAllowed != nil {
|
||||
in, out := &in.MinAllowed, &out.MinAllowed
|
||||
*out = make(core.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.MaxAllowed != nil {
|
||||
in, out := &in.MaxAllowed, &out.MaxAllowed
|
||||
*out = make(core.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourcePolicy.
|
||||
func (in *ContainerResourcePolicy) DeepCopy() *ContainerResourcePolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ContainerResourcePolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
|
||||
*out = *in
|
||||
|
|
@ -457,54 +417,6 @@ func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodResourcePolicy) DeepCopyInto(out *PodResourcePolicy) {
|
||||
*out = *in
|
||||
if in.ContainerPolicies != nil {
|
||||
in, out := &in.ContainerPolicies, &out.ContainerPolicies
|
||||
*out = make([]ContainerResourcePolicy, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourcePolicy.
|
||||
func (in *PodResourcePolicy) DeepCopy() *PodResourcePolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodResourcePolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodUpdatePolicy) DeepCopyInto(out *PodUpdatePolicy) {
|
||||
*out = *in
|
||||
if in.UpdateMode != nil {
|
||||
in, out := &in.UpdateMode, &out.UpdateMode
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(UpdateMode)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodUpdatePolicy.
|
||||
func (in *PodUpdatePolicy) DeepCopy() *PodUpdatePolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodUpdatePolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
|
||||
*out = *in
|
||||
|
|
@ -539,66 +451,6 @@ func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RecommendedContainerResources) DeepCopyInto(out *RecommendedContainerResources) {
|
||||
*out = *in
|
||||
if in.Target != nil {
|
||||
in, out := &in.Target, &out.Target
|
||||
*out = make(core.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.LowerBound != nil {
|
||||
in, out := &in.LowerBound, &out.LowerBound
|
||||
*out = make(core.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.UpperBound != nil {
|
||||
in, out := &in.UpperBound, &out.UpperBound
|
||||
*out = make(core.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedContainerResources.
|
||||
func (in *RecommendedContainerResources) DeepCopy() *RecommendedContainerResources {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RecommendedContainerResources)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RecommendedPodResources) DeepCopyInto(out *RecommendedPodResources) {
|
||||
*out = *in
|
||||
if in.ContainerRecommendations != nil {
|
||||
in, out := &in.ContainerRecommendations, &out.ContainerRecommendations
|
||||
*out = make([]RecommendedContainerResources, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedPodResources.
|
||||
func (in *RecommendedPodResources) DeepCopy() *RecommendedPodResources {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RecommendedPodResources)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
|
||||
*out = *in
|
||||
|
|
@ -718,156 +570,3 @@ func (in *ScaleStatus) DeepCopy() *ScaleStatus {
|
|||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler.
|
||||
func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscaler)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *VerticalPodAutoscaler) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscalerCondition) DeepCopyInto(out *VerticalPodAutoscalerCondition) {
|
||||
*out = *in
|
||||
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCondition.
|
||||
func (in *VerticalPodAutoscalerCondition) DeepCopy() *VerticalPodAutoscalerCondition {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscalerCondition)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscalerList) DeepCopyInto(out *VerticalPodAutoscalerList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]VerticalPodAutoscaler, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerList.
|
||||
func (in *VerticalPodAutoscalerList) DeepCopy() *VerticalPodAutoscalerList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscalerList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *VerticalPodAutoscalerList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscalerSpec) DeepCopyInto(out *VerticalPodAutoscalerSpec) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.UpdatePolicy != nil {
|
||||
in, out := &in.UpdatePolicy, &out.UpdatePolicy
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PodUpdatePolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.ResourcePolicy != nil {
|
||||
in, out := &in.ResourcePolicy, &out.ResourcePolicy
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PodResourcePolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerSpec.
|
||||
func (in *VerticalPodAutoscalerSpec) DeepCopy() *VerticalPodAutoscalerSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscalerSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VerticalPodAutoscalerStatus) DeepCopyInto(out *VerticalPodAutoscalerStatus) {
|
||||
*out = *in
|
||||
if in.Recommendation != nil {
|
||||
in, out := &in.Recommendation, &out.Recommendation
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(RecommendedPodResources)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]VerticalPodAutoscalerCondition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerStatus.
|
||||
func (in *VerticalPodAutoscalerStatus) DeepCopy() *VerticalPodAutoscalerStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VerticalPodAutoscalerStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}

@@ -12,7 +12,6 @@ go_library(
        "doc.go",
        "generated_expansion.go",
        "horizontalpodautoscaler.go",
        "verticalpodautoscaler.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion",
    deps = [

@@ -26,7 +26,6 @@ import (
type AutoscalingInterface interface {
	RESTClient() rest.Interface
	HorizontalPodAutoscalersGetter
	VerticalPodAutoscalersGetter
}

// AutoscalingClient is used to interact with features provided by the autoscaling group.
@@ -38,10 +37,6 @@ func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) Horizonta
	return newHorizontalPodAutoscalers(c, namespace)
}

func (c *AutoscalingClient) VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerInterface {
	return newVerticalPodAutoscalers(c, namespace)
}

// NewForConfig creates a new AutoscalingClient for the given config.
func NewForConfig(c *rest.Config) (*AutoscalingClient, error) {
	config := *c

@@ -19,5 +19,3 @@ limitations under the License.
package internalversion

type HorizontalPodAutoscalerExpansion interface{}

type VerticalPodAutoscalerExpansion interface{}
@ -1,174 +0,0 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package internalversion
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||
scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme"
|
||||
)
|
||||
|
||||
// VerticalPodAutoscalersGetter has a method to return a VerticalPodAutoscalerInterface.
|
||||
// A group's client should implement this interface.
|
||||
type VerticalPodAutoscalersGetter interface {
|
||||
VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerInterface
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalerInterface has methods to work with VerticalPodAutoscaler resources.
|
||||
type VerticalPodAutoscalerInterface interface {
|
||||
Create(*autoscaling.VerticalPodAutoscaler) (*autoscaling.VerticalPodAutoscaler, error)
|
||||
Update(*autoscaling.VerticalPodAutoscaler) (*autoscaling.VerticalPodAutoscaler, error)
|
||||
UpdateStatus(*autoscaling.VerticalPodAutoscaler) (*autoscaling.VerticalPodAutoscaler, error)
|
||||
Delete(name string, options *v1.DeleteOptions) error
|
||||
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
|
||||
Get(name string, options v1.GetOptions) (*autoscaling.VerticalPodAutoscaler, error)
|
||||
List(opts v1.ListOptions) (*autoscaling.VerticalPodAutoscalerList, error)
|
||||
Watch(opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscaling.VerticalPodAutoscaler, err error)
|
||||
VerticalPodAutoscalerExpansion
|
||||
}
|
||||
|
||||
// verticalPodAutoscalers implements VerticalPodAutoscalerInterface
|
||||
type verticalPodAutoscalers struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
}
|
||||
|
||||
// newVerticalPodAutoscalers returns a VerticalPodAutoscalers
|
||||
func newVerticalPodAutoscalers(c *AutoscalingClient, namespace string) *verticalPodAutoscalers {
|
||||
return &verticalPodAutoscalers{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the verticalPodAutoscaler, and returns the corresponding verticalPodAutoscaler object, and an error if there is any.
|
||||
func (c *verticalPodAutoscalers) Get(name string, options v1.GetOptions) (result *autoscaling.VerticalPodAutoscaler, err error) {
|
||||
result = &autoscaling.VerticalPodAutoscaler{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of VerticalPodAutoscalers that match those selectors.
|
||||
func (c *verticalPodAutoscalers) List(opts v1.ListOptions) (result *autoscaling.VerticalPodAutoscalerList, err error) {
|
||||
result = &autoscaling.VerticalPodAutoscalerList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested verticalPodAutoscalers.
|
||||
func (c *verticalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Watch()
|
||||
}
|
||||
|
||||
// Create takes the representation of a verticalPodAutoscaler and creates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any.
|
||||
func (c *verticalPodAutoscalers) Create(verticalPodAutoscaler *autoscaling.VerticalPodAutoscaler) (result *autoscaling.VerticalPodAutoscaler, err error) {
|
||||
result = &autoscaling.VerticalPodAutoscaler{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
Body(verticalPodAutoscaler).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a verticalPodAutoscaler and updates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any.
|
||||
func (c *verticalPodAutoscalers) Update(verticalPodAutoscaler *autoscaling.VerticalPodAutoscaler) (result *autoscaling.VerticalPodAutoscaler, err error) {
|
||||
result = &autoscaling.VerticalPodAutoscaler{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
Name(verticalPodAutoscaler.Name).
|
||||
Body(verticalPodAutoscaler).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
|
||||
func (c *verticalPodAutoscalers) UpdateStatus(verticalPodAutoscaler *autoscaling.VerticalPodAutoscaler) (result *autoscaling.VerticalPodAutoscaler, err error) {
|
||||
result = &autoscaling.VerticalPodAutoscaler{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
Name(verticalPodAutoscaler.Name).
|
||||
SubResource("status").
|
||||
Body(verticalPodAutoscaler).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the verticalPodAutoscaler and deletes it. Returns an error if one occurs.
|
||||
func (c *verticalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
Name(name).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *verticalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
VersionedParams(&listOptions, scheme.ParameterCodec).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched verticalPodAutoscaler.
|
||||
func (c *verticalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscaling.VerticalPodAutoscaler, err error) {
|
||||
result = &autoscaling.VerticalPodAutoscaler{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("verticalpodautoscalers").
|
||||
SubResource(subresources...).
|
||||
Name(name).
|
||||
Body(data).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|

@@ -10,7 +10,6 @@ go_library(
    srcs = [
        "horizontalpodautoscaler.go",
        "interface.go",
        "verticalpodautoscaler.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion",
    deps = [

@@ -26,8 +26,6 @@ import (
type Interface interface {
	// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
	HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer
	// VerticalPodAutoscalers returns a VerticalPodAutoscalerInformer.
	VerticalPodAutoscalers() VerticalPodAutoscalerInformer
}

type version struct {

@@ -45,8 +43,3 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer {
	return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// VerticalPodAutoscalers returns a VerticalPodAutoscalerInformer.
func (v *version) VerticalPodAutoscalers() VerticalPodAutoscalerInformer {
	return &verticalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
@ -1,89 +0,0 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package internalversion
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||
internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces"
|
||||
internalversion "k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion"
|
||||
)
|
||||
|
||||
// VerticalPodAutoscalerInformer provides access to a shared informer and lister for
|
||||
// VerticalPodAutoscalers.
|
||||
type VerticalPodAutoscalerInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() internalversion.VerticalPodAutoscalerLister
|
||||
}
|
||||
|
||||
type verticalPodAutoscalerInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewVerticalPodAutoscalerInformer constructs a new informer for VerticalPodAutoscaler type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewVerticalPodAutoscalerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredVerticalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredVerticalPodAutoscalerInformer constructs a new informer for VerticalPodAutoscaler type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredVerticalPodAutoscalerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.Autoscaling().VerticalPodAutoscalers(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.Autoscaling().VerticalPodAutoscalers(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&autoscaling.VerticalPodAutoscaler{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *verticalPodAutoscalerInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredVerticalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *verticalPodAutoscalerInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&autoscaling.VerticalPodAutoscaler{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *verticalPodAutoscalerInformer) Lister() internalversion.VerticalPodAutoscalerLister {
|
||||
return internalversion.NewVerticalPodAutoscalerLister(f.Informer().GetIndexer())
|
||||
}

@@ -81,8 +81,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
	// Group=autoscaling, Version=internalVersion
	case autoscaling.SchemeGroupVersion.WithResource("horizontalpodautoscalers"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().InternalVersion().HorizontalPodAutoscalers().Informer()}, nil
	case autoscaling.SchemeGroupVersion.WithResource("verticalpodautoscalers"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().InternalVersion().VerticalPodAutoscalers().Informer()}, nil

	// Group=batch, Version=internalVersion
	case batch.SchemeGroupVersion.WithResource("cronjobs"):

@@ -10,7 +10,6 @@ go_library(
    srcs = [
        "expansion_generated.go",
        "horizontalpodautoscaler.go",
        "verticalpodautoscaler.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion",
    deps = [

@@ -25,11 +25,3 @@ type HorizontalPodAutoscalerListerExpansion interface{}
// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to
// HorizontalPodAutoscalerNamespaceLister.
type HorizontalPodAutoscalerNamespaceListerExpansion interface{}

// VerticalPodAutoscalerListerExpansion allows custom methods to be added to
// VerticalPodAutoscalerLister.
type VerticalPodAutoscalerListerExpansion interface{}

// VerticalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to
// VerticalPodAutoscalerNamespaceLister.
type VerticalPodAutoscalerNamespaceListerExpansion interface{}
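
These hunks drop the VerticalPodAutoscaler wiring from the generated informer factory and listers. For orientation, a hedged sketch of how that surface was typically consumed while it existed; only the method chain comes from the hunks above, while the factory constructor, package aliases, and namespace are assumptions of the sketch.

```go
// Sketch only: assumes an already-constructed internal clientset.
package vpasketch

import (
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)

// listVPAs wires the generated factory, waits for the cache to fill, and lists
// VerticalPodAutoscalers in one namespace through the generated lister.
func listVPAs(client internalclientset.Interface, stop <-chan struct{}) (int, error) {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	// Touch Informer() before Start so the factory actually runs this informer.
	informer := factory.Autoscaling().InternalVersion().VerticalPodAutoscalers().Informer()
	factory.Start(stop)
	cache.WaitForCacheSync(stop, informer.HasSynced)
	lister := factory.Autoscaling().InternalVersion().VerticalPodAutoscalers().Lister()
	vpas, err := lister.VerticalPodAutoscalers("default").List(labels.Everything())
	return len(vpas), err
}
```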
@ -1,94 +0,0 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by lister-gen. DO NOT EDIT.
|
||||
|
||||
package internalversion
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||
)
|
||||
|
||||
// VerticalPodAutoscalerLister helps list VerticalPodAutoscalers.
|
||||
type VerticalPodAutoscalerLister interface {
|
||||
// List lists all VerticalPodAutoscalers in the indexer.
|
||||
List(selector labels.Selector) (ret []*autoscaling.VerticalPodAutoscaler, err error)
|
||||
// VerticalPodAutoscalers returns an object that can list and get VerticalPodAutoscalers.
|
||||
VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerNamespaceLister
|
||||
VerticalPodAutoscalerListerExpansion
|
||||
}
|
||||
|
||||
// verticalPodAutoscalerLister implements the VerticalPodAutoscalerLister interface.
|
||||
type verticalPodAutoscalerLister struct {
|
||||
indexer cache.Indexer
|
||||
}
|
||||
|
||||
// NewVerticalPodAutoscalerLister returns a new VerticalPodAutoscalerLister.
|
||||
func NewVerticalPodAutoscalerLister(indexer cache.Indexer) VerticalPodAutoscalerLister {
|
||||
return &verticalPodAutoscalerLister{indexer: indexer}
|
||||
}
|
||||
|
||||
// List lists all VerticalPodAutoscalers in the indexer.
|
||||
func (s *verticalPodAutoscalerLister) List(selector labels.Selector) (ret []*autoscaling.VerticalPodAutoscaler, err error) {
|
||||
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*autoscaling.VerticalPodAutoscaler))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalers returns an object that can list and get VerticalPodAutoscalers.
|
||||
func (s *verticalPodAutoscalerLister) VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerNamespaceLister {
|
||||
return verticalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace}
|
||||
}
|
||||
|
||||
// VerticalPodAutoscalerNamespaceLister helps list and get VerticalPodAutoscalers.
|
||||
type VerticalPodAutoscalerNamespaceLister interface {
|
||||
// List lists all VerticalPodAutoscalers in the indexer for a given namespace.
|
||||
List(selector labels.Selector) (ret []*autoscaling.VerticalPodAutoscaler, err error)
|
||||
// Get retrieves the VerticalPodAutoscaler from the indexer for a given namespace and name.
|
||||
Get(name string) (*autoscaling.VerticalPodAutoscaler, error)
|
||||
VerticalPodAutoscalerNamespaceListerExpansion
|
||||
}
|
||||
|
||||
// verticalPodAutoscalerNamespaceLister implements the VerticalPodAutoscalerNamespaceLister
|
||||
// interface.
|
||||
type verticalPodAutoscalerNamespaceLister struct {
|
||||
indexer cache.Indexer
|
||||
namespace string
|
||||
}
|
||||
|
||||
// List lists all VerticalPodAutoscalers in the indexer for a given namespace.
|
||||
func (s verticalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*autoscaling.VerticalPodAutoscaler, err error) {
|
||||
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*autoscaling.VerticalPodAutoscaler))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// Get retrieves the VerticalPodAutoscaler from the indexer for a given namespace and name.
|
||||
func (s verticalPodAutoscalerNamespaceLister) Get(name string) (*autoscaling.VerticalPodAutoscaler, error) {
|
||||
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, errors.NewNotFound(autoscaling.Resource("verticalpodautoscaler"), name)
|
||||
}
|
||||
return obj.(*autoscaling.VerticalPodAutoscaler), nil
|
||||
}

@@ -149,14 +149,7 @@ func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, prec
		return err
	}
	if waitForReplicas != nil {
		err := wait.PollImmediate(
			waitForReplicas.Interval,
			waitForReplicas.Timeout,
			scaleHasDesiredReplicas(s.scaleNamespacer, gr, resourceName, namespace, int32(newSize)))
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("timed out waiting for %q to be synced", resourceName)
		}
		return err
		return WaitForScaleHasDesiredReplicas(s.scaleNamespacer, gr, resourceName, namespace, newSize, waitForReplicas)
	}
	return nil
}

@@ -177,3 +170,19 @@ func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupRe
			desiredReplicas == actualScale.Status.Replicas, nil
	}
}

// WaitForScaleHasDesiredReplicas waits until condition scaleHasDesiredReplicas is satisfied
// or returns error when timeout happens
func WaitForScaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, newSize uint, waitForReplicas *RetryParams) error {
	if waitForReplicas == nil {
		return fmt.Errorf("waitForReplicas parameter cannot be nil!")
	}
	err := wait.PollImmediate(
		waitForReplicas.Interval,
		waitForReplicas.Timeout,
		scaleHasDesiredReplicas(sClient, gr, resourceName, namespace, int32(newSize)))
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("timed out waiting for %q to be synced", resourceName)
	}
	return err
}
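
The hunk above turns the inline polling into the exported WaitForScaleHasDesiredReplicas helper. A hedged sketch of a caller, assuming the function lives in the kubectl package and that a scale client is already configured; the resource, names, and retry values are illustrative, while the RetryParams field names come from the diff itself.

```go
// Sketch only: not part of the diff; package path and client wiring are assumptions.
package scalesketch

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/kubernetes/pkg/kubectl"
)

// waitForDeploymentScale blocks until the deployment reports 3 replicas or the
// two-minute timeout carried by RetryParams expires.
func waitForDeploymentScale(scales scaleclient.ScalesGetter) error {
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	retry := &kubectl.RetryParams{Interval: time.Second, Timeout: 2 * time.Minute}
	return kubectl.WaitForScaleHasDesiredReplicas(scales, gr, "my-deployment", "default", 3, retry)
}
```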

@@ -19,6 +19,7 @@ package bootstrap
import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/golang/glog"

@@ -35,6 +36,8 @@ import (
	"k8s.io/client-go/util/certificate/csr"
)

const tmpPrivateKeyFile = "kubelet-client.key.tmp"

// LoadClientCert requests a client cert for kubelet if the kubeconfigPath file does not exist.
// The kubeconfig at bootstrapPath is used to request a client certificate from the API server.
// On success, a kubeconfig file referencing the generated key and obtained certificate is written to kubeconfigPath.

@@ -75,9 +78,15 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string,
			}
		}
	}
	// Cache the private key in a separate file until CSR succeeds. This has to
	// be a separate file because store.CurrentPath() points to a symlink
	// managed by the store.
	privKeyPath := filepath.Join(certDir, tmpPrivateKeyFile)
	if !verifyKeyData(keyData) {
		glog.V(2).Infof("No valid private key found for bootstrapping, creating a new one")
		keyData, err = certutil.MakeEllipticPrivateKeyPEM()
		glog.V(2).Infof("No valid private key and/or certificate found, reusing existing private key or creating a new one")
		// Note: always call LoadOrGenerateKeyFile so that private key is
		// reused on next startup if CSR request fails.
		keyData, _, err = certutil.LoadOrGenerateKeyFile(privKeyPath)
		if err != nil {
			return err
		}

@@ -90,6 +99,9 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string,
	if _, err := store.Update(certData, keyData); err != nil {
		return err
	}
	if err := os.Remove(privKeyPath); err != nil && !os.IsNotExist(err) {
		glog.V(2).Infof("failed cleaning up private key file %q: %v", privKeyPath, err)
	}

	pemPath := store.CurrentPath()

@@ -192,8 +204,6 @@ func verifyKeyData(data []byte) bool {
	if len(data) == 0 {
		return false
	}
	if _, err := certutil.ParsePrivateKeyPEM(data); err != nil {
		return false
	}
	return true
	_, err := certutil.ParsePrivateKeyPEM(data)
	return err == nil
}
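
The bootstrap hunks above stop minting a throwaway key on every attempt and instead cache the private key at a fixed temporary path, so it survives a failed CSR and is reused on the next startup. A minimal standalone sketch of the same load-or-generate idea using only the standard library; the file handling and key type here are illustrative, not the kubelet's certutil helper.

```go
// Sketch only: a generic load-or-generate key cache, not the kubelet code above.
package keycache

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"io/ioutil"
	"os"
)

// loadOrGenerateKeyPEM returns the PEM-encoded key stored at path, creating and
// persisting a new EC key only when no usable file exists yet.
func loadOrGenerateKeyPEM(path string) ([]byte, error) {
	if data, err := ioutil.ReadFile(path); err == nil && len(data) > 0 {
		return data, nil // reuse the cached key across restarts / failed CSRs
	} else if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return nil, err
	}
	data := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
	return data, ioutil.WriteFile(path, data, 0600)
}
```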
@@ -21,56 +21,67 @@ go_library(
    ] + select({
        "@io_bazel_rules_go//go/platform:android": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:dragonfly": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "docker_image_linux.go",
            "docker_sandbox_others.go",
            "docker_stats_linux.go",
            "helpers_linux.go",
        ],
        "@io_bazel_rules_go//go/platform:nacl": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:netbsd": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:openbsd": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:plan9": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:solaris": [
            "docker_image_unsupported.go",
            "docker_sandbox_others.go",
            "docker_stats_unsupported.go",
            "helpers_unsupported.go",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "docker_image_windows.go",
            "docker_sandbox_windows.go",
            "docker_stats_windows.go",
            "helpers_windows.go",
        ],

22 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go generated vendored
@@ -412,9 +412,8 @@ func (ds *dockerService) PodSandboxStatus(ctx context.Context, req *runtimeapi.P

	var IP string
	// TODO: Remove this when sandbox is available on windows
	// Currently windows supports both sandbox and non-sandbox cases.
	// This is a workaround for windows, where sandbox is not in use, and pod IP is determined through containers belonging to the Pod.
	if IP = ds.determinePodIPBySandboxID(podSandboxID, r); IP == "" {
	if IP = ds.determinePodIPBySandboxID(podSandboxID); IP == "" {
		IP = ds.getIP(podSandboxID, r)
	}

@@ -539,21 +538,6 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod
	return &runtimeapi.ListPodSandboxResponse{Items: result}, nil
}

// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string, separator rune) error {
	if lc == nil {
		return nil
	}
	// Apply security context.
	if err := applySandboxSecurityContext(lc, createConfig.Config, hc, ds.network, separator); err != nil {
		return err
	}

	// Set sysctls.
	hc.Sysctls = lc.Sysctls
	return nil
}

func (ds *dockerService) applySandboxResources(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig) error {
	hc.Resources = dockercontainer.Resources{
		MemorySwap: DefaultMemorySwap(),

@@ -594,8 +578,8 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig,
		HostConfig: hc,
	}

	// Apply linux-specific options.
	if err := ds.applySandboxLinuxOptions(hc, c.GetLinux(), createConfig, image, securityOptSeparator); err != nil {
	// Apply platform-specific options.
	if err := ds.applySandboxPlatformOptions(hc, c, createConfig, image, securityOptSeparator); err != nil {
		return nil, err
	}


42 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox_others.go generated vendored Normal file
@@ -0,0 +1,42 @@
// +build !windows

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dockershim

import (
	dockertypes "github.com/docker/docker/api/types"
	dockercontainer "github.com/docker/docker/api/types/container"
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)

// applySandboxPlatformOptions applies platform specific options to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxPlatformOptions(hc *dockercontainer.HostConfig, config *runtimeapi.PodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string, separator rune) error {
	lc := config.GetLinux()
	if lc == nil {
		return nil
	}

	// Apply security context.
	if err := applySandboxSecurityContext(lc, createConfig.Config, hc, ds.network, separator); err != nil {
		return err
	}

	// Set sysctls.
	hc.Sysctls = lc.Sysctls
	return nil
}
39 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox_windows.go generated vendored Normal file
@@ -0,0 +1,39 @@
// +build windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// applySandboxPlatformOptions applies platform specific options to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxPlatformOptions(hc *dockercontainer.HostConfig, config *runtimeapi.PodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string, separator rune) error {
dnsConfig := config.GetDnsConfig()
if dnsConfig == nil {
return nil
}
// Setup DNS.
hc.DNS = dnsConfig.GetServers()
hc.DNSSearch = dnsConfig.GetSearches()
hc.DNSOptions = dnsConfig.GetOptions()
return nil
}
2 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_linux.go generated vendored
@@ -136,7 +136,7 @@ func (ds *dockerService) updateCreateConfig(
return nil
}
func (ds *dockerService) determinePodIPBySandboxID(uid string, sandbox *dockertypes.ContainerJSON) string {
func (ds *dockerService) determinePodIPBySandboxID(uid string) string {
return ""
}
@@ -45,7 +45,7 @@ func (ds *dockerService) updateCreateConfig(
return nil
}
func (ds *dockerService) determinePodIPBySandboxID(uid string, sandbox *dockertypes.ContainerJSON) string {
func (ds *dockerService) determinePodIPBySandboxID(uid string) string {
glog.Warningf("determinePodIPBySandboxID is unsupported in this build")
return ""
}
68 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go generated vendored
@ -97,28 +97,7 @@ func applyWindowsContainerSecurityContext(wsc *runtimeapi.WindowsContainerSecuri
|
|||
}
|
||||
}
|
||||
|
||||
func (ds *dockerService) determinePodIPBySandboxID(sandboxID string, sandbox *dockertypes.ContainerJSON) string {
|
||||
// Versions and feature support
|
||||
// ============================
|
||||
// Windows version >= Windows Server, Version 1709, Supports both sandbox and non-sandbox case
|
||||
// Windows version == Windows Server 2016 Support only non-sandbox case
|
||||
// Windows version < Windows Server 2016 is Not Supported
|
||||
|
||||
// Sandbox support in Windows mandates CNI Plugin.
|
||||
// Presence of CONTAINER_NETWORK flag is considered as non-Sandbox cases here
|
||||
// Hyper-V isolated containers are also considered as non-Sandbox cases
|
||||
|
||||
// Todo: Add a kernel version check for more validation
|
||||
|
||||
// Hyper-V only supports one container per Pod yet and the container will have a different
|
||||
// IP address from sandbox. Retrieve the IP from the containers as this is a non-Sandbox case.
|
||||
// TODO(feiskyer): remove this workaround after Hyper-V supports multiple containers per Pod.
|
||||
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" && sandbox.HostConfig.Isolation != kubeletapis.HypervIsolationValue {
|
||||
// Sandbox case, fetch the IP from the sandbox container.
|
||||
return ds.getIP(sandboxID, sandbox)
|
||||
}
|
||||
|
||||
// Non-Sandbox case, fetch the IP from the containers within the Pod.
|
||||
func (ds *dockerService) determinePodIPBySandboxID(sandboxID string) string {
|
||||
opts := dockertypes.ContainerListOptions{
|
||||
All: true,
|
||||
Filters: dockerfilters.NewArgs(),
|
||||
|
|
@ -138,8 +117,49 @@ func (ds *dockerService) determinePodIPBySandboxID(sandboxID string, sandbox *do
|
|||
continue
|
||||
}
|
||||
|
||||
if containerIP := ds.getIP(c.ID, r); containerIP != "" {
|
||||
return containerIP
|
||||
// Versions and feature support
|
||||
// ============================
|
||||
// Windows version == Windows Server, Version 1709,, Supports both sandbox and non-sandbox case
|
||||
// Windows version == Windows Server 2016 Support only non-sandbox case
|
||||
// Windows version < Windows Server 2016 is Not Supported
|
||||
|
||||
// Sandbox support in Windows mandates CNI Plugin.
|
||||
// Presence of CONTAINER_NETWORK flag is considered as non-Sandbox cases here
|
||||
|
||||
// Todo: Add a kernel version check for more validation
|
||||
|
||||
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" {
|
||||
// On Windows, every container that is created in a Sandbox, needs to invoke CNI plugin again for adding the Network,
|
||||
// with the shared container name as NetNS info,
|
||||
// This is passed down to the platform to replicate some necessary information to the new container
|
||||
|
||||
//
|
||||
// This place is chosen as a hack for now, since ds.getIP would end up calling CNI's addToNetwork
|
||||
// That is why addToNetwork is required to be idempotent
|
||||
|
||||
// Instead of relying on this call, an explicit call to addToNetwork should be
|
||||
// done immediately after ContainerCreation, in case of Windows only. TBD Issue # to handle this
|
||||
|
||||
if r.HostConfig.Isolation == kubeletapis.HypervIsolationValue {
|
||||
// Hyper-V only supports one container per Pod yet and the container will have a different
|
||||
// IP address from sandbox. Return the first non-sandbox container IP as POD IP.
|
||||
// TODO(feiskyer): remove this workaround after Hyper-V supports multiple containers per Pod.
|
||||
if containerIP := ds.getIP(c.ID, r); containerIP != "" {
|
||||
return containerIP
|
||||
}
|
||||
} else {
|
||||
// Do not return any IP, so that we would continue and get the IP of the Sandbox.
|
||||
// Windows 1709 and 1803 doesn't have the Namespace support, so getIP() is called
|
||||
// to replicate the DNS registry key to the Workload container (IP/Gateway/MAC is
|
||||
// set separately than DNS).
|
||||
// TODO(feiskyer): remove this workaround after Namespace is supported in Windows RS5.
|
||||
ds.getIP(sandboxID, r)
|
||||
}
|
||||
} else {
|
||||
// ds.getIP will call the CNI plugin to fetch the IP
|
||||
if containerIP := ds.getIP(c.ID, r); containerIP != "" {
|
||||
return containerIP
|
||||
}
|
||||
}
|
||||
}
1 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/OWNERS generated vendored
@@ -3,6 +3,7 @@ approvers:
- dchen1107
- matchstick
- freehan
- dcbw
reviewers:
- sig-network-reviewers
@@ -11,6 +11,8 @@ go_test(
srcs = [
"eviction_manager_test.go",
"helpers_test.go",
"memory_threshold_notifier_test.go",
"mock_threshold_notifier_test.go",
],
embed = [":go_default_library"],
deps = [
@@ -20,6 +22,7 @@ go_test(
"//pkg/kubelet/eviction/api:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//vendor/github.com/stretchr/testify/mock:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -36,6 +39,7 @@ go_library(
"doc.go",
"eviction_manager.go",
"helpers.go",
"memory_threshold_notifier.go",
"types.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
121 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go generated vendored
@ -19,7 +19,6 @@ package eviction
|
|||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
|
@ -35,7 +34,6 @@ import (
|
|||
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
|
||||
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
"k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
|
|
@ -85,10 +83,12 @@ type managerImpl struct {
|
|||
signalToNodeReclaimFuncs map[evictionapi.Signal]nodeReclaimFuncs
|
||||
// last observations from synchronize
|
||||
lastObservations signalObservations
|
||||
// notifierStopCh is a channel used to stop all thresholdNotifiers
|
||||
notifierStopCh ThresholdStopCh
|
||||
// dedicatedImageFs indicates if imagefs is on a separate device from the rootfs
|
||||
dedicatedImageFs *bool
|
||||
// thresholdNotifiers is a list of memory threshold notifiers which each notify for a memory eviction threshold
|
||||
thresholdNotifiers []ThresholdNotifier
|
||||
// thresholdsLastUpdated is the last time the thresholdNotifiers were updated.
|
||||
thresholdsLastUpdated time.Time
|
||||
}
|
||||
|
||||
// ensure it implements the required interface
|
||||
|
|
@ -116,8 +116,8 @@ func NewManager(
|
|||
nodeRef: nodeRef,
|
||||
nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
|
||||
thresholdsFirstObservedAt: thresholdsObservedAt{},
|
||||
notifierStopCh: NewInitialStopCh(clock),
|
||||
dedicatedImageFs: nil,
|
||||
thresholdNotifiers: []ThresholdNotifier{},
|
||||
}
|
||||
return manager, manager
|
||||
}
|
||||
|
|
@ -157,12 +157,29 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
|
|||
return lifecycle.PodAdmitResult{
|
||||
Admit: false,
|
||||
Reason: Reason,
|
||||
Message: fmt.Sprintf(message, m.nodeConditions),
|
||||
Message: fmt.Sprintf(nodeLowMessageFmt, m.nodeConditions),
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the control loop to observe and response to low compute resources.
|
||||
func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, monitoringInterval time.Duration) {
|
||||
thresholdHandler := func(message string) {
|
||||
glog.Infof(message)
|
||||
m.synchronize(diskInfoProvider, podFunc)
|
||||
}
|
||||
if m.config.KernelMemcgNotification {
|
||||
for _, threshold := range m.config.Thresholds {
|
||||
if threshold.Signal == evictionapi.SignalMemoryAvailable || threshold.Signal == evictionapi.SignalAllocatableMemoryAvailable {
|
||||
notifier, err := NewMemoryThresholdNotifier(threshold, m.config.PodCgroupRoot, &CgroupNotifierFactory{}, thresholdHandler)
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: failed to create memory threshold notifier: %v", err)
|
||||
} else {
|
||||
go notifier.Start()
|
||||
m.thresholdNotifiers = append(m.thresholdNotifiers, notifier)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// start the eviction manager monitoring
|
||||
go func() {
|
||||
for {
|
||||
|
|
@ -197,51 +214,6 @@ func (m *managerImpl) IsUnderPIDPressure() bool {
|
|||
return hasNodeCondition(m.nodeConditions, v1.NodePIDPressure)
|
||||
}
|
||||
|
||||
func (m *managerImpl) startMemoryThresholdNotifier(summary *statsapi.Summary, hard, allocatable bool, handler thresholdNotifierHandlerFunc) error {
|
||||
for _, threshold := range m.config.Thresholds {
|
||||
if threshold.Signal != evictionapi.SignalMemoryAvailable || hard != isHardEvictionThreshold(threshold) {
|
||||
continue
|
||||
}
|
||||
cgroups, err := cm.GetCgroupSubsystems()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cgpath, found := cgroups.MountPoints["memory"]
|
||||
if !found || len(cgpath) == 0 {
|
||||
return fmt.Errorf("memory cgroup mount point not found")
|
||||
}
|
||||
attribute := "memory.usage_in_bytes"
|
||||
memoryStats := summary.Node.Memory
|
||||
if allocatable {
|
||||
cgpath += m.config.PodCgroupRoot
|
||||
allocatableContainer, err := getSysContainer(summary.Node.SystemContainers, statsapi.SystemContainerPods)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
memoryStats = allocatableContainer.Memory
|
||||
}
|
||||
if memoryStats == nil || memoryStats.UsageBytes == nil || memoryStats.WorkingSetBytes == nil || memoryStats.AvailableBytes == nil {
|
||||
return fmt.Errorf("summary was incomplete")
|
||||
}
|
||||
// Set threshold on usage to capacity - eviction_hard + inactive_file,
|
||||
// since we want to be notified when working_set = capacity - eviction_hard
|
||||
inactiveFile := resource.NewQuantity(int64(*memoryStats.UsageBytes-*memoryStats.WorkingSetBytes), resource.BinarySI)
|
||||
capacity := resource.NewQuantity(int64(*memoryStats.AvailableBytes+*memoryStats.WorkingSetBytes), resource.BinarySI)
|
||||
evictionThresholdQuantity := evictionapi.GetThresholdQuantity(threshold.Value, capacity)
|
||||
memcgThreshold := capacity.DeepCopy()
|
||||
memcgThreshold.Sub(*evictionThresholdQuantity)
|
||||
memcgThreshold.Add(*inactiveFile)
|
||||
description := fmt.Sprintf("<%s available", formatThresholdValue(threshold.Value))
|
||||
memcgThresholdNotifier, err := NewMemCGThresholdNotifier(cgpath, attribute, strconv.FormatInt(memcgThreshold.Value(), 10), description, handler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go memcgThresholdNotifier.Start(m.notifierStopCh)
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// synchronize is the main control loop that enforces eviction thresholds.
|
||||
// Returns the pod that was killed, or nil if no pod was killed.
|
||||
func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) []*v1.Pod {
|
||||
|
|
@ -272,41 +244,12 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
|
|||
return nil
|
||||
}
|
||||
|
||||
// attempt to create a threshold notifier to improve eviction response time
|
||||
if m.config.KernelMemcgNotification && m.notifierStopCh.Reset() {
|
||||
glog.V(4).Infof("eviction manager attempting to integrate with kernel memcg notification api")
|
||||
// start soft memory notification
|
||||
err = m.startMemoryThresholdNotifier(summary, false, false, func(desc string) {
|
||||
glog.Infof("soft memory eviction threshold crossed at %s", desc)
|
||||
// TODO wait grace period for soft memory limit
|
||||
m.synchronize(diskInfoProvider, podFunc)
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: failed to create soft memory threshold notifier: %v", err)
|
||||
} // start soft memory notification
|
||||
err = m.startMemoryThresholdNotifier(summary, false, true, func(desc string) {
|
||||
glog.Infof("soft allocatable memory eviction threshold crossed at %s", desc)
|
||||
// TODO wait grace period for soft memory limit
|
||||
m.synchronize(diskInfoProvider, podFunc)
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: failed to create allocatable soft memory threshold notifier: %v", err)
|
||||
}
|
||||
// start hard memory notification
|
||||
err = m.startMemoryThresholdNotifier(summary, true, false, func(desc string) {
|
||||
glog.Infof("hard memory eviction threshold crossed at %s", desc)
|
||||
m.synchronize(diskInfoProvider, podFunc)
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: failed to create hard memory threshold notifier: %v", err)
|
||||
}
|
||||
// start hard memory notification
|
||||
err = m.startMemoryThresholdNotifier(summary, true, true, func(desc string) {
|
||||
glog.Infof("hard allocatable memory eviction threshold crossed at %s", desc)
|
||||
m.synchronize(diskInfoProvider, podFunc)
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: failed to create hard allocatable memory threshold notifier: %v", err)
|
||||
if m.clock.Since(m.thresholdsLastUpdated) > notifierRefreshInterval {
|
||||
m.thresholdsLastUpdated = m.clock.Now()
|
||||
for _, notifier := range m.thresholdNotifiers {
|
||||
if err := notifier.UpdateThreshold(summary); err != nil {
|
||||
glog.Warningf("eviction manager: failed to update %s: %v", notifier.Description(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -535,7 +478,7 @@ func (m *managerImpl) emptyDirLimitEviction(podStats statsapi.PodStats, pod *v1.
|
|||
used := podVolumeUsed[pod.Spec.Volumes[i].Name]
|
||||
if used != nil && size != nil && size.Sign() == 1 && used.Cmp(*size) > 0 {
|
||||
// the emptyDir usage exceeds the size limit, evict the pod
|
||||
return m.evictPod(pod, 0, fmt.Sprintf(emptyDirMessage, pod.Spec.Volumes[i].Name, size.String()), nil)
|
||||
return m.evictPod(pod, 0, fmt.Sprintf(emptyDirMessageFmt, pod.Spec.Volumes[i].Name, size.String()), nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -567,7 +510,7 @@ func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStat
|
|||
podEphemeralStorageLimit := podLimits[v1.ResourceEphemeralStorage]
|
||||
if podEphemeralStorageTotalUsage.Cmp(podEphemeralStorageLimit) > 0 {
|
||||
// the total usage of pod exceeds the total size limit of containers, evict the pod
|
||||
return m.evictPod(pod, 0, fmt.Sprintf(podEphemeralStorageMessage, podEphemeralStorageLimit.String()), nil)
|
||||
return m.evictPod(pod, 0, fmt.Sprintf(podEphemeralStorageMessageFmt, podEphemeralStorageLimit.String()), nil)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
@ -589,7 +532,7 @@ func (m *managerImpl) containerEphemeralStorageLimitEviction(podStats statsapi.P
|
|||
|
||||
if ephemeralStorageThreshold, ok := thresholdsMap[containerStat.Name]; ok {
|
||||
if ephemeralStorageThreshold.Cmp(*containerUsed) < 0 {
|
||||
return m.evictPod(pod, 0, fmt.Sprintf(containerEphemeralStorageMessage, containerStat.Name, ephemeralStorageThreshold.String()), nil)
|
||||
return m.evictPod(pod, 0, fmt.Sprintf(containerEphemeralStorageMessageFmt, containerStat.Name, ephemeralStorageThreshold.String()), nil)
|
||||
|
||||
}
|
||||
}
@ -21,13 +21,11 @@ import (
|
|||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
|
|
@ -40,21 +38,18 @@ const (
|
|||
unsupportedEvictionSignal = "unsupported eviction signal %v"
|
||||
// Reason is the reason reported back in status.
|
||||
Reason = "Evicted"
|
||||
// the message associated with the reason.
|
||||
message = "The node was low on resource: %v. "
|
||||
// additional information for containers exceeding requests
|
||||
containerMessage = "Container %s was using %s, which exceeds its request of %s. "
|
||||
// additional information for containers which have exceeded their ES limit
|
||||
containerEphemeralStorageMessage = "Container %s exceeded its local ephemeral storage limit %q. "
|
||||
// additional information for pods which have exceeded their ES limit
|
||||
podEphemeralStorageMessage = "Pod ephemeral local storage usage exceeds the total limit of containers %s. "
|
||||
// additional information for empty-dir volumes which have exceeded their size limit
|
||||
emptyDirMessage = "Usage of EmptyDir volume %q exceeds the limit %q. "
|
||||
// nodeLowMessageFmt is the message for evictions due to resource pressure.
|
||||
nodeLowMessageFmt = "The node was low on resource: %v. "
|
||||
// containerMessageFmt provides additional information for containers exceeding requests
|
||||
containerMessageFmt = "Container %s was using %s, which exceeds its request of %s. "
|
||||
// containerEphemeralStorageMessageFmt provides additional information for containers which have exceeded their ES limit
|
||||
containerEphemeralStorageMessageFmt = "Container %s exceeded its local ephemeral storage limit %q. "
|
||||
// podEphemeralStorageMessageFmt provides additional information for pods which have exceeded their ES limit
|
||||
podEphemeralStorageMessageFmt = "Pod ephemeral local storage usage exceeds the total limit of containers %s. "
|
||||
// emptyDirMessageFmt provides additional information for empty-dir volumes which have exceeded their size limit
|
||||
emptyDirMessageFmt = "Usage of EmptyDir volume %q exceeds the limit %q. "
|
||||
// inodes, number. internal to this module, used to account for local disk inode consumption.
|
||||
resourceInodes v1.ResourceName = "inodes"
|
||||
// this prevents constantly updating the memcg notifier if synchronize
|
||||
// is run frequently.
|
||||
notifierRefreshInterval = 10 * time.Second
|
||||
// OffendingContainersKey is the key in eviction event annotations for the list of container names which exceeded their requests
|
||||
OffendingContainersKey = "offending_containers"
|
||||
// OffendingContainersUsageKey is the key in eviction event annotations for the list of usage of containers which exceeded their requests
|
||||
|
|
@ -1007,6 +1002,10 @@ func isHardEvictionThreshold(threshold evictionapi.Threshold) bool {
|
|||
return threshold.GracePeriod == time.Duration(0)
|
||||
}
|
||||
|
||||
func isAllocatableEvictionThreshold(threshold evictionapi.Threshold) bool {
|
||||
return threshold.Signal == evictionapi.SignalAllocatableMemoryAvailable
|
||||
}
|
||||
|
||||
// buildSignalToRankFunc returns ranking functions associated with resources
|
||||
func buildSignalToRankFunc(withImageFs bool) map[evictionapi.Signal]rankFunc {
|
||||
signalToRankFunc := map[evictionapi.Signal]rankFunc{
|
||||
|
|
@ -1062,7 +1061,7 @@ func buildSignalToNodeReclaimFuncs(imageGC ImageGC, containerGC ContainerGC, wit
|
|||
// evictionMessage constructs a useful message about why an eviction occurred, and annotations to provide metadata about the eviction
|
||||
func evictionMessage(resourceToReclaim v1.ResourceName, pod *v1.Pod, stats statsFunc) (message string, annotations map[string]string) {
|
||||
annotations = make(map[string]string)
|
||||
message = fmt.Sprintf(message, resourceToReclaim)
|
||||
message = fmt.Sprintf(nodeLowMessageFmt, resourceToReclaim)
|
||||
containers := []string{}
|
||||
containerUsage := []string{}
|
||||
podStats, ok := stats(pod)
|
||||
|
|
@ -1085,7 +1084,7 @@ func evictionMessage(resourceToReclaim v1.ResourceName, pod *v1.Pod, stats stats
|
|||
}
|
||||
}
|
||||
if usage != nil && usage.Cmp(requests) > 0 {
|
||||
message += fmt.Sprintf(containerMessage, container.Name, usage.String(), requests.String())
|
||||
message += fmt.Sprintf(containerMessageFmt, container.Name, usage.String(), requests.String())
|
||||
containers = append(containers, container.Name)
|
||||
containerUsage = append(containerUsage, usage.String())
|
||||
}
|
||||
|
|
@ -1097,38 +1096,3 @@ func evictionMessage(resourceToReclaim v1.ResourceName, pod *v1.Pod, stats stats
|
|||
annotations[StarvedResourceKey] = string(resourceToReclaim)
|
||||
return
|
||||
}
|
||||
|
||||
// thresholdStopCh is a ThresholdStopCh which can only be closed after notifierRefreshInterval time has passed
|
||||
type thresholdStopCh struct {
|
||||
lock sync.Mutex
|
||||
ch chan struct{}
|
||||
startTime time.Time
|
||||
// used to track time
|
||||
clock clock.Clock
|
||||
}
|
||||
|
||||
// NewInitialStopCh returns a ThresholdStopCh which can be closed immediately
|
||||
func NewInitialStopCh(clock clock.Clock) ThresholdStopCh {
|
||||
return &thresholdStopCh{ch: make(chan struct{}), clock: clock}
|
||||
}
|
||||
|
||||
// implements ThresholdStopCh.Reset
|
||||
func (t *thresholdStopCh) Reset() (closed bool) {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
closed = t.clock.Since(t.startTime) > notifierRefreshInterval
|
||||
if closed {
|
||||
// close the old channel and reopen a new one
|
||||
close(t.ch)
|
||||
t.startTime = t.clock.Now()
|
||||
t.ch = make(chan struct{})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// implements ThresholdStopCh.Ch
|
||||
func (t *thresholdStopCh) Ch() <-chan struct{} {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
return t.ch
|
||||
}
135 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/memory_threshold_notifier.go generated vendored Normal file
@ -0,0 +1,135 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package eviction
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
|
||||
)
|
||||
|
||||
const (
|
||||
memoryUsageAttribute = "memory.usage_in_bytes"
|
||||
// this prevents constantly updating the memcg notifier if synchronize
|
||||
// is run frequently.
|
||||
notifierRefreshInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
type memoryThresholdNotifier struct {
|
||||
threshold evictionapi.Threshold
|
||||
cgroupPath string
|
||||
events chan struct{}
|
||||
factory NotifierFactory
|
||||
handler func(string)
|
||||
notifier CgroupNotifier
|
||||
}
|
||||
|
||||
var _ ThresholdNotifier = &memoryThresholdNotifier{}
|
||||
|
||||
// NewMemoryThresholdNotifier creates a ThresholdNotifier which is designed to respond to the given threshold.
|
||||
// UpdateThreshold must be called once before the threshold will be active.
|
||||
func NewMemoryThresholdNotifier(threshold evictionapi.Threshold, cgroupRoot string, factory NotifierFactory, handler func(string)) (ThresholdNotifier, error) {
|
||||
cgroups, err := cm.GetCgroupSubsystems()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cgpath, found := cgroups.MountPoints["memory"]
|
||||
if !found || len(cgpath) == 0 {
|
||||
return nil, fmt.Errorf("memory cgroup mount point not found")
|
||||
}
|
||||
if isAllocatableEvictionThreshold(threshold) {
|
||||
// for allocatable thresholds, point the cgroup notifier at the allocatable cgroup
|
||||
cgpath += cgroupRoot
|
||||
}
|
||||
return &memoryThresholdNotifier{
|
||||
threshold: threshold,
|
||||
cgroupPath: cgpath,
|
||||
events: make(chan struct{}),
|
||||
handler: handler,
|
||||
factory: factory,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *memoryThresholdNotifier) Start() {
|
||||
glog.Infof("eviction manager: created %s", m.Description())
|
||||
for range m.events {
|
||||
m.handler(fmt.Sprintf("eviction manager: %s crossed", m.Description()))
|
||||
}
|
||||
}
|
||||
|
||||
func (m *memoryThresholdNotifier) UpdateThreshold(summary *statsapi.Summary) error {
|
||||
memoryStats := summary.Node.Memory
|
||||
if isAllocatableEvictionThreshold(m.threshold) {
|
||||
allocatableContainer, err := getSysContainer(summary.Node.SystemContainers, statsapi.SystemContainerPods)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
memoryStats = allocatableContainer.Memory
|
||||
}
|
||||
if memoryStats == nil || memoryStats.UsageBytes == nil || memoryStats.WorkingSetBytes == nil || memoryStats.AvailableBytes == nil {
|
||||
return fmt.Errorf("summary was incomplete. Expected MemoryStats and all subfields to be non-nil, but got %+v", memoryStats)
|
||||
}
|
||||
// Set threshold on usage to capacity - eviction_hard + inactive_file,
|
||||
// since we want to be notified when working_set = capacity - eviction_hard
|
||||
inactiveFile := resource.NewQuantity(int64(*memoryStats.UsageBytes-*memoryStats.WorkingSetBytes), resource.BinarySI)
|
||||
capacity := resource.NewQuantity(int64(*memoryStats.AvailableBytes+*memoryStats.WorkingSetBytes), resource.BinarySI)
|
||||
evictionThresholdQuantity := evictionapi.GetThresholdQuantity(m.threshold.Value, capacity)
|
||||
memcgThreshold := capacity.DeepCopy()
|
||||
memcgThreshold.Sub(*evictionThresholdQuantity)
|
||||
memcgThreshold.Add(*inactiveFile)
|
||||
|
||||
glog.V(3).Infof("eviction manager: setting %s to %s\n", m.Description(), memcgThreshold.String())
|
||||
if m.notifier != nil {
|
||||
m.notifier.Stop()
|
||||
}
|
||||
newNotifier, err := m.factory.NewCgroupNotifier(m.cgroupPath, memoryUsageAttribute, memcgThreshold.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.notifier = newNotifier
|
||||
go m.notifier.Start(m.events)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *memoryThresholdNotifier) Description() string {
|
||||
var hard, allocatable string
|
||||
if isHardEvictionThreshold(m.threshold) {
|
||||
hard = "hard "
|
||||
} else {
|
||||
hard = "soft "
|
||||
}
|
||||
if isAllocatableEvictionThreshold(m.threshold) {
|
||||
allocatable = "allocatable "
|
||||
}
|
||||
return fmt.Sprintf("%s%smemory eviction threshold", hard, allocatable)
|
||||
}
|
||||
|
||||
var _ NotifierFactory = &CgroupNotifierFactory{}
|
||||
|
||||
// CgroupNotifierFactory knows how to make CgroupNotifiers which integrate with the kernel
|
||||
type CgroupNotifierFactory struct{}
|
||||
|
||||
// NewCgroupNotifier implements the NotifierFactory interface
|
||||
func (n *CgroupNotifierFactory) NewCgroupNotifier(path, attribute string, threshold int64) (CgroupNotifier, error) {
|
||||
return NewCgroupNotifier(path, attribute, threshold)
|
||||
}
176 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go generated vendored
@ -18,43 +18,47 @@ package eviction
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type memcgThresholdNotifier struct {
|
||||
watchfd int
|
||||
controlfd int
|
||||
eventfd int
|
||||
handler thresholdNotifierHandlerFunc
|
||||
description string
|
||||
const (
|
||||
// eventSize is the number of bytes returned by a successful read from an eventfd
|
||||
// see http://man7.org/linux/man-pages/man2/eventfd.2.html for more information
|
||||
eventSize = 8
|
||||
// numFdEvents is the number of events we can record at once.
|
||||
// If EpollWait finds more than this, they will be missed.
|
||||
numFdEvents = 6
|
||||
)
|
||||
|
||||
type linuxCgroupNotifier struct {
|
||||
eventfd int
|
||||
epfd int
|
||||
stop chan struct{}
|
||||
stopLock sync.Mutex
|
||||
}
|
||||
|
||||
var _ ThresholdNotifier = &memcgThresholdNotifier{}
|
||||
var _ CgroupNotifier = &linuxCgroupNotifier{}
|
||||
|
||||
// NewMemCGThresholdNotifier sends notifications when a cgroup threshold
|
||||
// is crossed (in either direction) for a given cgroup attribute
|
||||
func NewMemCGThresholdNotifier(path, attribute, threshold, description string, handler thresholdNotifierHandlerFunc) (ThresholdNotifier, error) {
|
||||
watchfd, err := unix.Open(fmt.Sprintf("%s/%s", path, attribute), unix.O_RDONLY, 0)
|
||||
// NewCgroupNotifier returns a linuxCgroupNotifier, which performs cgroup control operations required
|
||||
// to receive notifications from the cgroup when the threshold is crossed in either direction.
|
||||
func NewCgroupNotifier(path, attribute string, threshold int64) (CgroupNotifier, error) {
|
||||
var watchfd, eventfd, epfd, controlfd int
|
||||
var err error
|
||||
watchfd, err = unix.Open(fmt.Sprintf("%s/%s", path, attribute), unix.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
unix.Close(watchfd)
|
||||
}
|
||||
}()
|
||||
controlfd, err := unix.Open(fmt.Sprintf("%s/cgroup.event_control", path), unix.O_WRONLY, 0)
|
||||
defer unix.Close(watchfd)
|
||||
controlfd, err = unix.Open(fmt.Sprintf("%s/cgroup.event_control", path), unix.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
unix.Close(controlfd)
|
||||
}
|
||||
}()
|
||||
eventfd, err := unix.Eventfd(0, unix.EFD_CLOEXEC)
|
||||
defer unix.Close(controlfd)
|
||||
eventfd, err = unix.Eventfd(0, unix.EFD_CLOEXEC)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -63,55 +67,119 @@ func NewMemCGThresholdNotifier(path, attribute, threshold, description string, h
|
|||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
// Close eventfd if we get an error later in initialization
|
||||
if err != nil {
|
||||
unix.Close(eventfd)
|
||||
}
|
||||
}()
|
||||
glog.V(3).Infof("eviction: setting notification threshold to %s", threshold)
|
||||
config := fmt.Sprintf("%d %d %s", eventfd, watchfd, threshold)
|
||||
epfd, err = unix.EpollCreate1(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if epfd < 0 {
|
||||
err = fmt.Errorf("EpollCreate1 call failed")
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
// Close epfd if we get an error later in initialization
|
||||
if err != nil {
|
||||
unix.Close(epfd)
|
||||
}
|
||||
}()
|
||||
config := fmt.Sprintf("%d %d %d", eventfd, watchfd, threshold)
|
||||
_, err = unix.Write(controlfd, []byte(config))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &memcgThresholdNotifier{
|
||||
watchfd: watchfd,
|
||||
controlfd: controlfd,
|
||||
eventfd: eventfd,
|
||||
handler: handler,
|
||||
description: description,
|
||||
return &linuxCgroupNotifier{
|
||||
eventfd: eventfd,
|
||||
epfd: epfd,
|
||||
stop: make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getThresholdEvents(eventfd int, eventCh chan<- struct{}, stop ThresholdStopCh) {
|
||||
func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) {
|
||||
err := unix.EpollCtl(n.epfd, unix.EPOLL_CTL_ADD, n.eventfd, &unix.EpollEvent{
|
||||
Fd: int32(n.eventfd),
|
||||
Events: unix.EPOLLIN,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: error adding epoll eventfd: %v", err)
|
||||
return
|
||||
}
|
||||
for {
|
||||
buf := make([]byte, 8)
|
||||
_, err := unix.Read(eventfd, buf)
|
||||
select {
|
||||
case <-n.stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
event, err := wait(n.epfd, n.eventfd, notifierRefreshInterval)
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: error while waiting for memcg events: %v", err)
|
||||
return
|
||||
} else if !event {
|
||||
// Timeout on wait. This is expected if the threshold was not crossed
|
||||
continue
|
||||
}
|
||||
// Consume the event from the eventfd
|
||||
buf := make([]byte, eventSize)
|
||||
_, err = unix.Read(n.eventfd, buf)
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: error reading memcg events: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case eventCh <- struct{}{}:
|
||||
case <-stop.Ch():
|
||||
return
|
||||
}
|
||||
eventCh <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *memcgThresholdNotifier) Start(stop ThresholdStopCh) {
|
||||
eventCh := make(chan struct{})
|
||||
go getThresholdEvents(n.eventfd, eventCh, stop)
|
||||
for {
|
||||
select {
|
||||
case <-stop.Ch():
|
||||
glog.V(3).Infof("eviction: stopping threshold notifier")
|
||||
unix.Close(n.watchfd)
|
||||
unix.Close(n.controlfd)
|
||||
unix.Close(n.eventfd)
|
||||
return
|
||||
case <-eventCh:
|
||||
glog.V(2).Infof("eviction: threshold crossed")
|
||||
n.handler(n.description)
|
||||
// wait waits up to notifierRefreshInterval for an event on the Epoll FD for the
|
||||
// eventfd we are concerned about. It returns an error if one occurrs, and true
|
||||
// if the consumer should read from the eventfd.
|
||||
func wait(epfd, eventfd int, timeout time.Duration) (bool, error) {
|
||||
events := make([]unix.EpollEvent, numFdEvents+1)
|
||||
timeoutMS := int(timeout / time.Millisecond)
|
||||
n, err := unix.EpollWait(epfd, events, timeoutMS)
|
||||
if n == -1 {
|
||||
if err == unix.EINTR {
|
||||
// Interrupt, ignore the error
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
if n == 0 {
|
||||
// Timeout
|
||||
return false, nil
|
||||
}
|
||||
if n > numFdEvents {
|
||||
return false, fmt.Errorf("epoll_wait returned more events than we know what to do with")
|
||||
}
|
||||
for _, event := range events[:n] {
|
||||
if event.Fd == int32(eventfd) {
|
||||
if event.Events&unix.EPOLLHUP != 0 || event.Events&unix.EPOLLERR != 0 || event.Events&unix.EPOLLIN != 0 {
|
||||
// EPOLLHUP: should not happen, but if it does, treat it as a wakeup.
|
||||
|
||||
// EPOLLERR: If an error is waiting on the file descriptor, we should pretend
|
||||
// something is ready to read, and let unix.Read pick up the error.
|
||||
|
||||
// EPOLLIN: There is data to read.
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// An event occurred that we don't care about.
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (n *linuxCgroupNotifier) Stop() {
|
||||
n.stopLock.Lock()
|
||||
defer n.stopLock.Unlock()
|
||||
select {
|
||||
case <-n.stop:
|
||||
// the linuxCgroupNotifier is already stopped
|
||||
return
|
||||
default:
|
||||
}
|
||||
unix.Close(n.eventfd)
|
||||
unix.Close(n.epfd)
|
||||
close(n.stop)
|
||||
}
@@ -18,10 +18,16 @@ limitations under the License.
package eviction
import "fmt"
import "github.com/golang/glog"
// NewMemCGThresholdNotifier sends notifications when a cgroup threshold
// is crossed (in either direction) for a given cgroup attribute
func NewMemCGThresholdNotifier(path, attribute, threshold, description string, handler thresholdNotifierHandlerFunc) (ThresholdNotifier, error) {
return nil, fmt.Errorf("threshold notification not supported")
// NewCgroupNotifier creates a cgroup notifier that does nothing because cgroups do not exist on non-linux systems.
func NewCgroupNotifier(path, attribute string, threshold int64) (CgroupNotifier, error) {
glog.V(5).Infof("cgroup notifications not supported")
return &unsupportedThresholdNotifier{}, nil
}
type unsupportedThresholdNotifier struct{}
func (*unsupportedThresholdNotifier) Start(_ chan<- struct{}) {}
func (*unsupportedThresholdNotifier) Stop() {}
@@ -131,20 +131,30 @@ type nodeReclaimFunc func() error
// nodeReclaimFuncs is an ordered list of nodeReclaimFunc
type nodeReclaimFuncs []nodeReclaimFunc
// thresholdNotifierHandlerFunc is a function that takes action in response to a crossed threshold
type thresholdNotifierHandlerFunc func(thresholdDescription string)
// ThresholdStopCh is an interface for a channel which is closed to stop waiting goroutines.
// Implementations of ThresholdStopCh must correctly handle concurrent calls to all functions.
type ThresholdStopCh interface {
// Reset closes the channel if it can be closed, and returns true if it was closed.
// Reset also creates a new channel.
Reset() bool
// Ch returns the channel that is closed when Reset() is called
Ch() <-chan struct{}
// CgroupNotifier generates events from cgroup events
type CgroupNotifier interface {
// Start causes the CgroupNotifier to begin notifying on the eventCh
Start(eventCh chan<- struct{})
// Stop stops all processes and cleans up file descriptors associated with the CgroupNotifier
Stop()
}
// ThresholdNotifier notifies the user when an attribute crosses a threshold value
// NotifierFactory creates CgroupNotifer
type NotifierFactory interface {
// NewCgroupNotifier creates a CgroupNotifier that creates events when the threshold
// on the attribute in the cgroup specified by the path is crossed.
NewCgroupNotifier(path, attribute string, threshold int64) (CgroupNotifier, error)
}
// ThresholdNotifier manages CgroupNotifiers based on memory eviction thresholds, and performs a function
// when memory eviction thresholds are crossed
type ThresholdNotifier interface {
Start(ThresholdStopCh)
// Start calls the notifier function when the CgroupNotifier notifies the ThresholdNotifier that an event occurred
Start()
// UpdateThreshold updates the memory cgroup threshold based on the metrics provided.
// Calling UpdateThreshold with recent metrics allows the ThresholdNotifier to trigger at the
// eviction threshold more accurately
UpdateThreshold(summary *statsapi.Summary) error
// Description produces a relevant string describing the Memory Threshold Notifier
Description() string
}
@@ -72,7 +72,7 @@ type Manager struct {
// * If the token is refreshed successfully, save it in the cache and return the token.
// * If refresh fails and the old token is still valid, log an error and return the old token.
// * If refresh fails and the old token is no longer valid, return an error
func (m *Manager) GetServiceAccountToken(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
key := keyFunc(name, namespace, tr)
ctr, ok := m.get(key)
@@ -134,7 +134,7 @@ var ipsetInfo = []struct {
// ipsetWithIptablesChain is the ipsets list with iptables source chain and the chain jump to
// `iptables -t nat -A <from> -m set --match-set <name> <matchType> -j <to>`
// example: iptables -t nat -A KUBE-SERVICES -m set --match-set KUBE-NODE-PORT-TCP dst -j KUBE-NODE-PORT
// ipsets with ohter match rules will be create Individually.
// ipsets with other match rules will be created Individually.
var ipsetWithIptablesChain = []struct {
name string
from string
@@ -366,6 +366,7 @@ func NewProxier(ipt utiliptables.Interface,
endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder),
syncPeriod: syncPeriod,
minSyncPeriod: minSyncPeriod,
excludeCIDRs: excludeCIDRs,
iptables: ipt,
masqueradeAll: masqueradeAll,
masqueradeMark: masqueradeMark,
@@ -191,12 +191,6 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
if err != nil {
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
optional := b.source.Optional != nil && *b.source.Optional
configMap, err := b.getConfigMap(b.pod.Namespace, b.source.Name)
@@ -213,6 +207,13 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
totalBytes := totalBytes(configMap)
glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
b.pod.Namespace,
@@ -243,7 +244,6 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}
return nil
}
16 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go generated vendored
@@ -180,13 +180,6 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, *b.pod); err != nil {
return err
}
data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode)
if err != nil {
@@ -194,6 +187,15 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, *b.pod); err != nil {
return err
}
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
if err != nil {
@@ -194,18 +194,19 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
if err != nil {
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(s.volName, dir, *s.pod); err != nil {
return err
}
data, err := s.collectData()
if err != nil {
glog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error())
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(s.volName, dir, *s.pod); err != nil {
return err
}
writerContext := fmt.Sprintf("pod %v/%v volume %v", s.pod.Namespace, s.pod.Name, s.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
@@ -225,7 +226,6 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}
return nil
}
@@ -190,12 +190,6 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
if err != nil {
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
optional := b.source.Optional != nil && *b.source.Optional
secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName)
@@ -212,6 +206,13 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
totalBytes := totalSecretBytes(secret)
glog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes",
b.pod.Namespace,
@@ -242,7 +243,6 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
return err
}
return nil
}
@@ -21,6 +21,8 @@ import (
"time"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/kubectl"
)
@@ -30,13 +32,32 @@ const (
updateRetryInterval = 5 * time.Second
updateRetryTimeout = 1 * time.Minute
waitRetryInterval = 5 * time.Second
waitRetryTImeout = 5 * time.Minute
waitRetryTimeout = 5 * time.Minute
)
func ScaleResourceWithRetries(scaler kubectl.Scaler, namespace, name string, size uint, gr schema.GroupResource) error {
waitForScale := kubectl.NewRetryParams(updateRetryInterval, updateRetryTimeout)
waitForReplicas := kubectl.NewRetryParams(waitRetryInterval, waitRetryTImeout)
if err := scaler.Scale(namespace, name, size, nil, waitForScale, waitForReplicas, gr); err != nil {
func RetryErrorCondition(condition wait.ConditionFunc) wait.ConditionFunc {
return func() (bool, error) {
done, err := condition()
if err != nil && IsRetryableAPIError(err) {
return false, nil
}
return done, err
}
}
func ScaleResourceWithRetries(scalesGetter scale.ScalesGetter, namespace, name string, size uint, gr schema.GroupResource) error {
scaler := kubectl.NewScaler(scalesGetter)
preconditions := &kubectl.ScalePrecondition{
Size: -1,
ResourceVersion: "",
}
waitForReplicas := kubectl.NewRetryParams(waitRetryInterval, waitRetryTimeout)
cond := RetryErrorCondition(kubectl.ScaleCondition(scaler, preconditions, namespace, name, size, nil, gr))
err := wait.PollImmediate(updateRetryInterval, updateRetryTimeout, cond)
if err == nil {
err = kubectl.WaitForScaleHasDesiredReplicas(scalesGetter, gr, name, namespace, size, waitForReplicas)
}
if err != nil {
return fmt.Errorf("Error while scaling %s to %d replicas: %v", name, size, err)
}
return nil