Merge pull request #5613 from hezhizhen/chore

Fix typo & replace deprecated functions & replace literals with constants
Kubernetes Prow Robot 2023-03-24 05:32:33 -07:00 committed by GitHub
commit d91bee9e4e
28 changed files with 161 additions and 151 deletions

View File

@@ -93,4 +93,4 @@ VPA objects that have been defined with `poc.autoscaling.k8s.io/v1alpha1`
apiVersion. Then use `vpa-up.sh` to bring up the new version of VPA and create
your VPA objects from the scratch, passing apiVersion
`autoscaling.k8s.io/v1beta2` and switching from selector to targetRef, as
described in the prevous section.
described in the previous section.
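For reference, a minimal sketch of one recreated VPA object in the new API (the workload name and update mode are hypothetical; the point is targetRef in place of selector):

apiVersion: autoscaling.k8s.io/v1beta2
kind: VerticalPodAutoscaler
metadata:
  name: my-app-vpa
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: my-app
  updatePolicy:
    updateMode: "Auto"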

View File

@@ -68,7 +68,7 @@ The current default version is Vertical Pod Autoscaler 0.13.0
remove this API version. While for now you can continue to use `v1beta2` API we
recommend using `autoscaling.k8s.io/v1` instead. `v1` and `v1beta2` APIs are
almost identical (`v1` API has some fields which are not present in `v1beta2)
so simply chaning which API version you're calling should be enough in almost
so simply changing which API version you're calling should be enough in almost
all cases.
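In practice the switch is usually the one-line manifest edit sketched here on a hypothetical object, with everything else left untouched:

apiVersion: autoscaling.k8s.io/v1    # was: autoscaling.k8s.io/v1beta2
kind: VerticalPodAutoscaler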
### Notice on removal of v1beta1 version (>=0.5.0)

View File

@@ -1,5 +1,5 @@
**WARNING**
:warning: We no longer intend to implemnt this KEP. Instead we recommend using
:warning: We no longer intend to implement this KEP. Instead we recommend using
[Unhealthy Pod Eviction Policy for PDBs](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/3017-pod-healthy-policy-for-pdb/README.md).
# KEP-4902: Delete OOM Pods
@@ -62,7 +62,7 @@ fit their needs.
## Design Details
When the eviction fails the pod will not just get blindy deleted, but further
When the eviction fails the pod will not just get blindly deleted, but further
checks will occur. Which gives us the following checklist:
- [ ] Was at least one container in the Pod terminated due to being OOM
(`OOMKilled`)?

View File

@@ -2,7 +2,7 @@
- [Intro](#intro)
- [Running](#running)
- [Implementation](#implmentation)
- [Implementation](#implementation)
## Intro

View File

@@ -17,7 +17,7 @@ limitations under the License.
package main
import (
"io/ioutil"
"os"
"k8s.io/klog/v2"
)
@@ -31,7 +31,7 @@ type certsConfig struct {
}
func readFile(filePath string) []byte {
res, err := ioutil.ReadFile(filePath)
res, err := os.ReadFile(filePath)
if err != nil {
klog.Errorf("Error reading certificate file at %s: %v", filePath, err)
return nil
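Since Go 1.16, ioutil.ReadFile is documented as a deprecated alias of os.ReadFile, so this is a drop-in swap. A self-contained sketch of the pattern (the path is hypothetical):

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.ReadFile replaces the deprecated ioutil.ReadFile (Go 1.16+):
	// same signature, reads the whole file into memory in one call.
	data, err := os.ReadFile("/tmp/example.pem") // hypothetical path
	if err != nil {
		fmt.Printf("error reading file: %v\n", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}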

View File

@@ -48,7 +48,7 @@ if [[ $? -ne 0 ]]; then
fi
set -o errexit
# Create a server certiticate
# Create a server certificate
openssl genrsa -out ${TMP_DIR}/serverKey.pem 2048
# Note the CN is the DNS name of the service of the webhook.
openssl req -new -key ${TMP_DIR}/serverKey.pem -out ${TMP_DIR}/server.csr -subj "/CN=vpa-webhook.kube-system.svc" -config ${TMP_DIR}/server.conf -addext "subjectAltName = DNS:vpa-webhook.kube-system.svc"
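To confirm that the SAN requested via -addext actually landed in the CSR, a check along these lines works (same hypothetical ${TMP_DIR} as above):

openssl req -in ${TMP_DIR}/server.csr -noout -text | grep -A1 'Subject Alternative Name'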

View File

@@ -19,7 +19,7 @@ package logic
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"k8s.io/api/admission/v1"
@@ -133,7 +133,7 @@ func (s *AdmissionServer) Serve(w http.ResponseWriter, r *http.Request) {
var body []byte
if r.Body != nil {
if data, err := ioutil.ReadAll(r.Body); err == nil {
if data, err := io.ReadAll(r.Body); err == nil {
body = data
}
}
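io.ReadAll is likewise the Go 1.16+ replacement for the deprecated ioutil.ReadAll. A minimal sketch of the same body-reading pattern in a hypothetical standalone handler:

package main

import (
	"io"
	"net/http"
)

// echoHandler mirrors the pattern above: read the whole request body
// with io.ReadAll, then use it (here it is simply echoed back).
func echoHandler(w http.ResponseWriter, r *http.Request) {
	var body []byte
	if r.Body != nil {
		if data, err := io.ReadAll(r.Body); err == nil {
			body = data
		}
	}
	w.Write(body)
}

func main() {
	http.HandleFunc("/echo", echoHandler)
	http.ListenAndServe(":8080", nil)
}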

View File

@@ -119,7 +119,7 @@ type PodUpdatePolicy struct {
MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
}
// UpdateMode controls when autoscaler applies changes to the pod resoures.
// UpdateMode controls when autoscaler applies changes to the pod resources.
// +kubebuilder:validation:Enum=Off;Initial;Recreate;Auto
type UpdateMode string

View File

@@ -84,7 +84,7 @@ type PodUpdatePolicy struct {
UpdateMode *UpdateMode `json:"updateMode,omitempty" protobuf:"bytes,1,opt,name=updateMode"`
}
// UpdateMode controls when autoscaler applies changes to the pod resoures.
// UpdateMode controls when autoscaler applies changes to the pod resources.
type UpdateMode string
const (

View File

@@ -99,7 +99,7 @@ type PodUpdatePolicy struct {
UpdateMode *UpdateMode `json:"updateMode,omitempty" protobuf:"bytes,1,opt,name=updateMode"`
}
// UpdateMode controls when autoscaler applies changes to the pod resoures.
// UpdateMode controls when autoscaler applies changes to the pod resources.
// +kubebuilder:validation:Enum=Off;Initial;Recreate;Auto
type UpdateMode string

View File

@@ -84,7 +84,7 @@ type PodUpdatePolicy struct {
UpdateMode *UpdateMode `json:"updateMode,omitempty" protobuf:"bytes,1,opt,name=updateMode"`
}
// UpdateMode controls when autoscaler applies changes to the pod resoures.
// UpdateMode controls when autoscaler applies changes to the pod resources.
type UpdateMode string
const (

View File

@@ -46,7 +46,7 @@ import (
v1lister "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
klog "k8s.io/klog/v2"
"k8s.io/klog/v2"
resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
)
@@ -310,13 +310,13 @@ func (feeder *clusterStateFeeder) GarbageCollectCheckpoints() {
klog.V(3).Info("Starting garbage collection of checkpoints")
feeder.LoadVPAs()
namspaceList, err := feeder.coreClient.Namespaces().List(context.TODO(), metav1.ListOptions{})
namespaceList, err := feeder.coreClient.Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Errorf("Cannot list namespaces. Reason: %+v", err)
return
}
for _, namespaceItem := range namspaceList.Items {
for _, namespaceItem := range namespaceList.Items {
namespace := namespaceItem.Name
checkpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
@@ -375,7 +375,7 @@ func filterVPAs(feeder *clusterStateFeeder, allVpaCRDs []*vpa_types.VerticalPodA
return vpaCRDs
}
// Fetch VPA objects and load them into the cluster state.
// LoadVPAs fetches VPA objects and loads them into the cluster state.
func (feeder *clusterStateFeeder) LoadVPAs() {
// List VPA API objects.
allVpaCRDs, err := feeder.vpaLister.List(labels.Everything())
@@ -424,7 +424,7 @@ func (feeder *clusterStateFeeder) LoadVPAs() {
feeder.clusterState.ObservedVpas = vpaCRDs
}
// Load pod into the cluster state.
// LoadPods loads pod into the cluster state.
func (feeder *clusterStateFeeder) LoadPods() {
podSpecs, err := feeder.specClient.GetPodSpecs()
if err != nil {
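Two Go idioms appear in this file: the import alias is dropped because the package in k8s.io/klog/v2 is already named klog, and doc comments are reworded to begin with the identifier they document, which is what godoc and the usual linters expect. A compressed sketch of both, assuming klog v2 is on the import path:

package feeder

import (
	"k8s.io/klog/v2" // package name is klog, so an explicit klog alias is redundant
)

// LoadPods loads pods into the cluster state. The comment starts
// with the function name, per Go doc-comment convention.
func LoadPods() {
	klog.V(3).Info("loading pods")
}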

View File

@@ -37,6 +37,17 @@ import (
"k8s.io/client-go/tools/cache"
)
const (
testNamespace = "test-namespace"
testDeployment = "test-deployment"
testReplicaSet = "test-rs"
testStatefulSet = "test-statefulset"
testDaemonSet = "test-daemonset"
testCronJob = "test-cronjob"
testJob = "test-job"
testReplicationController = "test-rc"
)
var wellKnownControllers = []wellKnownController{daemonSet, deployment, replicaSet, statefulSet, replicationController, job, cronJob}
var trueVar = true
@@ -62,14 +73,14 @@ func simpleControllerFetcher() *controllerFetcher {
scaleNamespacer := &scalefake.FakeScaleClient{}
f.scaleNamespacer = scaleNamespacer
//return not found if if tries to find the scale subresouce on bah
// return not found if if tries to find the scale subresource on bah
scaleNamespacer.AddReactor("get", "bah", func(action core.Action) (handled bool, ret runtime.Object, err error) {
groupResource := schema.GroupResource{}
error := apierrors.NewNotFound(groupResource, "Foo")
return true, nil, error
})
//resource that can scale
// resource that can scale
scaleNamespacer.AddReactor("get", "iCanScale", func(action core.Action) (handled bool, ret runtime.Object, err error) {
ret = &autoscalingv1.Scale{
@@ -124,109 +135,109 @@ func TestControllerFetcher(t *testing.T) {
{
name: "deployment doesn't exist",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}},
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}},
expectedKey: nil,
expectedError: fmt.Errorf("Deployment test-namesapce/test-deployment does not exist"),
expectedError: fmt.Errorf("Deployment %s/%s does not exist", testNamespace, testDeployment),
},
{
name: "deployment no parent",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}},
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}},
objects: []runtime.Object{&appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Namespace: "test-namesapce",
Name: testDeployment,
Namespace: testNamespace,
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}}, // Deployment has no parrent
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}}, // Deployment has no parent
expectedError: nil,
},
{
name: "deployment with parent",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-rs", Kind: "ReplicaSet", Namespace: "test-namesapce"}},
Name: testReplicaSet, Kind: "ReplicaSet", Namespace: testNamespace}},
objects: []runtime.Object{&appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Namespace: "test-namesapce",
Name: testDeployment,
Namespace: testNamespace,
},
}, &appsv1.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-rs",
Namespace: "test-namesapce",
Name: testReplicaSet,
Namespace: testNamespace,
OwnerReferences: []metav1.OwnerReference{
{
Controller: &trueVar,
Kind: "Deployment",
Name: "test-deployment",
Name: testDeployment,
},
},
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}}, // Deployment has no parent
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}}, // Deployment has no parent
expectedError: nil,
},
{
name: "StatefulSet",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-statefulset", Kind: "StatefulSet", Namespace: "test-namesapce"}},
Name: testStatefulSet, Kind: "StatefulSet", Namespace: testNamespace}},
objects: []runtime.Object{&appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-statefulset",
Namespace: "test-namesapce",
Name: testStatefulSet,
Namespace: testNamespace,
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-statefulset", Kind: "StatefulSet", Namespace: "test-namesapce"}}, // StatefulSet has no parent
Name: testStatefulSet, Kind: "StatefulSet", Namespace: testNamespace}}, // StatefulSet has no parent
expectedError: nil,
},
{
name: "DaemonSet",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-daemonset", Kind: "DaemonSet", Namespace: "test-namesapce"}},
Name: testDaemonSet, Kind: "DaemonSet", Namespace: testNamespace}},
objects: []runtime.Object{&appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-daemonset",
Namespace: "test-namesapce",
Name: testDaemonSet,
Namespace: testNamespace,
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-daemonset", Kind: "DaemonSet", Namespace: "test-namesapce"}}, // DaemonSet has no parent
Name: testDaemonSet, Kind: "DaemonSet", Namespace: testNamespace}}, // DaemonSet has no parent
expectedError: nil,
},
{
name: "CronJob",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-job", Kind: "Job", Namespace: "test-namespace"}},
Name: testJob, Kind: "Job", Namespace: testNamespace}},
objects: []runtime.Object{&batchv1.Job{
TypeMeta: metav1.TypeMeta{
Kind: "Job",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-job",
Namespace: "test-namespace",
Name: testJob,
Namespace: testNamespace,
OwnerReferences: []metav1.OwnerReference{
{
Controller: &trueVar,
Kind: "CronJob",
Name: "test-cronjob",
Name: testCronJob,
},
},
},
@@ -235,65 +246,65 @@ func TestControllerFetcher(t *testing.T) {
Kind: "CronJob",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cronjob",
Namespace: "test-namespace",
Name: testCronJob,
Namespace: testNamespace,
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-cronjob", Kind: "CronJob", Namespace: "test-namespace"}}, // CronJob has no parent
Name: testCronJob, Kind: "CronJob", Namespace: testNamespace}}, // CronJob has no parent
expectedError: nil,
},
{
name: "CronJob no parent",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-cronjob", Kind: "CronJob", Namespace: "test-namespace"}},
Name: testCronJob, Kind: "CronJob", Namespace: testNamespace}},
objects: []runtime.Object{&batchv1.CronJob{
TypeMeta: metav1.TypeMeta{
Kind: "CronJob",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cronjob",
Namespace: "test-namespace",
Name: testCronJob,
Namespace: testNamespace,
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-cronjob", Kind: "CronJob", Namespace: "test-namespace"}}, // CronJob has no parent
Name: testCronJob, Kind: "CronJob", Namespace: testNamespace}}, // CronJob has no parent
expectedError: nil,
},
{
name: "rc no parent",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-rc", Kind: "ReplicationController", Namespace: "test-namesapce"}},
Name: testReplicationController, Kind: "ReplicationController", Namespace: testNamespace}},
objects: []runtime.Object{&corev1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-rc",
Namespace: "test-namesapce",
Name: testReplicationController,
Namespace: testNamespace,
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-rc", Kind: "ReplicationController", Namespace: "test-namesapce"}}, // ReplicationController has no parent
Name: testReplicationController, Kind: "ReplicationController", Namespace: testNamespace}}, // ReplicationController has no parent
expectedError: nil,
},
{
name: "deployment cycle in ownership",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}},
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}},
objects: []runtime.Object{&appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Namespace: "test-namesapce",
Name: testDeployment,
Namespace: testNamespace,
// Deployment points to itself
OwnerReferences: []metav1.OwnerReference{
{
Controller: &trueVar,
Kind: "Deployment",
Name: "test-deployment",
Name: testDeployment,
},
},
},
@@ -304,14 +315,14 @@ func TestControllerFetcher(t *testing.T) {
{
name: "deployment, parent with no scale subresource",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}},
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}},
objects: []runtime.Object{&appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Namespace: "test-namesapce",
Name: testDeployment,
Namespace: testNamespace,
// Parent that does not support scale subresource and is not well known
OwnerReferences: []metav1.OwnerReference{
{
@@ -324,20 +335,20 @@ func TestControllerFetcher(t *testing.T) {
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}}, // Parent does not support scale subresource so should return itself"
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}}, // Parent does not support scale subresource so should return itself"
expectedError: nil,
},
{
name: "deployment, parent not well known with scale subresource",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}},
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}},
objects: []runtime.Object{&appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Namespace: "test-namesapce",
Name: testDeployment,
Namespace: testNamespace,
// Parent that support scale subresource and is not well known
OwnerReferences: []metav1.OwnerReference{
{
@@ -350,20 +361,20 @@ func TestControllerFetcher(t *testing.T) {
},
}},
expectedKey: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "iCanScale", Kind: "Scale", Namespace: "test-namesapce"}, ApiVersion: "Foo/Foo"}, // Parent supports scale subresource"
Name: "iCanScale", Kind: "Scale", Namespace: testNamespace}, ApiVersion: "Foo/Foo"}, // Parent supports scale subresource"
expectedError: nil,
},
{
name: "pod, parent is node",
key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{
Name: "test-deployment", Kind: "Deployment", Namespace: "test-namesapce"}},
Name: testDeployment, Kind: "Deployment", Namespace: testNamespace}},
objects: []runtime.Object{&appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Namespace: "test-namesapce",
Name: testDeployment,
Namespace: testNamespace,
// Parent is a node
OwnerReferences: []metav1.OwnerReference{
{

View File

@@ -74,7 +74,7 @@ type prometheusHistoryProvider struct {
historyResolution prommodel.Duration
}
// NewPrometheusHistoryProvider contructs a history provider that gets data from Prometheus.
// NewPrometheusHistoryProvider constructs a history provider that gets data from Prometheus.
func NewPrometheusHistoryProvider(config PrometheusHistoryProviderConfig) (HistoryProvider, error) {
promClient, err := promapi.NewClient(promapi.Config{
Address: config.Address,

View File

@@ -24,7 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
recommender_metrics "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/recommender"
klog "k8s.io/klog/v2"
"k8s.io/klog/v2"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
)
@@ -41,7 +41,7 @@ type ContainerMetricsSnapshot struct {
Usage model.Resources
}
// MetricsClient provides simple metrics on resources usage on containter level.
// MetricsClient provides simple metrics on resources usage on container level.
type MetricsClient interface {
// GetContainersMetrics returns an array of ContainerMetricsSnapshots,
// representing resource usage for every running container in the cluster

View File

@@ -247,7 +247,7 @@ func (a *AggregateContainerState) SaveToCheckpoint() (*vpa_types.VerticalPodAuto
// into the AggregateContainerState.
func (a *AggregateContainerState) LoadFromCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpointStatus) error {
if checkpoint.Version != SupportedCheckpointVersion {
return fmt.Errorf("unsuported checkpoint version %s", checkpoint.Version)
return fmt.Errorf("unsupported checkpoint version %s", checkpoint.Version)
}
a.TotalSamplesCount = checkpoint.TotalSamplesCount
a.FirstSampleStart = checkpoint.FirstSampleStart.Time

View File

@@ -25,7 +25,7 @@ import (
autoscaling "k8s.io/api/autoscaling/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/labels"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/controller_fetcher"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
@@ -110,10 +110,10 @@ func TestClusterGCAggregateContainerStateDeletesOld(t *testing.T) {
assert.NotEmpty(t, cluster.aggregateStateMap)
assert.NotEmpty(t, vpa.aggregateContainerStates)
// AggegateContainerState are valid for 8 days since last sample
// AggregateContainerState are valid for 8 days since last sample
cluster.garbageCollectAggregateCollectionStates(usageSample.MeasureStart.Add(9*24*time.Hour), testControllerFetcher)
// AggegateContainerState should be deleted from both cluster and vpa
// AggregateContainerState should be deleted from both cluster and vpa
assert.Empty(t, cluster.aggregateStateMap)
assert.Empty(t, vpa.aggregateContainerStates)
}
@@ -137,14 +137,14 @@ func TestClusterGCAggregateContainerStateDeletesOldEmpty(t *testing.T) {
}
// Verify empty aggregate states are not removed right away.
cluster.garbageCollectAggregateCollectionStates(creationTime.Add(1*time.Minute), testControllerFetcher) // AggegateContainerState should be deleted from both cluster and vpa
cluster.garbageCollectAggregateCollectionStates(creationTime.Add(1*time.Minute), testControllerFetcher) // AggregateContainerState should be deleted from both cluster and vpa
assert.NotEmpty(t, cluster.aggregateStateMap)
assert.NotEmpty(t, vpa.aggregateContainerStates)
// AggegateContainerState are valid for 8 days since creation
// AggregateContainerState are valid for 8 days since creation
cluster.garbageCollectAggregateCollectionStates(creationTime.Add(9*24*time.Hour), testControllerFetcher)
// AggegateContainerState should be deleted from both cluster and vpa
// AggregateContainerState should be deleted from both cluster and vpa
assert.Empty(t, cluster.aggregateStateMap)
assert.Empty(t, vpa.aggregateContainerStates)
}
@@ -168,14 +168,14 @@ func TestClusterGCAggregateContainerStateDeletesEmptyInactiveWithoutController(t
cluster.garbageCollectAggregateCollectionStates(testTimestamp, controller)
// AggegateContainerState should not be deleted as the pod is still active.
// AggregateContainerState should not be deleted as the pod is still active.
assert.NotEmpty(t, cluster.aggregateStateMap)
assert.NotEmpty(t, vpa.aggregateContainerStates)
cluster.Pods[pod.ID].Phase = apiv1.PodSucceeded
cluster.garbageCollectAggregateCollectionStates(testTimestamp, controller)
// AggegateContainerState should be empty as the pod is no longer active, controller is not alive
// AggregateContainerState should be empty as the pod is no longer active, controller is not alive
// and there are no usage samples.
assert.Empty(t, cluster.aggregateStateMap)
assert.Empty(t, vpa.aggregateContainerStates)
@@ -197,14 +197,14 @@ func TestClusterGCAggregateContainerStateLeavesEmptyInactiveWithController(t *te
cluster.garbageCollectAggregateCollectionStates(testTimestamp, controller)
// AggegateContainerState should not be deleted as the pod is still active.
// AggregateContainerState should not be deleted as the pod is still active.
assert.NotEmpty(t, cluster.aggregateStateMap)
assert.NotEmpty(t, vpa.aggregateContainerStates)
cluster.Pods[pod.ID].Phase = apiv1.PodSucceeded
cluster.garbageCollectAggregateCollectionStates(testTimestamp, controller)
// AggegateContainerState should not be delated as the controller is still alive.
// AggregateContainerState should not be deleted as the controller is still alive.
assert.NotEmpty(t, cluster.aggregateStateMap)
assert.NotEmpty(t, vpa.aggregateContainerStates)
}
@@ -224,7 +224,7 @@ func TestClusterGCAggregateContainerStateLeavesValid(t *testing.T) {
assert.NotEmpty(t, cluster.aggregateStateMap)
assert.NotEmpty(t, vpa.aggregateContainerStates)
// AggegateContainerState are valid for 8 days since last sample
// AggregateContainerState are valid for 8 days since last sample
cluster.garbageCollectAggregateCollectionStates(usageSample.MeasureStart.Add(7*24*time.Hour), testControllerFetcher)
assert.NotEmpty(t, cluster.aggregateStateMap)
@@ -250,7 +250,7 @@ func TestAddSampleAfterAggregateContainerStateGCed(t *testing.T) {
aggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID)
assert.Contains(t, vpa.aggregateContainerStates, aggregateStateKey)
// AggegateContainerState are invalid after 8 days since last sample
// AggregateContainerState are invalid after 8 days since last sample
gcTimestamp := usageSample.MeasureStart.Add(10 * 24 * time.Hour)
cluster.garbageCollectAggregateCollectionStates(gcTimestamp, testControllerFetcher)
@@ -275,7 +275,7 @@ func TestClusterGCRateLimiting(t *testing.T) {
cluster := NewClusterState(testGcPeriod)
usageSample := makeTestUsageSample()
sampleExpireTime := usageSample.MeasureStart.Add(9 * 24 * time.Hour)
// AggegateContainerState are valid for 8 days since last sample but this run
// AggregateContainerState are valid for 8 days since last sample but this run
// doesn't remove the sample, because we didn't add it yet.
cluster.RateLimitedGarbageCollectAggregateCollectionStates(sampleExpireTime, testControllerFetcher)
vpa := addTestVpa(cluster)
@@ -293,7 +293,7 @@ func TestClusterGCRateLimiting(t *testing.T) {
assert.NotEmpty(t, cluster.aggregateStateMap)
assert.NotEmpty(t, vpa.aggregateContainerStates)
// AggegateContainerState should be deleted from both cluster and vpa
// AggregateContainerState should be deleted from both cluster and vpa
cluster.RateLimitedGarbageCollectAggregateCollectionStates(sampleExpireTime.Add(2*testGcPeriod), testControllerFetcher)
assert.Empty(t, cluster.aggregateStateMap)
assert.Empty(t, vpa.aggregateContainerStates)

View File

@@ -32,7 +32,7 @@ type ContainerUsageSample struct {
MeasureStart time.Time
// Average CPU usage in cores or memory usage in bytes.
Usage ResourceAmount
// CPU or memory request at the time of measurment.
// CPU or memory request at the time of measurement.
Request ResourceAmount
// Which resource is this sample for.
Resource ResourceName

View File

@@ -22,7 +22,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
labels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/labels"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
@@ -119,7 +119,7 @@ func TestUpdateConditions(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
containerName := "container"
vpa := NewVpa(VpaID{Namespace: "test-namespace", VpaName: "my-facourite-vpa"}, labels.Nothing(), time.Unix(0, 0))
vpa := NewVpa(VpaID{Namespace: "test-namespace", VpaName: "my-favourite-vpa"}, labels.Nothing(), time.Unix(0, 0))
if tc.hasRecommendation {
vpa.Recommendation = test.Recommendation().WithContainer(containerName).WithTarget("5", "200").Get()
}

View File

@@ -17,8 +17,8 @@ limitations under the License.
package mocktarget
import (
gomock "github.com/golang/mock/gomock"
labels "k8s.io/apimachinery/pkg/labels"
"github.com/golang/mock/gomock"
"k8s.io/apimachinery/pkg/labels"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
)
@@ -40,7 +40,7 @@ func NewMockVpaTargetSelectorFetcher(ctrl *gomock.Controller) *MockVpaTargetSele
return mock
}
// EXPECT enables configuring expectaions
// EXPECT enables configuring expectations
func (_m *MockVpaTargetSelectorFetcher) EXPECT() *_MockVpaTargetSelectorFetcherRecorder {
return _m.recorder
}

View File

@@ -31,7 +31,7 @@ import (
kube_client "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
klog "k8s.io/klog/v2"
"k8s.io/klog/v2"
)
const (
@@ -109,7 +109,7 @@ func (e *podsEvictionRestrictionImpl) CanEvict(pod *apiv1.Pod) bool {
if singleGroupStats.running-singleGroupStats.evicted > shouldBeAlive {
return true
}
// If all pods are running and eviction tollerance is small evict 1 pod.
// If all pods are running and eviction tolerance is small evict 1 pod.
if singleGroupStats.running == singleGroupStats.configured &&
singleGroupStats.evictionTolerance == 0 &&
singleGroupStats.evicted == 0 {
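A worked example consistent with the test names that follow ("half of 3", "half of 4"): with an eviction tolerance of 0.5, floor(0.5 * 3) = 1 of 3 running pods, or floor(0.5 * 4) = 2 of 4, may be evicted at a time; when that floor comes out to zero, the evictionTolerance == 0 branch above still permits a single eviction.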

View File

@@ -45,7 +45,6 @@ func getBasicVpa() *vpa_types.VerticalPodAutoscaler {
}
func TestEvictReplicatedByController(t *testing.T) {
rc := apiv1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "rc",
@@ -67,17 +66,17 @@ }
}
testCases := []struct {
name string
replicas int32
evictionTollerance float64
vpa *vpa_types.VerticalPodAutoscaler
pods []podWithExpectations
name string
replicas int32
evictionTolerance float64
vpa *vpa_types.VerticalPodAutoscaler
pods []podWithExpectations
}{
{
name: "Evict only first pod (half of 3).",
replicas: 3,
evictionTollerance: 0.5,
vpa: getBasicVpa(),
name: "Evict only first pod (half of 3).",
replicas: 3,
evictionTolerance: 0.5,
vpa: getBasicVpa(),
pods: []podWithExpectations{
{
pod: generatePod().Get(),
@@ -97,10 +96,10 @@ },
},
},
{
name: "Evict two pods (half of 4).",
replicas: 4,
evictionTollerance: 0.5,
vpa: getBasicVpa(),
name: "Evict two pods (half of 4).",
replicas: 4,
evictionTolerance: 0.5,
vpa: getBasicVpa(),
pods: []podWithExpectations{
{
@@ -126,10 +125,10 @@ },
},
},
{
name: "Half of the population can be evicted. One pod is missing already.",
replicas: 4,
evictionTollerance: 0.5,
vpa: getBasicVpa(),
name: "Half of the population can be evicted. One pod is missing already.",
replicas: 4,
evictionTolerance: 0.5,
vpa: getBasicVpa(),
pods: []podWithExpectations{
{
pod: generatePod().Get(),
@@ -149,10 +148,10 @@ },
},
},
{
name: "For small eviction tollerance at least one pod is evicted.",
replicas: 3,
evictionTollerance: 0.1,
vpa: getBasicVpa(),
name: "For small eviction tolerance at least one pod is evicted.",
replicas: 3,
evictionTolerance: 0.1,
vpa: getBasicVpa(),
pods: []podWithExpectations{
{
pod: generatePod().Get(),
@@ -172,10 +171,10 @@ },
},
},
{
name: "Only 2 pods in replica of 3 and tollerance is 0. None of pods can be evicted.",
replicas: 3,
evictionTollerance: 0.1,
vpa: getBasicVpa(),
name: "Only 2 pods in replica of 3 and tolerance is 0. None of pods can be evicted.",
replicas: 3,
evictionTolerance: 0.1,
vpa: getBasicVpa(),
pods: []podWithExpectations{
{
pod: generatePod().Get(),
@@ -190,10 +189,10 @@ },
},
},
{
name: "Only pending pod can be evicted without violation of tollerance.",
replicas: 3,
evictionTollerance: 0.5,
vpa: getBasicVpa(),
name: "Only pending pod can be evicted without violation of tolerance.",
replicas: 3,
evictionTolerance: 0.5,
vpa: getBasicVpa(),
pods: []podWithExpectations{
{
pod: generatePod().Get(),
@@ -213,10 +212,10 @@ },
},
},
{
name: "Pending pods are always evictable.",
replicas: 4,
evictionTollerance: 0.5,
vpa: getBasicVpa(),
name: "Pending pods are always evictable.",
replicas: 4,
evictionTolerance: 0.5,
vpa: getBasicVpa(),
pods: []podWithExpectations{
{
pod: generatePod().Get(),
@@ -241,10 +240,10 @@ },
},
},
{
name: "Cannot evict a single Pod under default settings.",
replicas: 1,
evictionTollerance: 0.5,
vpa: getBasicVpa(),
name: "Cannot evict a single Pod under default settings.",
replicas: 1,
evictionTolerance: 0.5,
vpa: getBasicVpa(),
pods: []podWithExpectations{
{
pod: generatePod().Get(),
@@ -254,10 +253,10 @@ },
},
},
{
name: "Can evict even a single Pod using PodUpdatePolicy.MinReplicas.",
replicas: 1,
evictionTollerance: 0.5,
vpa: vpaSingleReplica,
name: "Can evict even a single Pod using PodUpdatePolicy.MinReplicas.",
replicas: 1,
evictionTolerance: 0.5,
vpa: vpaSingleReplica,
pods: []podWithExpectations{
{
pod: generatePod().Get(),
@@ -276,7 +275,7 @@ func TestEvictReplicatedByController(t *testing.T) {
for _, p := range testCase.pods {
pods = append(pods, p.pod)
}
factory, _ := getEvictionRestrictionFactory(&rc, nil, nil, nil, 2, testCase.evictionTollerance)
factory, _ := getEvictionRestrictionFactory(&rc, nil, nil, nil, 2, testCase.evictionTolerance)
eviction := factory.NewPodsEvictionRestriction(pods, testCase.vpa)
for i, p := range testCase.pods {
assert.Equalf(t, p.canEvict, eviction.CanEvict(p.pod), "TC %v - unexpected CanEvict result for pod-%v %#v", testCase.name, i, p.pod)

View File

@@ -260,10 +260,10 @@ func (u *updater) getPodsUpdateOrder(pods []*apiv1.Pod, vpa *vpa_types.VerticalP
return priorityCalculator.GetSortedPods(u.evictionAdmission)
}
func filterNonEvictablePods(pods []*apiv1.Pod, evictionRestriciton eviction.PodsEvictionRestriction) []*apiv1.Pod {
func filterNonEvictablePods(pods []*apiv1.Pod, evictionRestriction eviction.PodsEvictionRestriction) []*apiv1.Pod {
result := make([]*apiv1.Pod, 0)
for _, pod := range pods {
if evictionRestriciton.CanEvict(pod) {
if evictionRestriction.CanEvict(pod) {
result = append(result, pod)
}
}

View File

@@ -217,7 +217,7 @@ func TestGetUpdatePriority(t *testing.T) {
}
}
// Verify GetUpdatePriorty does not encounter a NPE when there is no
// Verify GetUpdatePriority does not encounter a NPE when there is no
// recommendation for a container.
func TestGetUpdatePriority_NoRecommendationForContainer(t *testing.T) {
p := NewProcessor()

View File

@@ -82,7 +82,7 @@ var (
"Time spent in various parts of VPA admission controller")
)
// Register initializes all metrics for VPA Admission Contoller
// Register initializes all metrics for VPA Admission Controller
func Register() {
prometheus.MustRegister(admissionCount)
prometheus.MustRegister(admissionLatency)

View File

@@ -97,7 +97,7 @@ func (c *Client) UpdateStatus() error {
return err
}
lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()}
lease.Spec.HolderIdentity = pointer.StringPtr(c.holderIdentity)
lease.Spec.HolderIdentity = pointer.String(c.holderIdentity)
_, err = c.client.Update(context.TODO(), lease, metav1.UpdateOptions{})
if apierrors.IsConflict(err) {
// Lease was updated by an another replica of the component.
@@ -126,8 +126,8 @@ func (c *Client) newLease() *apicoordinationv1.Lease {
Namespace: c.leaseNamespace,
},
Spec: apicoordinationv1.LeaseSpec{
HolderIdentity: pointer.StringPtr(c.holderIdentity),
LeaseDurationSeconds: pointer.Int32Ptr(c.leaseDurationSeconds),
HolderIdentity: pointer.String(c.holderIdentity),
LeaseDurationSeconds: pointer.Int32(c.leaseDurationSeconds),
},
}
}
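In k8s.io/utils/pointer, StringPtr and Int32Ptr are deprecated spellings of String and Int32; each simply returns a pointer to its argument. A runnable sketch, assuming that module is available (the values are hypothetical):

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	// pointer.String / pointer.Int32 replace the deprecated
	// StringPtr / Int32Ptr forms; each returns &v for its argument v.
	holder := pointer.String("vpa-updater") // hypothetical holder identity
	seconds := pointer.Int32(60)            // hypothetical lease duration
	fmt.Println(*holder, *seconds)
}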

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
klog "k8s.io/klog/v2"
"k8s.io/klog/v2"
)
// NewCappingRecommendationProcessor constructs new RecommendationsProcessor that adjusts recommendation
@@ -116,7 +116,7 @@ func getCappedRecommendationForContainer(
cappingAnnotations := make([]string, 0)
process := func(recommendation apiv1.ResourceList, genAnnotations bool) {
// TODO: Add anotation if limitRange is conflicting with VPA policy.
// TODO: Add annotation if limitRange is conflicting with VPA policy.
limitAnnotations := applyContainerLimitRange(recommendation, container, limitRange)
annotations := applyVPAPolicy(recommendation, containerPolicy)
if genAnnotations {

View File

@@ -659,7 +659,7 @@ func TestApplyPodLimitRange(t *testing.T) {
},
},
{
name: "cap mem request to pod min, only one container with recomendation",
name: "cap mem request to pod min, only one container with recommendation",
resources: []vpa_types.RecommendedContainerResources{
{
ContainerName: "container1",