Write events when autoprovisioned nodegroup is created / deleted
This commit is contained in:
parent
c7aa56a82a
commit
9c2ebccbfe
|
|
@@ -26,6 +26,7 @@ import (
|
|||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/metrics"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator"
|
||||
|
|
@@ -821,7 +822,7 @@ func hasNoScaleDownAnnotation(node *apiv1.Node) bool {
|
|||
return node.Annotations[ScaleDownDisabledKey] == "true"
|
||||
}
|
||||
|
||||
func cleanUpNodeAutoprovisionedGroups(cloudProvider cloudprovider.CloudProvider) error {
|
||||
func cleanUpNodeAutoprovisionedGroups(cloudProvider cloudprovider.CloudProvider, logRecorder *utils.LogEventRecorder) error {
|
||||
nodeGroups := cloudProvider.NodeGroups()
|
||||
for _, nodeGroup := range nodeGroups {
|
||||
if !nodeGroup.Autoprovisioned() {
|
||||
|
|
@@ -832,9 +833,14 @@ func cleanUpNodeAutoprovisionedGroups(cloudProvider cloudprovider.CloudProvider)
|
|||
return err
|
||||
}
|
||||
if size == 0 {
|
||||
ngId := nodeGroup.Id()
|
||||
if err := nodeGroup.Delete(); err != nil {
|
||||
logRecorder.Eventf(apiv1.EventTypeWarning, "FailedToDeleteNodeGroup",
|
||||
"NodeAutoprovisioning: attempt to delete node group %v failed: %v", ngId, err)
|
||||
return err
|
||||
}
|
||||
logRecorder.Eventf(apiv1.EventTypeNormal, "DeletedNodeGroup",
|
||||
"NodeAutoprovisioning: removed node group %v", ngId)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
|||
|
|
@@ -1114,7 +1114,10 @@ func TestCleanUpNodeAutoprovisionedGroups(t *testing.T) {
|
|||
provider.AddNode("ng3", n1)
|
||||
assert.NotNil(t, provider)
|
||||
|
||||
assert.NoError(t, cleanUpNodeAutoprovisionedGroups(provider))
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeRecorder := kube_util.CreateEventRecorder(fakeClient)
|
||||
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
|
||||
assert.NoError(t, cleanUpNodeAutoprovisionedGroups(provider, fakeLogRecorder))
|
||||
}
|
||||
|
||||
func TestCalculateCoresAndMemoryTotal(t *testing.T) {
|
||||
|
|
|
|||
|
|
@@ -214,6 +214,8 @@ func ScaleUp(context *AutoscalingContext, unschedulablePods []*apiv1.Pod, nodes
|
|||
oldId := bestOption.NodeGroup.Id()
|
||||
err := bestOption.NodeGroup.Create()
|
||||
if err != nil {
|
||||
context.LogRecorder.Eventf(apiv1.EventTypeWarning, "FailedToCreateNodeGroup",
|
||||
"NodeAutoprovisioning: attempt to create node group %v failed: %v", oldId, err)
|
||||
return false, errors.ToAutoscalerError(errors.CloudProviderError, err)
|
||||
}
|
||||
newId := bestOption.NodeGroup.Id()
|
||||
|
|
@@ -224,6 +226,9 @@ func ScaleUp(context *AutoscalingContext, unschedulablePods []*apiv1.Pod, nodes
|
|||
nodeInfos[newId] = nodeInfos[oldId]
|
||||
delete(nodeInfos, oldId)
|
||||
}
|
||||
context.LogRecorder.Eventf(apiv1.EventTypeNormal, "CreatedNodeGroup",
|
||||
"NodeAutoprovisioning: created new node group %v", newId)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -303,7 +303,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
|
|||
// We want to delete unneeded Node Groups only if there was no recent scale up,
|
||||
// and there is no current delete in progress and there was no recent errors.
|
||||
if a.AutoscalingContext.NodeAutoprovisioningEnabled {
|
||||
err := cleanUpNodeAutoprovisionedGroups(a.AutoscalingContext.CloudProvider)
|
||||
err := cleanUpNodeAutoprovisionedGroups(a.AutoscalingContext.CloudProvider, a.AutoscalingContext.LogRecorder)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to clean up unneeded node groups: %v", err)
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in New Issue