mirror of https://github.com/kubernetes/kops.git
Merge pull request #16213 from rifelpet/targetlifecyclestate
aws: Use instance metadata to get warm pool state
Commit 988befb20d
@@ -1116,6 +1116,9 @@ func addASLifecyclePolicies(p *Policy, enableHookSupport bool) {
 			"autoscaling:DescribeLifecycleHooks",
 		)
 	}
+	// TODO: remove this after k8s 1.29 support is removed
+	// It is no longer needed as of kops 1.29 but to prevent node bootstrap issues
+	// during kops upgrades we keep the permission until it is guaranteed to not be needed.
 	p.unconditionalAction.Insert(
 		"autoscaling:DescribeAutoScalingInstances",
 	)
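For context on the permission kept above: rendered into an instance-role policy, granting autoscaling:DescribeAutoScalingInstances unconditionally amounts to a statement like the one produced below. This is a simplified, hypothetical rendering (the Statement and PolicyDocument types and the single-statement output are illustrative; the real kops policy builder emits a much larger document):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Statement and PolicyDocument are simplified, hypothetical stand-ins for an
// IAM policy document; the real kops policy builder output is richer.
type Statement struct {
	Effect   string   `json:"Effect"`
	Action   []string `json:"Action"`
	Resource string   `json:"Resource"`
}

type PolicyDocument struct {
	Version   string      `json:"Version"`
	Statement []Statement `json:"Statement"`
}

func main() {
	// The action kept unconditionally by the hunk above, so nodes still running
	// an older nodeup (which calls the Auto Scaling API directly) can bootstrap.
	doc := PolicyDocument{
		Version: "2012-10-17",
		Statement: []Statement{{
			Effect:   "Allow",
			Action:   []string{"autoscaling:DescribeAutoScalingInstances"},
			Resource: "*",
		}},
	}
	out, err := json.MarshalIndent(doc, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```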
@@ -32,6 +32,7 @@ import (
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/autoscaling"
@@ -245,9 +246,12 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 		}
 		modelContext.InstanceID = string(instanceIDBytes)
 
-		modelContext.ConfigurationMode, err = getAWSConfigurationMode(ctx, modelContext)
-		if err != nil {
-			return err
+		// Check if WarmPool is enabled first, to avoid additional API calls
+		if len(modelContext.NodeupConfig.WarmPoolImages) > 0 {
+			modelContext.ConfigurationMode, err = getAWSConfigurationMode(ctx, modelContext)
+			if err != nil {
+				return err
+			}
 		}
 
 		modelContext.MachineType, err = getMachineType()
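The guard above means getAWSConfigurationMode only runs when warm pool images are configured. As a rough sketch of how the resulting mode might be consumed downstream, assuming a hypothetical configureNode helper and a placeholder value for model.ConfigurationModeWarming (the concrete constant value is not shown in this diff):

```go
package main

import "fmt"

// configurationModeWarming is a placeholder for model.ConfigurationModeWarming;
// the concrete value used by kops is not shown in this diff.
const configurationModeWarming = "Warming"

// configureNode is a hypothetical consumer of the mode computed in Run() above:
// an instance entering a warm pool only does the cheap preparation work (such as
// pre-pulling the configured WarmPoolImages) instead of a full bootstrap.
func configureNode(mode string, warmPoolImages []string) {
	if mode == configurationModeWarming {
		for _, img := range warmPoolImages {
			fmt.Println("would pre-pull image:", img)
		}
		return
	}
	fmt.Println("running full node bootstrap")
}

func main() {
	configureNode(configurationModeWarming, []string{"registry.k8s.io/pause:3.9"})
	configureNode("", nil)
}
```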
@@ -712,6 +716,11 @@ func getNodeConfigFromServers(ctx context.Context, bootConfig *nodeup.BootConfig
 }
 
 func getAWSConfigurationMode(ctx context.Context, c *model.NodeupModelContext) (string, error) {
+	// Check if WarmPool is enabled first, to avoid additional API calls
+	if len(c.NodeupConfig.WarmPoolImages) == 0 {
+		return "", nil
+	}
+
 	// Only worker nodes and apiservers can actually autoscale.
 	// We are not adding describe permissions to the other roles
 	role := c.BootConfig.InstanceGroupRole
@@ -719,20 +728,17 @@ func getAWSConfigurationMode(ctx context.Context, c *model.NodeupModelContext) (
 		return "", nil
 	}
 
-	svc := c.Cloud.(awsup.AWSCloud).Autoscaling()
-
-	result, err := svc.DescribeAutoScalingInstancesWithContext(ctx, &autoscaling.DescribeAutoScalingInstancesInput{
-		InstanceIds: []*string{&c.InstanceID},
-	})
+	targetLifecycleState, err := vfs.Context.ReadFile("metadata://aws/meta-data/autoscaling/target-lifecycle-state")
 	if err != nil {
-		return "", fmt.Errorf("error describing instances: %v", err)
+		var awsErr awserr.RequestFailure
+		if errors.As(err, &awsErr) && awsErr.StatusCode() == 404 {
+			// The instance isn't in an ASG (karpenter, etc.)
+			return "", nil
+		}
+		return "", fmt.Errorf("error reading target-lifecycle-state from instance metadata: %v", err)
 	}
-	// If the instance is not a part of an ASG, it won't be in a warm pool either.
-	if len(result.AutoScalingInstances) < 1 {
-		return "", nil
-	}
-	lifecycle := fi.ValueOf(result.AutoScalingInstances[0].LifecycleState)
-	if strings.HasPrefix(lifecycle, "Warmed:") {
+
+	if strings.HasPrefix(string(targetLifecycleState), "Warmed:") {
 		klog.Info("instance is entering warm pool")
 		return model.ConfigurationModeWarming, nil
 	} else {
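The metadata:// read above resolves to the EC2 instance metadata path /latest/meta-data/autoscaling/target-lifecycle-state, which returns values such as InService or Warmed:Stopped and is absent (404) when the instance is not part of an Auto Scaling group. A minimal standalone sketch of the same lookup using the aws-sdk-go v1 ec2metadata package already imported by this file (a sketch, not the kops vfs implementation):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	md := ec2metadata.New(sess)

	// Reads http://169.254.169.254/latest/meta-data/autoscaling/target-lifecycle-state.
	// The request fails when the instance is not part of an Auto Scaling group.
	state, err := md.GetMetadata("autoscaling/target-lifecycle-state")
	if err != nil {
		fmt.Println("not in an ASG or metadata unavailable:", err)
		return
	}

	if strings.HasPrefix(state, "Warmed:") {
		fmt.Println("instance is entering the warm pool:", state)
	} else {
		fmt.Println("instance is in service:", state)
	}
}
```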