implemented TemplateNodeInfo function
parent a0685e1184
commit 62edf2291a
@@ -158,7 +158,17 @@ func (asg *AutoScalingGroup) Nodes() ([]cloudprovider.Instance, error) {
 // capacity and allocatable information as well as all pods that are started on
 // the node by default, using manifest (most likely only kube-proxy). Implementation optional.
 func (asg *AutoScalingGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
-	return nil, cloudprovider.ErrNotImplemented
+	template, err := asg.cloudServiceManager.getAsgTemplate(asg.groupID)
+	if err != nil {
+		return nil, err
+	}
+	node, err := asg.cloudServiceManager.buildNodeFromTemplate(asg.groupName, template)
+	if err != nil {
+		return nil, err
+	}
+	nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.groupName))
+	nodeInfo.SetNode(node)
+	return nodeInfo, nil
 }

 // Exist checks if the node group really exists on the cloud provider side. Allows to tell the
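For context, a minimal sketch of how a caller might exercise the new method; the asg value, verbosity level, and log text are illustrative, not part of this commit:

// Build the template NodeInfo for a node group and inspect the simulated node.
nodeInfo, err := asg.TemplateNodeInfo()
if err != nil {
	klog.Errorf("failed to build template node info: %v", err)
	return
}
node := nodeInfo.Node()
klog.V(4).Infof("template node %s: cpu=%s, memory=%s",
	node.Name, node.Status.Capacity.Cpu().String(), node.Status.Capacity.Memory().String())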
@@ -18,8 +18,14 @@ package huaweicloud

 import (
 	"fmt"
+	"math/rand"
+	"strconv"
+	"strings"
 	"time"

+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/core/sdktime"
@@ -27,7 +33,9 @@ import (
 	huaweicloudsdkasmodel "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/services/as/v1/model"
 	huaweicloudsdkecs "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/services/ecs/v2"
 	huaweicloudsdkecsmodel "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/services/ecs/v2/model"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	"k8s.io/klog/v2"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 )

 // ElasticCloudServerService represents the elastic cloud server interfaces.
@@ -53,6 +61,12 @@ type AutoScalingService interface {
 	// The delta should be non-negative.
 	// IncreaseSizeInstance waits until the instance number is updated.
 	IncreaseSizeInstance(groupID string, delta int) error
+
+	// getAsgTemplate returns the template of the auto scaling group, based on its scaling configuration.
+	getAsgTemplate(groupID string) (*asgTemplate, error)
+
+	// buildNodeFromTemplate builds a node object from an auto scaling group template.
+	buildNodeFromTemplate(asgName string, template *asgTemplate) (*apiv1.Node, error)
 }

 // CloudServiceManager represents the cloud service interfaces.
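Any test double for AutoScalingService now has to provide the two new methods as well. A hypothetical stub (not part of this commit; flavor values are made up) could look like:

// fakeAutoScalingService is a hypothetical test stub. Embedding the interface
// satisfies the methods this stub does not override.
type fakeAutoScalingService struct {
	AutoScalingService
}

func (f *fakeAutoScalingService) getAsgTemplate(groupID string) (*asgTemplate, error) {
	return &asgTemplate{name: "s6.large.2", vcpu: 2, ram: 4096, zone: "cn-north-4a"}, nil
}

func (f *fakeAutoScalingService) buildNodeFromTemplate(asgName string, template *asgTemplate) (*apiv1.Node, error) {
	return &apiv1.Node{ObjectMeta: metav1.ObjectMeta{Name: asgName + "-template"}}, nil
}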
@@ -71,6 +85,16 @@ type cloudServiceManager struct {
 	getASClientFunc func() *huaweicloudsdkas.AsClient
 }

+type asgTemplate struct {
+	name   string
+	vcpu   int64
+	ram    int64
+	gpu    int64
+	region string
+	zone   string
+	tags   map[string]string
+}
+
 func newCloudServiceManager(cloudConfig *CloudConfig) *cloudServiceManager {
 	return &cloudServiceManager{
 		cloudConfig: cloudConfig,
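For illustration, a populated template for a hypothetical s6.large.2 flavor (2 vCPU, 4096 MiB of RAM; all values made up) would look like:

template := &asgTemplate{
	name:   "s6.large.2",
	vcpu:   2,
	ram:    4096, // MiB; converted to bytes in buildNodeFromTemplate below
	region: "cn-north-4",
	zone:   "cn-north-4a",
	tags:   map[string]string{"env": "test"},
}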
@@ -389,3 +413,135 @@ func (csm *cloudServiceManager) deleteScalingPolicy(opts *huaweicloudsdkasmodel.
 	klog.V(1).Infof("delete scaling policy succeed. policy id: %s", opts.ScalingPolicyId)
 	return nil
 }
+
+func (csm *cloudServiceManager) getScalingGroupByID(groupID string) (*huaweicloudsdkasmodel.ScalingGroups, error) {
+	asClient := csm.getASClientFunc()
+	opts := &huaweicloudsdkasmodel.ShowScalingGroupRequest{
+		ScalingGroupId: groupID,
+	}
+	response, err := asClient.ShowScalingGroup(opts)
+	if err != nil {
+		klog.Errorf("failed to show scaling group info. group: %s, error: %v", groupID, err)
+		return nil, err
+	}
+	if response == nil || response.ScalingGroup == nil {
+		return nil, fmt.Errorf("no scaling group found: %s", groupID)
+	}
+
+	return response.ScalingGroup, nil
+}
+
+func (csm *cloudServiceManager) getScalingGroupConfigByID(groupID, configID string) (*huaweicloudsdkasmodel.ScalingConfiguration, error) {
+	asClient := csm.getASClientFunc()
+	opts := &huaweicloudsdkasmodel.ShowScalingConfigRequest{
+		ScalingConfigurationId: configID,
+	}
+	response, err := asClient.ShowScalingConfig(opts)
+	if err != nil {
+		klog.Errorf("failed to show scaling group config. config id: %s, error: %v", configID, err)
+		return nil, err
+	}
+	if response == nil || response.ScalingConfiguration == nil {
+		return nil, fmt.Errorf("no scaling configuration found. config id: %s, group: %s", configID, groupID)
+	}
+	return response.ScalingConfiguration, nil
+}
+
+func (csm *cloudServiceManager) listFlavors(az string) (*[]huaweicloudsdkecsmodel.Flavor, error) {
+	ecsClient := csm.getECSClientFunc()
+	opts := &huaweicloudsdkecsmodel.ListFlavorsRequest{
+		AvailabilityZone: &az,
+	}
+	response, err := ecsClient.ListFlavors(opts)
+	if err != nil {
+		klog.Errorf("failed to list flavors. availability zone: %s, error: %v", az, err)
+		return nil, err
+	}
+
+	return response.Flavors, nil
+}
+
+func (csm *cloudServiceManager) getAsgTemplate(groupID string) (*asgTemplate, error) {
+	sg, err := csm.getScalingGroupByID(groupID)
+	if err != nil {
+		klog.Errorf("failed to get ASG by id: %s, error: %v", groupID, err)
+		return nil, err
+	}
+
+	configuration, err := csm.getScalingGroupConfigByID(groupID, *sg.ScalingConfigurationId)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, az := range *sg.AvailableZones {
+		flavors, err := csm.listFlavors(az)
+		if err != nil {
+			klog.Errorf("failed to list flavors, available zone is: %s, error: %v", az, err)
+			return nil, err
+		}
+
+		for _, flavor := range *flavors {
+			if !strings.EqualFold(flavor.Name, *configuration.InstanceConfig.FlavorRef) {
+				continue
+			}
+
+			vcpus, err := strconv.ParseInt(flavor.Vcpus, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			return &asgTemplate{
+				name: flavor.Name,
+				vcpu: vcpus,
+				ram:  int64(flavor.Ram),
+				zone: az,
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("failed to find the flavor of scaling group: %s", groupID)
+}
+
+func (csm *cloudServiceManager) buildNodeFromTemplate(asgName string, template *asgTemplate) (*apiv1.Node, error) {
+	node := apiv1.Node{}
+	nodeName := fmt.Sprintf("%s-asg-%d", asgName, rand.Int63())
+
+	node.ObjectMeta = metav1.ObjectMeta{
+		Name:     nodeName,
+		SelfLink: fmt.Sprintf("/api/v1/nodes/%s", nodeName),
+		Labels:   map[string]string{},
+	}
+
+	node.Status = apiv1.NodeStatus{
+		Capacity: apiv1.ResourceList{},
+	}
+	// TODO: get a real value.
+	node.Status.Capacity[apiv1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI)
+	node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(template.vcpu, resource.DecimalSI)
+	node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(template.gpu, resource.DecimalSI)
+	node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(template.ram*1024*1024, resource.DecimalSI)
+
+	node.Status.Allocatable = node.Status.Capacity
+
+	node.Labels = cloudprovider.JoinStringMaps(node.Labels, buildGenericLabels(template, nodeName))
+
+	node.Status.Conditions = cloudprovider.BuildReadyConditions()
+	return &node, nil
+}
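A hypothetical unit-test sketch for the conversion logic, assuming it lives in this package's test file with "testing" imported (names and values are illustrative):

func TestBuildNodeFromTemplate(t *testing.T) {
	csm := &cloudServiceManager{}
	template := &asgTemplate{name: "s6.large.2", vcpu: 2, ram: 4096, zone: "cn-north-4a"}

	node, err := csm.buildNodeFromTemplate("test-asg", template)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got := node.Status.Capacity.Cpu().Value(); got != 2 {
		t.Errorf("cpu capacity = %d, want 2", got)
	}
	// ram is given in MiB and stored in bytes on the node object.
	if got := node.Status.Capacity.Memory().Value(); got != int64(4096)*1024*1024 {
		t.Errorf("memory capacity = %d, want %d", got, int64(4096)*1024*1024)
	}
}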
+
+func buildGenericLabels(template *asgTemplate, nodeName string) map[string]string {
+	result := make(map[string]string)
+	result[kubeletapis.LabelArch] = cloudprovider.DefaultArch
+	result[kubeletapis.LabelOS] = cloudprovider.DefaultOS
+
+	result[apiv1.LabelInstanceType] = template.name
+
+	result[apiv1.LabelZoneRegion] = template.region
+	result[apiv1.LabelZoneFailureDomain] = template.zone
+	result[apiv1.LabelHostname] = nodeName
+
+	// append custom node labels
+	for key, value := range template.tags {
+		result[key] = value
+	}
+
+	return result
+}
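With the hypothetical template above (flavor s6.large.2, region cn-north-4, zone cn-north-4a, tag env=test) and node name test-asg-123, buildGenericLabels would produce roughly the following map; the literal keys come from the label constants in the Kubernetes version this provider vendors, so treat them as indicative:

// indicative result of buildGenericLabels(template, "test-asg-123")
map[string]string{
	"beta.kubernetes.io/arch":                  "amd64",
	"beta.kubernetes.io/os":                    "linux",
	"beta.kubernetes.io/instance-type":         "s6.large.2",
	"failure-domain.beta.kubernetes.io/region": "cn-north-4",
	"failure-domain.beta.kubernetes.io/zone":   "cn-north-4a",
	"kubernetes.io/hostname":                   "test-asg-123",
	"env":                                      "test", // custom tag passed through
}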