[Azure VMs Pool] Support mixed agentpool types in Azure Cache

This commit is contained in:
wenxuanW 2024-05-14 10:03:29 -07:00
parent f783488681
commit ba6977e7e6
7 changed files with 403 additions and 39 deletions

View File

@ -51,6 +51,7 @@ type azureCache struct {
// Cache content.
resourceGroup string
vmType string
vmsPoolSet map[string]struct{} // track the nodepools that're vms pool
scaleSets map[string]compute.VirtualMachineScaleSet
virtualMachines map[string][]compute.VirtualMachine
registeredNodeGroups []cloudprovider.NodeGroup
@ -67,6 +68,7 @@ func newAzureCache(client *azClient, cacheTTL time.Duration, resourceGroup, vmTy
refreshInterval: cacheTTL,
resourceGroup: resourceGroup,
vmType: vmType,
vmsPoolSet: make(map[string]struct{}),
scaleSets: make(map[string]compute.VirtualMachineScaleSet),
virtualMachines: make(map[string][]compute.VirtualMachine),
registeredNodeGroups: make([]cloudprovider.NodeGroup, 0),
@ -87,6 +89,13 @@ func newAzureCache(client *azClient, cacheTTL time.Duration, resourceGroup, vmTy
return cache, nil
}
// getVMsPoolSet returns the cached set of agentpool names that are VMs pools.
// Note the returned map is the cache's internal map; callers must treat it as
// read-only. A refresh replaces the map wholesale, so concurrent readers of a
// previously returned map are safe.
func (m *azureCache) getVMsPoolSet() map[string]struct{} {
	m.mutex.Lock()
	set := m.vmsPoolSet
	m.mutex.Unlock()
	return set
}
func (m *azureCache) getVirtualMachines() map[string][]compute.VirtualMachine {
m.mutex.Lock()
defer m.mutex.Unlock()
@ -165,54 +174,77 @@ func (m *azureCache) fetchAzureResources() error {
m.mutex.Lock()
defer m.mutex.Unlock()
switch m.vmType {
case vmTypeVMSS:
// List all VMSS in the RG.
vmssResult, err := m.fetchScaleSets()
if err == nil {
m.scaleSets = vmssResult
} else {
return err
}
case vmTypeStandard:
// List all VMs in the RG.
vmResult, err := m.fetchVirtualMachines()
if err == nil {
m.virtualMachines = vmResult
} else {
return err
}
// fetch all the resources since CAS may be operating on mixed nodepools
// including both VMSS and VMs pools
vmssResult, err := m.fetchScaleSets()
if err == nil {
m.scaleSets = vmssResult
} else {
return err
}
vmResult, vmsPoolSet, err := m.fetchVirtualMachines()
if err == nil {
m.virtualMachines = vmResult
m.vmsPoolSet = vmsPoolSet
} else {
return err
}
return nil
}
const (
// legacyAgentpoolNameTag is the legacy VM tag key carrying the agentpool name.
legacyAgentpoolNameTag = "poolName"
// agentpoolNameTag is the AKS-managed VM tag key carrying the agentpool name.
agentpoolNameTag = "aks-managed-poolName"
// agentpoolTypeTag is the AKS-managed VM tag key carrying the agentpool type.
agentpoolTypeTag = "aks-managed-agentpool-type"
// vmsPoolType is the agentpoolTypeTag value that marks a VMs (single-instance) pool.
vmsPoolType = "VirtualMachines"
)
// fetchVirtualMachines returns the updated list of virtual machines in the config resource group using the Azure API.
func (m *azureCache) fetchVirtualMachines() (map[string][]compute.VirtualMachine, error) {
func (m *azureCache) fetchVirtualMachines() (map[string][]compute.VirtualMachine, map[string]struct{}, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := m.azClient.virtualMachinesClient.List(ctx, m.resourceGroup)
if err != nil {
klog.Errorf("VirtualMachinesClient.List in resource group %q failed: %v", m.resourceGroup, err)
return nil, err.Error()
return nil, nil, err.Error()
}
instances := make(map[string][]compute.VirtualMachine)
// track the nodepools that're vms pools
vmsPoolSet := make(map[string]struct{})
for _, instance := range result {
if instance.Tags == nil {
continue
}
tags := instance.Tags
vmPoolName := tags["poolName"]
vmPoolName := tags[agentpoolNameTag]
// fall back to legacy tag name if not found
if vmPoolName == nil {
vmPoolName = tags[legacyAgentpoolNameTag]
}
if vmPoolName == nil {
continue
}
instances[to.String(vmPoolName)] = append(instances[to.String(vmPoolName)], instance)
// if the nodepool is already in the map, skip it
if _, ok := vmsPoolSet[to.String(vmPoolName)]; ok {
continue
}
// nodes from vms pool will have tag "aks-managed-agentpool-type" set to "VirtualMachines"
if agnetpoolType := tags[agentpoolTypeTag]; agnetpoolType != nil {
if strings.EqualFold(to.String(agnetpoolType), vmsPoolType) {
vmsPoolSet[to.String(vmPoolName)] = struct{}{}
}
}
}
return instances, nil
return instances, vmsPoolSet, nil
}
// fetchScaleSets returns the updated list of scale sets in the config resource group using the Azure API.
@ -323,6 +355,7 @@ func (m *azureCache) getAutoscalingOptions(ref azureRef) map[string]string {
// FindForInstance returns node group of the given Instance
func (m *azureCache) FindForInstance(instance *azureRef, vmType string) (cloudprovider.NodeGroup, error) {
vmsPoolSet := m.getVMsPoolSet()
m.mutex.Lock()
defer m.mutex.Unlock()
@ -340,7 +373,8 @@ func (m *azureCache) FindForInstance(instance *azureRef, vmType string) (cloudpr
return nil, nil
}
if vmType == vmTypeVMSS {
// cluster with vmss pool only
if vmType == vmTypeVMSS && len(vmsPoolSet) == 0 {
if m.areAllScaleSetsUniform() {
// Omit virtual machines not managed by vmss only in case of uniform scale set.
if ok := virtualMachineRE.Match([]byte(inst.Name)); ok {

View File

@ -44,6 +44,9 @@ func newTestAzureManager(t *testing.T) *AzureManager {
mockVMSSClient.EXPECT().List(gomock.Any(), "rg").Return(expectedScaleSets, nil).AnyTimes()
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
mockVMSSVMClient.EXPECT().List(gomock.Any(), "rg", "test-vmss", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
mockVMClient := mockvmclient.NewMockInterface(ctrl)
expectedVMs := newTestVMList(3)
mockVMClient.EXPECT().List(gomock.Any(), "rg").Return(expectedVMs, nil).AnyTimes()
manager := &AzureManager{
env: azure.PublicCloud,
@ -58,6 +61,7 @@ func newTestAzureManager(t *testing.T) *AzureManager {
azClient: &azClient{
virtualMachineScaleSetsClient: mockVMSSClient,
virtualMachineScaleSetVMsClient: mockVMSSVMClient,
virtualMachinesClient: mockVMClient,
deploymentsClient: &DeploymentsClientMock{
FakeStore: map[string]resources.DeploymentExtended{
"deployment": {
@ -116,9 +120,68 @@ func TestNodeGroups(t *testing.T) {
assert.Equal(t, len(provider.NodeGroups()), 0)
registered := provider.azureManager.RegisterNodeGroup(
newTestScaleSet(provider.azureManager, "test-asg"))
newTestScaleSet(provider.azureManager, "test-asg"),
)
assert.True(t, registered)
assert.Equal(t, len(provider.NodeGroups()), 1)
registered = provider.azureManager.RegisterNodeGroup(
newTestVMsPool(provider.azureManager, "test-vms-pool"),
)
assert.True(t, registered)
assert.Equal(t, len(provider.NodeGroups()), 2)
}
// TestMixedNodeGroups verifies that a provider configured with both a
// VMSS-backed node group and a VMs-pool node group resolves a node from
// each pool to its correct group after a cache refresh.
func TestMixedNodeGroups(t *testing.T) {
ctrl := gomock.NewController(t)
provider := newTestProvider(t)
// wire mock Azure clients into the manager so forceRefresh hits fakes only
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
// one uniform VMSS ("test-asg") plus three VMs tagged as "test-vms-pool"
expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform)
expectedVMsPoolVMs := newTestVMsPoolVMList(3)
expectedVMSSVMs := newTestVMSSVMList(3)
mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedVMsPoolVMs, nil).AnyTimes()
mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
assert.Equal(t, len(provider.NodeGroups()), 0)
// register both node groups and mark them explicitly configured so the
// refresh does not unregister them
registered := provider.azureManager.RegisterNodeGroup(
newTestScaleSet(provider.azureManager, "test-asg"),
)
provider.azureManager.explicitlyConfigured["test-asg"] = true
assert.True(t, registered)
registered = provider.azureManager.RegisterNodeGroup(
newTestVMsPool(provider.azureManager, "test-vms-pool"),
)
provider.azureManager.explicitlyConfigured["test-vms-pool"] = true
assert.True(t, registered)
assert.Equal(t, len(provider.NodeGroups()), 2)
// refresh cache
provider.azureManager.forceRefresh()
// node from vmss pool
node := newApiNode(compute.Uniform, 0)
group, err := provider.NodeGroupForNode(node)
assert.NoError(t, err)
assert.NotNil(t, group, "Group should not be nil")
assert.Equal(t, group.Id(), "test-asg")
assert.Equal(t, group.MinSize(), 1)
assert.Equal(t, group.MaxSize(), 5)
// node from vms pool
vmsPoolNode := newVMsNode(0)
group, err = provider.NodeGroupForNode(vmsPoolNode)
assert.NoError(t, err)
assert.NotNil(t, group, "Group should not be nil")
assert.Equal(t, group.Id(), "test-vms-pool")
assert.Equal(t, group.MinSize(), 3)
assert.Equal(t, group.MaxSize(), 10)
}
func TestNodeGroupForNode(t *testing.T) {
@ -136,6 +199,9 @@ func TestNodeGroupForNode(t *testing.T) {
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil)
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes()
if orchMode == compute.Uniform {
@ -144,10 +210,8 @@ func TestNodeGroupForNode(t *testing.T) {
provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
} else {
mockVMClient := mockvmclient.NewMockInterface(ctrl)
provider.azureManager.config.EnableVmssFlex = true
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
}

View File

@ -161,6 +161,11 @@ func (m *AzureManager) buildNodeGroupFromSpec(spec string) (cloudprovider.NodeGr
return nil, fmt.Errorf("failed to parse node group spec: %v", err)
}
vmsPoolSet := m.azureCache.getVMsPoolSet()
if _, ok := vmsPoolSet[s.Name]; ok {
return NewVMsPool(s, m), nil
}
switch m.config.VMType {
case vmTypeStandard:
return NewAgentPool(s, m)

View File

@ -144,6 +144,7 @@ func TestCreateAzureManagerValidConfig(t *testing.T) {
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).Times(2)
mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).Times(2)
mockAzClient := &azClient{
virtualMachinesClient: mockVMClient,
virtualMachineScaleSetsClient: mockVMSSClient,
@ -226,6 +227,7 @@ func TestCreateAzureManagerValidConfigForStandardVMType(t *testing.T) {
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).Times(2)
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).Times(2)
mockAzClient := &azClient{
virtualMachinesClient: mockVMClient,
virtualMachineScaleSetsClient: mockVMSSClient,
@ -338,6 +340,7 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) {
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachineScaleSet{}, nil).AnyTimes()
mockVMClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachine{}, nil).AnyTimes()
mockAzClient := &azClient{
virtualMachinesClient: mockVMClient,
virtualMachineScaleSetsClient: mockVMSSClient,
@ -737,6 +740,9 @@ func TestFetchAutoAsgsVmss(t *testing.T) {
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
manager.azClient.virtualMachinesClient = mockVMClient
mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes()
err := manager.forceRefresh()
assert.NoError(t, err)

View File

@ -179,6 +179,9 @@ func TestTargetSize(t *testing.T) {
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
if orchMode == compute.Uniform {
@ -188,9 +191,7 @@ func TestTargetSize(t *testing.T) {
} else {
provider.azureManager.config.EnableVmssFlex = true
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
}
err := provider.azureManager.forceRefresh()
@ -235,18 +236,18 @@ func TestIncreaseSize(t *testing.T) {
// This is a future TODO: sync.WaitGroup should be used in actual code and make code easily testable
mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), provider.azureManager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes()
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
if orchMode == compute.Uniform {
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, testASG, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
} else {
provider.azureManager.config.EnableVmssFlex = true
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), testASG).Return(expectedVMs, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
}
err := provider.azureManager.forceRefresh()
assert.NoError(t, err)
@ -349,6 +350,7 @@ func TestIncreaseSizeOnVMProvisioningFailed(t *testing.T) {
expectedScaleSets := newTestVMSSList(3, "vmss-failed-upscale", "eastus", compute.Uniform)
expectedVMSSVMs := newTestVMSSVMList(3)
expectedVMs := newTestVMList(3)
expectedVMSSVMs[2].ProvisioningState = to.StringPtr(provisioningStateFailed)
if !testCase.isMissingInstanceView {
expectedVMSSVMs[2].InstanceView = &compute.VirtualMachineScaleSetVMInstanceView{Statuses: &testCase.statuses}
@ -362,6 +364,9 @@ func TestIncreaseSizeOnVMProvisioningFailed(t *testing.T) {
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "vmss-failed-upscale", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes()
manager.azClient.virtualMachinesClient = mockVMClient
manager.explicitlyConfigured["vmss-failed-upscale"] = true
registered := manager.RegisterNodeGroup(newTestScaleSet(manager, vmssName))
assert.True(t, registered)
@ -451,6 +456,9 @@ func TestBelongs(t *testing.T) {
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil)
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
if orchMode == compute.Uniform {
@ -461,9 +469,7 @@ func TestBelongs(t *testing.T) {
} else {
provider.azureManager.config.EnableVmssFlex = true
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
}
registered := provider.azureManager.RegisterNodeGroup(
@ -544,6 +550,8 @@ func TestDeleteNodes(t *testing.T) {
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
mockVMClient := mockvmclient.NewMockInterface(ctrl)
manager.azClient.virtualMachinesClient = mockVMClient
mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes()
if orchMode == compute.Uniform {
mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
@ -551,7 +559,6 @@ func TestDeleteNodes(t *testing.T) {
} else {
manager.config.EnableVmssFlex = true
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes()
manager.azClient.virtualMachinesClient = mockVMClient
}
@ -671,7 +678,9 @@ func TestDeleteNodeUnregistered(t *testing.T) {
mockVMSSClient.EXPECT().DeleteInstancesAsync(gomock.Any(), manager.config.ResourceGroup, gomock.Any(), gomock.Any(), enableForceDelete).Return(nil, nil)
mockVMSSClient.EXPECT().WaitForDeleteInstancesResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes()
manager.azClient.virtualMachinesClient = mockVMClient
if orchMode == compute.Uniform {
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
@ -680,9 +689,7 @@ func TestDeleteNodeUnregistered(t *testing.T) {
} else {
manager.config.EnableVmssFlex = true
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes()
manager.azClient.virtualMachinesClient = mockVMClient
}
err := manager.forceRefresh()
assert.NoError(t, err)
@ -828,6 +835,9 @@ func TestScaleSetNodes(t *testing.T) {
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
if orchMode == compute.Uniform {
@ -837,9 +847,7 @@ func TestScaleSetNodes(t *testing.T) {
} else {
provider.azureManager.config.EnableVmssFlex = true
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
}
registered := provider.azureManager.RegisterNodeGroup(
@ -894,6 +902,7 @@ func TestEnableVmssFlexFlag(t *testing.T) {
provider.azureManager.config.EnableVmssFlex = false
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes()
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes()
provider.azureManager.azClient.virtualMachinesClient = mockVMClient

View File

@ -0,0 +1,179 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
// VMsPool represents a single-instance VMs agentpool (non-VMSS).
// This is a placeholder for now; most NodeGroup methods are not yet implemented.
type VMsPool struct {
azureRef
manager *AzureManager
resourceGroup string
minSize int
maxSize int
// curSize is the last-known target size; -1 means "not yet fetched"
curSize int64
// reserved for a future size cache implementation:
// sizeMutex sync.Mutex
// lastSizeRefresh time.Time
}
// NewVMsPool constructs a VMsPool node group from the given spec, backed by
// the supplied AzureManager. The current size starts at -1 (unknown).
func NewVMsPool(spec *dynamic.NodeGroupSpec, am *AzureManager) *VMsPool {
	return &VMsPool{
		azureRef:      azureRef{Name: spec.Name},
		manager:       am,
		resourceGroup: am.config.ResourceGroup,
		curSize:       -1,
		minSize:       spec.MinSize,
		maxSize:       spec.MaxSize,
	}
}
// MinSize returns the minimum size the node group is allowed to scale down
// to, as provided by the node group spec in the --nodes parameter.
func (agentPool *VMsPool) MinSize() int {
return agentPool.minSize
}
// Exist is always true since we are initialized with an existing agentpool.
func (agentPool *VMsPool) Exist() bool {
return true
}
// Create creates the node group on the cloud provider side.
// Not supported: the agentpool always pre-exists, so this returns ErrAlreadyExist.
func (agentPool *VMsPool) Create() (cloudprovider.NodeGroup, error) {
return nil, cloudprovider.ErrAlreadyExist
}
// Delete deletes the node group on the cloud provider side.
// Not implemented for VMs pools.
func (agentPool *VMsPool) Delete() error {
return cloudprovider.ErrNotImplemented
}
// Autoprovisioned is always false since we are initialized with an existing agentpool.
func (agentPool *VMsPool) Autoprovisioned() bool {
return false
}
// GetOptions returns NodeGroupAutoscalingOptions that should be used for this
// particular NodeGroup. Returning nil results in the default options being used.
func (agentPool *VMsPool) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) {
// TODO(wenxuan): Implement this method
return nil, cloudprovider.ErrNotImplemented
}
// MaxSize returns the maximum size scale limit provided by the --nodes
// parameter to the autoscaler main.
func (agentPool *VMsPool) MaxSize() int {
return agentPool.maxSize
}
// TargetSize returns the current TARGET size of the node group. It is possible
// that the number is different from the number of nodes registered in Kubernetes.
func (agentPool *VMsPool) TargetSize() (int, error) {
// TODO(wenxuan): Implement this method
return -1, cloudprovider.ErrNotImplemented
}
// IncreaseSize increases the size through a PUT agentpool call. It calculates
// the expected size based on the delta provided as parameter.
func (agentPool *VMsPool) IncreaseSize(delta int) error {
// TODO(wenxuan): Implement this method
return cloudprovider.ErrNotImplemented
}
// DeleteNodes extracts the providerIDs from the node specs and deletes or
// deallocates the nodes from the agent pool based on the scale-down policy.
func (agentPool *VMsPool) DeleteNodes(nodes []*apiv1.Node) error {
// TODO(wenxuan): Implement this method
return cloudprovider.ErrNotImplemented
}
// DecreaseTargetSize decreases the target size of the node group.
func (agentPool *VMsPool) DecreaseTargetSize(delta int) error {
// TODO(wenxuan): Implement this method
return cloudprovider.ErrNotImplemented
}
// Id returns the name of the agentPool.
func (agentPool *VMsPool) Id() string {
return agentPool.azureRef.Name
}
// Debug returns a string with basic details of the agentPool,
// in the form "name (min:max)".
func (agentPool *VMsPool) Debug() string {
return fmt.Sprintf("%s (%d:%d)", agentPool.Id(), agentPool.MinSize(), agentPool.MaxSize())
}
// getVMsFromCache returns the virtual machines belonging to this agent pool
// from the Azure cache, or an error if the pool is not present in the cache.
func (agentPool *VMsPool) getVMsFromCache() ([]compute.VirtualMachine, error) {
	// map of agent pool name to the list of virtual machines in that pool
	vmsPoolMap := agentPool.manager.azureCache.getVirtualMachines()
	// single comma-ok lookup instead of the original's double map access
	vms, ok := vmsPoolMap[agentPool.Name]
	if !ok {
		return []compute.VirtualMachine{}, fmt.Errorf("vms pool %s not found in the cache", agentPool.Name)
	}
	return vms, nil
}
// Nodes returns the list of cloudprovider instances in the VMs agentPool,
// built from the cached Azure VM list. VMs with a missing or empty ID are
// skipped.
func (agentPool *VMsPool) Nodes() ([]cloudprovider.Instance, error) {
	vms, err := agentPool.getVMsFromCache()
	if err != nil {
		return nil, err
	}

	nodes := make([]cloudprovider.Instance, 0, len(vms))
	for _, vm := range vms {
		// guard against a nil ID pointer: the original dereferenced *vm.ID
		// unconditionally, which panics on a VM without an ID
		if vm.ID == nil || len(*vm.ID) == 0 {
			continue
		}
		resourceID, err := convertResourceGroupNameToLower("azure://" + *vm.ID)
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, cloudprovider.Instance{Id: resourceID})
	}

	return nodes, nil
}
// TemplateNodeInfo is not implemented for VMs pools; the autoscaler falls back
// to its default behavior when this returns ErrNotImplemented.
func (agentPool *VMsPool) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
return nil, cloudprovider.ErrNotImplemented
}
// AtomicIncreaseSize is not implemented for VMs pools.
func (agentPool *VMsPool) AtomicIncreaseSize(delta int) error {
return cloudprovider.ErrNotImplemented
}

View File

@ -0,0 +1,67 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
"github.com/Azure/go-autorest/autorest/to"
apiv1 "k8s.io/api/core/v1"
)
// newTestVMsPool builds a VMsPool fixture with fixed min/max sizes (3/10)
// for use in unit tests.
func newTestVMsPool(manager *AzureManager, name string) *VMsPool {
	pool := &VMsPool{
		azureRef: azureRef{Name: name},
		manager:  manager,
		minSize:  3,
		maxSize:  10,
	}
	return pool
}
const (
// fakeVMsPoolVMID is a printf template for fake VM resource IDs; the single
// %d verb is filled with the VM's index.
fakeVMsPoolVMID = "/subscriptions/test-subscription-id/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachines/%d"
)
// newTestVMsPoolVMList returns count fake virtual machines tagged as members
// of the "test-vms-pool" VMs-type agentpool, for use in unit tests.
func newTestVMsPoolVMList(count int) []compute.VirtualMachine {
	// pre-size the slice since the final length is known
	vmList := make([]compute.VirtualMachine, 0, count)
	for i := 0; i < count; i++ {
		vm := compute.VirtualMachine{
			ID: to.StringPtr(fmt.Sprintf(fakeVMsPoolVMID, i)),
			VirtualMachineProperties: &compute.VirtualMachineProperties{
				VMID: to.StringPtr(fmt.Sprintf("123E4567-E89B-12D3-A456-426655440000-%d", i)),
			},
			Tags: map[string]*string{
				// use the shared vmsPoolType constant instead of duplicating
				// the "VirtualMachines" literal
				agentpoolTypeTag: to.StringPtr(vmsPoolType),
				agentpoolNameTag: to.StringPtr("test-vms-pool"),
			},
		}
		vmList = append(vmList, vm)
	}
	return vmList
}
// newVMsNode returns a fake Kubernetes node whose provider ID points at the
// fake VMs-pool VM with the given index.
func newVMsNode(vmID int64) *apiv1.Node {
	providerID := "azure://" + fmt.Sprintf(fakeVMsPoolVMID, vmID)
	return &apiv1.Node{
		Spec: apiv1.NodeSpec{
			ProviderID: providerID,
		},
	}
}