diff --git a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go
index 0c8e20ffff..e549e7db58 100644
--- a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go
+++ b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go
@@ -281,7 +281,7 @@ func buildKubeProxy(mig *Mig) *apiv1.Pod {
 				Resources: apiv1.ResourceRequirements{
 					Requests: apiv1.ResourceList{
 						apiv1.ResourceCPU: *resource.NewMilliQuantity(
-							int64(100),
+							int64(KubeProxyCpuRequestMillis),
 							resource.DecimalSI),
 					},
 				},
diff --git a/cluster-autoscaler/cloudprovider/gce/gce_manager.go b/cluster-autoscaler/cloudprovider/gce/gce_manager.go
index 98c2f58368..8f3859911c 100644
--- a/cluster-autoscaler/cloudprovider/gce/gce_manager.go
+++ b/cluster-autoscaler/cloudprovider/gce/gce_manager.go
@@ -305,12 +305,21 @@ func (m *GceManager) buildNodeFromTemplate(mig *Mig, template *gce.InstanceTempl
 
 	// TODO: handle custom !!!!
 	// TODO: handle GPU
-	machineType, err := m.service.MachineTypes.Get(mig.Project, mig.Zone, template.Properties.MachineType).Do()
-	if err != nil {
-		return nil, err
+	if strings.HasPrefix(template.Properties.MachineType, "custom-") {
+		cpu, mem, err := parseCustomMachineType(template.Properties.MachineType)
+		if err != nil {
+			return nil, err
+		}
+		node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(cpu, resource.DecimalSI)
+		node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(mem, resource.DecimalSI)
+	} else {
+		machineType, err := m.service.MachineTypes.Get(mig.Project, mig.Zone, template.Properties.MachineType).Do()
+		if err != nil {
+			return nil, err
+		}
+		node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(machineType.GuestCpus, resource.DecimalSI)
+		node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(machineType.MemoryMb*1024*1024, resource.DecimalSI)
 	}
-	node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(machineType.GuestCpus, resource.DecimalSI)
-	node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(machineType.MemoryMb, resource.DecimalSI)
 
 	// TODO: use proper allocatable!!
 	node.Status.Allocatable = node.Status.Capacity
@@ -367,6 +376,21 @@ func buildGenericLabels(ref GceRef, machineType string, nodeName string) (map[st
 	return result, nil
 }
 
+func parseCustomMachineType(machineType string) (cpu, mem int64, err error) {
+	// example custom-2-2816
+	var count int
+	count, err = fmt.Sscanf(machineType, "custom-%d-%d", &cpu, &mem)
+	if err != nil {
+		return
+	}
+	if count != 2 {
+		return 0, 0, fmt.Errorf("failed to parse all params in %s", machineType)
+	}
+	// Mb to bytes
+	mem = mem * 1024 * 1024
+	return
+}
+
 func buildReadyConditions() []apiv1.NodeCondition {
 	lastTransition := time.Now().Add(-time.Minute)
 	return []apiv1.NodeCondition{
diff --git a/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go b/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go
index 70638ad46c..7448248cb4 100644
--- a/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go
+++ b/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go
@@ -64,3 +64,14 @@ func TestBuildReadyConditions(t *testing.T) {
 	}
 	assert.True(t, foundReady)
 }
+
+func TestParseCustomMachineType(t *testing.T) {
+	cpu, mem, err := parseCustomMachineType("custom-2-2816")
+	assert.NoError(t, err)
+	assert.Equal(t, int64(2), cpu)
+	assert.Equal(t, int64(2816*1024*1024), mem)
+	cpu, mem, err = parseCustomMachineType("other-a2-2816")
+	assert.Error(t, err)
+	cpu, mem, err = parseCustomMachineType("other-2-2816")
+	assert.Error(t, err)
+}