Add GetNodeGpuConfig to cloud provider
* Added GetNodeGpuConfig to cloud provider which returns a GpuConfig struct containing the gpu label, type and resource name if the node has a GPU. * Added initial implementation of the GetNodeGpuConfig to all cloud providers.
This commit is contained in:
parent
1238e1dfcf
commit
1f646e4095
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -109,6 +110,12 @@ func (ali *aliCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (ali *aliCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(ali, node)
|
||||
}
|
||||
|
||||
func (ali *aliCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
result := make([]cloudprovider.NodeGroup, 0, len(ali.asgs))
|
||||
for _, asg := range ali.asgs {
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
@ -84,6 +85,12 @@ func (aws *awsCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (aws *awsCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(aws, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups configured for this cloud provider.
|
||||
func (aws *awsCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
asgs := aws.awsManager.getAsgs()
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -79,6 +80,13 @@ func (azure *AzureCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (azure *AzureCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(azure, node)
|
||||
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups configured for this cloud provider.
|
||||
func (azure *AzureCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
asgs := azure.azureManager.getNodeGroups()
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
@ -168,6 +169,12 @@ func (baiducloud *baiducloudCloudProvider) GetAvailableGPUTypes() map[string]str
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (baiducloud *baiducloudCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(baiducloud, node)
|
||||
}
|
||||
|
||||
// NodeGroupForNode returns the node group for the given node, nil if the node
|
||||
// should not be processed by cluster autoscaler, or non-nil error if such
|
||||
// occurred. Must be implemented.
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -151,6 +152,12 @@ func (d *bizflycloudCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (d *bizflycloudCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(d, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed,
|
||||
// i.e. go routines etc.
|
||||
func (d *bizflycloudCloudProvider) Cleanup() error {
|
||||
|
|
|
@ -30,6 +30,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -204,6 +205,13 @@ func (b *brightboxCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (b *brightboxCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
klog.V(4).Info("GetNodeGpuConfig")
|
||||
return gpu.GetNodeGPUFromCloudProvider(b, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is
|
||||
// destroyed, i.e. go routines etc.
|
||||
func (b *brightboxCloudProvider) Cleanup() error {
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -85,6 +86,12 @@ func (ccp *cherryCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (ccp *cherryCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(ccp, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups managed by this cloud provider.
|
||||
func (ccp *cherryCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
groups := make([]cloudprovider.NodeGroup, len(ccp.nodeGroups))
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -146,6 +147,12 @@ func (d *civoCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (d *civoCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(d, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed,
|
||||
// i.e. go routines etc.
|
||||
func (d *civoCloudProvider) Cleanup() error {
|
||||
|
|
|
@ -86,6 +86,13 @@ const (
|
|||
RancherProviderName = "rancher"
|
||||
)
|
||||
|
||||
// GpuConfig contains the label, type and the resource name for an accelerator (e.g. gpu)
type GpuConfig struct {
	// Label is the node label key the cloud provider uses to mark nodes carrying this accelerator.
	Label string
	// Type is the accelerator type; populated from the value of the node's GPU label
	// (see gpu.GetNodeGPUFromCloudProvider).
	Type string
	// ResourceName is the Kubernetes extended-resource name under which the accelerator
	// is exposed on the node.
	ResourceName apiv1.ResourceName
}
|
||||
|
||||
// CloudProvider contains configuration info and functions for interacting with
|
||||
// cloud provider (GCE, AWS, etc).
|
||||
type CloudProvider interface {
|
||||
|
@ -127,6 +134,10 @@ type CloudProvider interface {
|
|||
// GetAvailableGPUTypes return all available GPU types cloud provider supports.
|
||||
GetAvailableGPUTypes() map[string]struct{}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
GetNodeGpuConfig(*apiv1.Node) *GpuConfig
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc.
|
||||
Cleanup() error
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
klog "k8s.io/klog/v2"
|
||||
|
@ -103,6 +104,12 @@ func (provider *cloudStackCloudProvider) GetAvailableGPUTypes() map[string]struc
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (provider *cloudStackCloudProvider) GetNodeGpuConfig(node *v1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(provider, node)
|
||||
}
|
||||
|
||||
// Pricing returns pricing model for this cloud provider or error if not available.
|
||||
func (provider *cloudStackCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) {
|
||||
return nil, cloudprovider.ErrNotImplemented
|
||||
|
|
|
@ -33,6 +33,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -128,6 +129,12 @@ func (p *provider) GPULabel() string {
|
|||
return GPULabel
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (p *provider) GetNodeGpuConfig(node *corev1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(p, node)
|
||||
}
|
||||
|
||||
func newProvider(
|
||||
name string,
|
||||
rl *cloudprovider.ResourceLimiter,
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -148,6 +149,12 @@ func (d *digitaloceanCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (d *digitaloceanCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(d, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed,
|
||||
// i.e. go routines etc.
|
||||
func (d *digitaloceanCloudProvider) Cleanup() error {
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
egoscale "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/exoscale/egoscale/v2"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
)
|
||||
|
||||
var _ cloudprovider.CloudProvider = (*exoscaleCloudProvider)(nil)
|
||||
|
@ -176,6 +177,12 @@ func (e *exoscaleCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (e *exoscaleCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(e, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc.
|
||||
func (e *exoscaleCloudProvider) Cleanup() error {
|
||||
return nil
|
||||
|
|
|
@ -36,6 +36,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/externalgrpc/protos"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -262,6 +263,12 @@ func (e *externalGrpcCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return gpuTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (e *externalGrpcCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(e, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc.
|
||||
func (e *externalGrpcCloudProvider) Cleanup() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), grpcTimeout)
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
@ -80,6 +81,12 @@ func (gce *GceCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (gce *GceCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(gce, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups configured for this cloud provider.
|
||||
func (gce *GceCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
migs := gce.gceManager.GetMigs()
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -156,6 +157,12 @@ func (d *HetznerCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (d *HetznerCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(d, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed,
|
||||
// i.e. go routines etc.
|
||||
func (d *HetznerCloudProvider) Cleanup() error {
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -160,6 +161,12 @@ func (hcp *huaweicloudCloudProvider) GetAvailableGPUTypes() map[string]struct{}
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (hcp *huaweicloudCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(hcp, node)
|
||||
}
|
||||
|
||||
// Cleanup currently does nothing.
|
||||
func (hcp *huaweicloudCloudProvider) Cleanup() error {
|
||||
return nil
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/klog/v2"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
@ -279,6 +280,12 @@ func (ic *IonosCloudCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (ic *IonosCloudCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(ic, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up read resources before the cloud provider is destroyed,
|
||||
// i.e. go routines etc.
|
||||
func (ic *IonosCloudCloudProvider) Cleanup() error {
|
||||
|
|
|
@ -19,16 +19,18 @@ package kamatera
|
|||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"os"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -110,6 +112,12 @@ func (k *kamateraCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (k *kamateraCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(k, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc.
|
||||
func (k *kamateraCloudProvider) Cleanup() error {
|
||||
return nil
|
||||
|
|
|
@ -33,6 +33,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/client-go/informers"
|
||||
kubeclient "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
@ -107,6 +108,12 @@ func (kubemark *KubemarkCloudProvider) GetAvailableGPUTypes() map[string]struct{
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (kubemark *KubemarkCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(kubemark, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups configured for this cloud provider.
|
||||
func (kubemark *KubemarkCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
result := make([]cloudprovider.NodeGroup, 0, len(kubemark.nodeGroups))
|
||||
|
|
|
@ -65,6 +65,12 @@ func (kubemark *KubemarkCloudProvider) GetAvailableGPUTypes() map[string]struct{
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (kubemark *KubemarkCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(kubemark, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups configured for this cloud provider.
|
||||
func (kubemark *KubemarkCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
return []cloudprovider.NodeGroup{}
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -107,6 +108,12 @@ func (l *linodeCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (l *linodeCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(l, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc.
|
||||
func (l *linodeCloudProvider) Cleanup() error {
|
||||
return nil
|
||||
|
|
|
@ -30,6 +30,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -88,6 +89,12 @@ func (mcp *magnumCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (mcp *magnumCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(mcp, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups managed by this cloud provider.
|
||||
func (mcp *magnumCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
mcp.nodeGroupsLock.Lock()
|
||||
|
|
|
@ -16,11 +16,16 @@ limitations under the License.
|
|||
|
||||
package mocks
|
||||
|
||||
import cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
import errors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
import mock "github.com/stretchr/testify/mock"
|
||||
import resource "k8s.io/apimachinery/pkg/api/resource"
|
||||
import v1 "k8s.io/api/core/v1"
|
||||
import (
|
||||
cloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
errors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
resource "k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// CloudProvider is an autogenerated mock type for the CloudProvider type
|
||||
type CloudProvider struct {
|
||||
|
@ -71,6 +76,23 @@ func (_m *CloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return r0
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (_m *CloudProvider) GetNodeGpuConfig(_a0 *v1.Node) *cloudprovider.GpuConfig {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 cloudprovider.GpuConfig
|
||||
if rf, ok := ret.Get(0).(func(*v1.Node) cloudprovider.GpuConfig); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(cloudprovider.GpuConfig)
|
||||
}
|
||||
}
|
||||
|
||||
return &r0
|
||||
}
|
||||
|
||||
// GetAvailableMachineTypes provides a mock function with given fields:
|
||||
func (_m *CloudProvider) GetAvailableMachineTypes() ([]string, error) {
|
||||
ret := _m.Called()
|
||||
|
|
|
@ -17,17 +17,19 @@ You may obtain a copy of the License at
|
|||
package oci
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/klog/v2"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -143,6 +145,12 @@ func (ocp *OciCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return map[string]struct{}{}
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (ocp *OciCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(ocp, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc.
|
||||
func (ocp *OciCloudProvider) Cleanup() error {
|
||||
return ocp.poolManager.Cleanup()
|
||||
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/ovhcloud/sdk"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -281,6 +282,12 @@ func (provider *OVHCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return gpuTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (provider *OVHCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(provider, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed,
|
||||
// i.e. go routines etc.
|
||||
func (provider *OVHCloudProvider) Cleanup() error {
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -81,6 +82,12 @@ func (pcp *packetCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (pcp *packetCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(pcp, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups managed by this cloud provider.
|
||||
func (pcp *packetCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
groups := make([]cloudprovider.NodeGroup, len(pcp.nodeGroups))
|
||||
|
|
|
@ -30,6 +30,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
autoscalererrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
|
@ -126,6 +127,12 @@ func (provider *RancherCloudProvider) GetAvailableGPUTypes() map[string]struct{}
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (provider *RancherCloudProvider) GetNodeGpuConfig(node *corev1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(provider, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups configured for this cloud provider.
|
||||
func (provider *RancherCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
nodeGroups := make([]cloudprovider.NodeGroup, len(provider.nodeGroups))
|
||||
|
|
|
@ -32,6 +32,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/scaleway/scalewaygo"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
ca_errors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -225,6 +226,13 @@ func (scw *scalewayCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (scw *scalewayCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
klog.V(6).Info("GetNodeGpuConfig,called")
|
||||
return gpu.GetNodeGPUFromCloudProvider(scw, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc.
|
||||
func (scw *scalewayCloudProvider) Cleanup() error {
|
||||
klog.V(4).Info("Cleanup,called")
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -124,6 +125,12 @@ func (tencentcloud *tencentCloudProvider) GetAvailableGPUTypes() map[string]stru
|
|||
return availableGPUTypes
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (tencentcloud *tencentCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(tencentcloud, node)
|
||||
}
|
||||
|
||||
// Pricing returns pricing model for this cloud provider or error if not available.
|
||||
func (tencentcloud *tencentCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) {
|
||||
return nil, cloudprovider.ErrNotImplemented
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
)
|
||||
|
||||
|
@ -120,6 +121,12 @@ func (tcp *TestCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
}
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (tcp *TestCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(tcp, node)
|
||||
}
|
||||
|
||||
// NodeGroups returns all node groups configured for this cloud provider.
|
||||
func (tcp *TestCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
|
||||
tcp.Lock()
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
@ -121,6 +122,12 @@ func (v *vultrCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetNodeGpuConfig returns the label, type and resource name for the GPU added to node. If node doesn't have
|
||||
// any GPUs, it returns nil.
|
||||
func (v *vultrCloudProvider) GetNodeGpuConfig(node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
return gpu.GetNodeGPUFromCloudProvider(v, node)
|
||||
}
|
||||
|
||||
// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc.
|
||||
func (v *vultrCloudProvider) Cleanup() error {
|
||||
return nil
|
||||
|
|
|
@ -118,3 +118,13 @@ func PodRequestsGpu(pod *apiv1.Pod) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetNodeGPUFromCloudProvider returns the GPU the node has. Returned GPU has the GPU label of the
|
||||
// passed in cloud provider. If the node doesn't have a GPU, returns nil.
|
||||
func GetNodeGPUFromCloudProvider(provider cloudprovider.CloudProvider, node *apiv1.Node) *cloudprovider.GpuConfig {
|
||||
gpuLabel := provider.GPULabel()
|
||||
if NodeHasGpu(gpuLabel, node) {
|
||||
return &cloudprovider.GpuConfig{Label: gpuLabel, Type: node.Labels[gpuLabel], ResourceName: ResourceNvidiaGPU}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue