Add regional flag

parent 887408b6a6
commit f98e953eb4

@@ -53,15 +53,17 @@ type CloudProviderBuilder struct {
 	cloudConfig             string
 	clusterName             string
 	autoprovisioningEnabled bool
+	regional                bool
 }
 
 // NewCloudProviderBuilder builds a new builder from static settings
-func NewCloudProviderBuilder(cloudProviderFlag string, cloudConfig string, clusterName string, autoprovisioningEnabled bool) CloudProviderBuilder {
+func NewCloudProviderBuilder(cloudProviderFlag, cloudConfig, clusterName string, autoprovisioningEnabled, regional bool) CloudProviderBuilder {
 	return CloudProviderBuilder{
 		cloudProviderFlag:       cloudProviderFlag,
 		cloudConfig:             cloudConfig,
 		clusterName:             clusterName,
 		autoprovisioningEnabled: autoprovisioningEnabled,
 		regional:                regional,
 	}
 }
 
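For illustration, a minimal self-contained Go sketch that mirrors the updated constructor above; the struct and function shapes are copied from the diff, while the package wrapper and example values are invented:

package main

import "fmt"

// CloudProviderBuilder mirrors the struct in the diff, trimmed to its fields.
type CloudProviderBuilder struct {
	cloudProviderFlag       string
	cloudConfig             string
	clusterName             string
	autoprovisioningEnabled bool
	regional                bool
}

// NewCloudProviderBuilder matches the updated signature from the diff.
func NewCloudProviderBuilder(cloudProviderFlag, cloudConfig, clusterName string, autoprovisioningEnabled, regional bool) CloudProviderBuilder {
	return CloudProviderBuilder{
		cloudProviderFlag:       cloudProviderFlag,
		cloudConfig:             cloudConfig,
		clusterName:             clusterName,
		autoprovisioningEnabled: autoprovisioningEnabled,
		regional:                regional,
	}
}

func main() {
	// Invented example values; the trailing true is the new regional flag.
	b := NewCloudProviderBuilder("gce", "", "example-cluster", false, true)
	fmt.Printf("%+v\n", b)
}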
@@ -107,7 +109,7 @@ func (b CloudProviderBuilder) buildGCE(do cloudprovider.NodeGroupDiscoveryOption
 		defer config.Close()
 	}
 
-	manager, err := gce.CreateGceManager(config, mode, b.clusterName, do)
+	manager, err := gce.CreateGceManager(config, mode, b.clusterName, do, b.regional)
 	if err != nil {
 		glog.Fatalf("Failed to create GCE Manager: %v", err)
 	}
 
@@ -152,7 +152,7 @@ type gceManagerImpl struct {
 	mode                  GcpCloudProviderMode
 	templates             *templateBuilder
 	interrupt             chan struct{}
-	isRegional            bool
+	regional              bool
 	explicitlyConfigured  map[GceRef]bool
 	migAutoDiscoverySpecs []cloudprovider.MIGAutoDiscoveryConfig
 	resourceLimiter       *cloudprovider.ResourceLimiter

@@ -160,7 +160,7 @@ type gceManagerImpl struct {
 }
 
 // CreateGceManager constructs gceManager object.
-func CreateGceManager(configReader io.Reader, mode GcpCloudProviderMode, clusterName string, discoveryOpts cloudprovider.NodeGroupDiscoveryOptions) (GceManager, error) {
+func CreateGceManager(configReader io.Reader, mode GcpCloudProviderMode, clusterName string, discoveryOpts cloudprovider.NodeGroupDiscoveryOptions, regional bool) (GceManager, error) {
 	// Create Google Compute Engine token.
 	var err error
 	tokenSource := google.ComputeTokenSource("")

@@ -171,7 +171,6 @@ func CreateGceManager(configReader io.Reader, mode GcpCloudProviderMode, cluster
 		}
 	}
 	var projectId, location string
-	var isRegional bool
 	if configReader != nil {
 		var cfg provider_gce.ConfigFile
 		if err := gcfg.ReadInto(&cfg, configReader); err != nil {

@@ -185,7 +184,6 @@ func CreateGceManager(configReader io.Reader, mode GcpCloudProviderMode, cluster
 			glog.V(1).Infof("Using TokenSource from config %#v", tokenSource)
 		}
 		projectId = cfg.Global.ProjectID
-		isRegional = cfg.Global.Multizone
 		location = cfg.Global.LocalZone
 	} else {
 		glog.V(1).Infof("Using default TokenSource %#v", tokenSource)

@@ -196,7 +194,7 @@ func CreateGceManager(configReader io.Reader, mode GcpCloudProviderMode, cluster
 		// be specified in config. For now we can just assume that hosted
 		// master project is in the same zone as cluster and only use
 		// discoveredZone.
-		discoveredProjectId, discoveredLocation, err := getProjectAndLocation(isRegional)
+		discoveredProjectId, discoveredLocation, err := getProjectAndLocation(regional)
 		if err != nil {
 			return nil, err
 		}

@@ -220,7 +218,7 @@ func CreateGceManager(configReader io.Reader, mode GcpCloudProviderMode, cluster
 		gceService:  gceService,
 		migCache:    make(map[GceRef]*Mig),
 		location:    location,
-		isRegional:  isRegional,
+		regional:    regional,
 		projectId:   projectId,
 		clusterName: clusterName,
 		mode:        mode,

@@ -250,7 +248,7 @@ func CreateGceManager(configReader io.Reader, mode GcpCloudProviderMode, cluster
 			gkeService.BasePath = *gkeAPIEndpoint
 		}
 		manager.gkeService = gkeService
-		if manager.isRegional {
+		if manager.regional {
 			gkeBetaService, err := gke_beta.New(client)
 			if err != nil {
 				return nil, err

@@ -319,7 +317,7 @@ func (m *gceManagerImpl) fetchAllNodePools() error {
 	if m.mode == ModeGKENAP {
 		return m.fetchAllNodePoolsGkeNapImpl()
 	}
-	if m.isRegional {
+	if m.regional {
 		return m.fetchAllNodePoolsGkeRegionalImpl()
 	}
 	return m.fetchAllNodePoolsGkeImpl()

@@ -1051,7 +1049,7 @@ func (m *gceManagerImpl) GetResourceLimiter() (*cloudprovider.ResourceLimiter, e
 }
 
 // Code borrowed from gce cloud provider. Reuse the original as soon as it becomes public.
-func getProjectAndLocation(isRegional bool) (string, string, error) {
+func getProjectAndLocation(regional bool) (string, string, error) {
 	result, err := metadata.Get("instance/zone")
 	if err != nil {
 		return "", "", err

@@ -1061,7 +1059,7 @@ func getProjectAndLocation(isRegional bool) (string, string, error) {
 		return "", "", fmt.Errorf("unexpected response: %s", result)
 	}
 	location := parts[3]
-	if isRegional {
+	if regional {
 		location, err = provider_gce.GetGCERegion(location)
 		if err != nil {
 			return "", "", err
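For context on the branch above: when regional is true, the zone string read from the metadata server is converted to its region via provider_gce.GetGCERegion. A standalone sketch of that zone-to-region derivation; the helper name regionFromZone is invented for the example, and it assumes the usual GCE naming where a zone is its region plus a trailing suffix (e.g. us-central1-b -> us-central1):

package main

import (
	"fmt"
	"strings"
)

// regionFromZone drops the trailing zone suffix after the last '-',
// mirroring the zone-to-region mapping the provider relies on.
func regionFromZone(zone string) (string, error) {
	i := strings.LastIndex(zone, "-")
	if i <= 0 {
		return "", fmt.Errorf("invalid zone: %q", zone)
	}
	return zone[:i], nil
}

func main() {
	region, err := regionFromZone("us-central1-b")
	if err != nil {
		panic(err)
	}
	fmt.Println(region) // us-central1
}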
@@ -1075,7 +1073,7 @@ func getProjectAndLocation(isRegional bool) (string, string, error) {
 }
 
 func (m *gceManagerImpl) findMigsNamed(name *regexp.Regexp) ([]string, error) {
-	if m.isRegional {
+	if m.regional {
 		return m.findMigsInRegion(m.location, name)
 	}
 	return m.findMigsInZone(m.location, name)
 
@@ -526,7 +526,7 @@ func getManagedInstancesResponse2Named(name, zone string) string {
 	return fmt.Sprintf(managedInstancesResponse2, zone, name)
 }
 
-func newTestGceManager(t *testing.T, testServerURL string, mode GcpCloudProviderMode, isRegional bool) *gceManagerImpl {
+func newTestGceManager(t *testing.T, testServerURL string, mode GcpCloudProviderMode, regional bool) *gceManagerImpl {
 	client := &http.Client{}
 	gceService, err := gce.New(client)
 	assert.NoError(t, err)

@@ -538,7 +538,7 @@ func newTestGceManager(t *testing.T, testServerURL string, mode GcpCloudProvider
 		projectId:   projectId,
 		clusterName: clusterName,
 		mode:        mode,
-		isRegional:  isRegional,
+		regional:    regional,
 		templates: &templateBuilder{
 			projectId: projectId,
 			service:   gceService,

@@ -546,7 +546,7 @@ func newTestGceManager(t *testing.T, testServerURL string, mode GcpCloudProvider
 		explicitlyConfigured: make(map[GceRef]bool),
 	}
 
-	if isRegional {
+	if regional {
 		manager.location = region
 	} else {
 		manager.location = zoneB

@@ -557,7 +557,7 @@ func newTestGceManager(t *testing.T, testServerURL string, mode GcpCloudProvider
 		assert.NoError(t, err)
 		gkeService.BasePath = testServerURL
 		manager.gkeService = gkeService
-		if isRegional {
+		if regional {
 			gkeService, err := gke_beta.New(client)
 			assert.NoError(t, err)
 			gkeService.BasePath = testServerURL
 
@@ -132,6 +132,8 @@ type AutoscalingOptions struct {
 	// Pods with priority below cutoff are expendable. They can be killed without any consideration during scale down and they don't cause scale up.
 	// Pods with null priority (PodPriority disabled) are non expendable.
 	ExpendablePodsPriorityCutoff int
+	// Regional tells whether the cluster is regional.
+	Regional bool
 }
 
 // NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments

@@ -139,7 +141,7 @@ func NewAutoscalingContext(options AutoscalingOptions, predicateChecker *simulat
 	kubeClient kube_client.Interface, kubeEventRecorder kube_record.EventRecorder,
 	logEventRecorder *utils.LogEventRecorder, listerRegistry kube_util.ListerRegistry) (*AutoscalingContext, errors.AutoscalerError) {
 
-	cloudProviderBuilder := builder.NewCloudProviderBuilder(options.CloudProviderName, options.CloudConfig, options.ClusterName, options.NodeAutoprovisioningEnabled)
+	cloudProviderBuilder := builder.NewCloudProviderBuilder(options.CloudProviderName, options.CloudConfig, options.ClusterName, options.NodeAutoprovisioningEnabled, options.Regional)
 	cloudProvider := cloudProviderBuilder.Build(cloudprovider.NodeGroupDiscoveryOptions{
 		NodeGroupSpecs:              options.NodeGroups,
 		NodeGroupAutoDiscoverySpecs: options.NodeGroupAutoDiscovery},
 
@@ -131,6 +131,7 @@ var (
 	maxAutoprovisionedNodeGroupCount = flag.Int("max-autoprovisioned-node-group-count", 15, "The maximum number of autoprovisioned groups in the cluster.")
 
 	expendablePodsPriorityCutoff = flag.Int("expendable-pods-priority-cutoff", 0, "Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.")
+	regional                     = flag.Bool("regional", false, "Cluster is regional.")
 )
 
 func createAutoscalerOptions() core.AutoscalerOptions {

@@ -180,6 +181,7 @@ func createAutoscalerOptions() core.AutoscalerOptions {
 		NodeAutoprovisioningEnabled:      *nodeAutoprovisioningEnabled,
 		MaxAutoprovisionedNodeGroupCount: *maxAutoprovisionedNodeGroupCount,
 		ExpendablePodsPriorityCutoff:     *expendablePodsPriorityCutoff,
+		Regional:                         *regional,
 	}
 
 	configFetcherOpts := dynamic.ConfigFetcherOptions{
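
Taken together, the new --regional flag flows from the command line into AutoscalingOptions and on to the GCE manager. A runnable sketch of that flag-to-options wiring pattern, with invented names standing in for the autoscaler's actual types:

package main

import (
	"flag"
	"fmt"
)

// Options mirrors the shape of AutoscalingOptions for this example only.
type Options struct {
	Regional bool
}

func main() {
	// Same definition style as the autoscaler's flag block above.
	regional := flag.Bool("regional", false, "Cluster is regional.")
	flag.Parse() // e.g. invoked as: program --regional=true

	opts := Options{Regional: *regional}
	fmt.Printf("regional=%v\n", opts.Regional)
}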