change defaults

Author: Brandon Wagner
Date:   2020-08-11 15:40:54 -05:00
Parent: 2d6d7ec4ad
Commit: c4e2497a8a
2 changed files with 12 additions and 12 deletions

Changed file 1 of 2:

@@ -42,7 +42,7 @@ const (
     vcpusToMemoryRatio     = "vcpus-to-memory-ratio"
     cpuArchitecture        = "cpu-architecture"
     gpus                   = "gpus"
-    gpuMemoryTotal         = "gpu-memory-total"
+    gpuMemory              = "gpu-memory"
     placementGroupStrategy = "placement-group-strategy"
     usageClass             = "usage-class"
     enaSupport             = "ena-support"
@@ -135,9 +135,9 @@ func NewCmdToolboxInstanceSelector(f *util.Factory, out io.Writer) *cobra.Command
     usageClassDefault := usageClassOndemand
     outputDefault := "yaml"
     dryRunDefault := false
-    clusterAutoscalerDefault := false
-    nodeCountMinDefault := 2
-    nodeCountMaxDefault := 15
+    clusterAutoscalerDefault := true
+    nodeCountMinDefault := 1
+    nodeCountMaxDefault := 10
     maxResultsDefault := 20

     // Instance Group Node Configurations
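
The second hunk is the heart of the commit: instance groups generated by the tool now default to cluster-autoscaler auto-discovery tags and a node range of 1-10 instead of 2-15. A minimal, standalone cobra sketch (illustrative only, not the kops wiring, which goes through the commandline helpers shown in the next hunk) of how such defaults surface: a flag's default applies whenever the user omits the flag and is echoed in --help.

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        // New defaults from this commit (previously false, 2 and 15).
        clusterAutoscalerDefault := true
        nodeCountMinDefault := 1
        nodeCountMaxDefault := 10

        cmd := &cobra.Command{Use: "instance-selector"}
        cmd.Flags().Bool("cluster-autoscaler", clusterAutoscalerDefault,
            "Add auto-discovery tags for cluster-autoscaler to manage the instance-group")
        cmd.Flags().Int("node-count-min", nodeCountMinDefault, "Set the minimum number of nodes")
        cmd.Flags().Int("node-count-max", nodeCountMaxDefault, "Set the maximum number of nodes")

        // A flag the user never passes reports its default value.
        fmt.Println(cmd.Flags().Lookup("node-count-max").DefValue) // prints "10"
    }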
@@ -161,7 +161,7 @@ func NewCmdToolboxInstanceSelector(f *util.Factory, out io.Writer) *cobra.Command
     commandline.RatioFlag(vcpusToMemoryRatio, nil, nil, "The ratio of vcpus to memory in MiB. (Example: 1:2)")
     commandline.StringOptionsFlag(cpuArchitecture, nil, &cpuArchDefault, fmt.Sprintf("CPU architecture [%s]", strings.Join(cpuArchs, ", ")), append(cpuArchs, cpuArchitectureX8664))
     commandline.IntMinMaxRangeFlags(gpus, nil, nil, "Total number of GPUs (Example: 4)")
-    commandline.ByteQuantityMinMaxRangeFlags(gpuMemoryTotal, nil, nil, "Number of GPUs' total memory (Example: 4gb)")
+    commandline.ByteQuantityMinMaxRangeFlags(gpuMemory, nil, nil, "Number of GPUs' total memory (Example: 4gb)")
     commandline.StringOptionsFlag(placementGroupStrategy, nil, nil, fmt.Sprintf("Placement group strategy: [%s]", strings.Join(placementGroupStrategies, ", ")), placementGroupStrategies)
     commandline.StringOptionsFlag(usageClass, nil, &usageClassDefault, fmt.Sprintf("Usage class: [%s]", strings.Join(usageClasses, ", ")), usageClasses)
     commandline.BoolFlag(enaSupport, nil, nil, "Instance types where ENA is supported or required")
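
The renamed constant is passed to ByteQuantityMinMaxRangeFlags, which (per the regenerated docs in the second file) fans out into three user-facing flags: --gpu-memory, --gpu-memory-min and --gpu-memory-max. A rough pflag sketch of that fan-out naming pattern; minMaxRangeFlags is a hypothetical stand-in for the real helper in the instance-selector CLI package and skips byte-quantity validation.

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    // minMaxRangeFlags registers a base flag plus -min/-max variants,
    // mirroring the naming scheme visible in the generated docs.
    func minMaxRangeFlags(fs *pflag.FlagSet, name, usage string) {
        fs.String(name, "", usage+" (sets --"+name+"-min and -max to the same value)")
        fs.String(name+"-min", "", "Minimum "+usage)
        fs.String(name+"-max", "", "Maximum "+usage)
    }

    func main() {
        fs := pflag.NewFlagSet("instance-selector", pflag.ContinueOnError)
        minMaxRangeFlags(fs, "gpu-memory", "Number of GPUs' total memory (Example: 4gb)")
        _ = fs.Parse([]string{"--gpu-memory", "4gb"})
        v, _ := fs.GetString("gpu-memory")
        fmt.Println(v) // "4gb"
    }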
@@ -374,7 +374,7 @@ func getFilters(commandline *cli.CommandLineInterface, region string, zones []string
         VCpusToMemoryRatio:     commandline.Float64Me(flags[vcpusToMemoryRatio]),
         CPUArchitecture:        commandline.StringMe(flags[cpuArchitecture]),
         GpusRange:              commandline.IntRangeMe(flags[gpus]),
-        GpuMemoryRange:         commandline.ByteQuantityRangeMe(flags[gpuMemoryTotal]),
+        GpuMemoryRange:         commandline.ByteQuantityRangeMe(flags[gpuMemory]),
         PlacementGroupStrategy: commandline.StringMe(flags[placementGroupStrategy]),
         UsageClass:             commandline.StringMe(flags[usageClass]),
         EnaSupport:             commandline.BoolMe(flags[enaSupport]),
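
Because the constant doubles as the flag name and the key into the parsed-flags map, the rename has to land in the const block, the flag registration, and this getFilters lookup together. The documented range semantics (the base flag sets both bounds; -min and -max leave the missing side at 0 or unbounded) can be sketched with assumed types; ByteRange and gpuMemoryRange below are illustrative stand-ins, not the instance-selector API.

    package main

    import "fmt"

    // ByteRange stands in for the GPU-memory range carried by the filters;
    // the real range types live in the instance-selector library.
    type ByteRange struct {
        LowerBound uint64 // MiB
        UpperBound uint64 // MiB
    }

    // gpuMemoryRange mirrors the behaviour described in the help text:
    // --gpu-memory sets both bounds, --gpu-memory-min/-max set one bound
    // each, with 0 and "infinity" filling in the missing side.
    func gpuMemoryRange(single, lower, upper *uint64) *ByteRange {
        switch {
        case single != nil:
            return &ByteRange{LowerBound: *single, UpperBound: *single}
        case lower != nil || upper != nil:
            r := &ByteRange{UpperBound: ^uint64(0)} // stand-in for "infinity"
            if lower != nil {
                r.LowerBound = *lower
            }
            if upper != nil {
                r.UpperBound = *upper
            }
            return r
        default:
            return nil // flag not passed: no GPU-memory filter at all
        }
    }

    func main() {
        fourGB := uint64(4 * 1024) // "4gb" taken here as 4096 MiB
        fmt.Printf("%+v\n", *gpuMemoryRange(&fourGB, nil, nil)) // {LowerBound:4096 UpperBound:4096}
    }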

Changed file 2 of 2:

@@ -32,15 +32,15 @@ kops toolbox instance-selector [flags]
       --allow-list string              List of allowed instance types to select from w/ regex syntax (Example: m[3-5]\.*)
       --base-instance-type string      Base instance type to retrieve similarly spec'd instance types
       --burst-support                  Burstable instance types
-      --cluster-autoscaler             Add auto-discovery tags for cluster-autoscaler to manage the instance-group
+      --cluster-autoscaler             Add auto-discovery tags for cluster-autoscaler to manage the instance-group (default true)
       --cpu-architecture string        CPU architecture [amd64, arm64] (default "amd64")
       --deny-list string               List of instance types which should be excluded w/ regex syntax (Example: m[1-2]\.*)
       --dry-run                        If true, only print the object that would be sent, without sending it. This flag can be used to create a cluster YAML or JSON manifest.
       --ena-support                    Instance types where ENA is supported or required
       --flexible                       Retrieves a group of instance types spanning multiple generations based on opinionated defaults and user overridden resource filters
-      --gpu-memory-total string        Number of GPUs' total memory (Example: 4gb) (sets --gpu-memory-total-min and -max to the same value)
-      --gpu-memory-total-max string    Maximum Number of GPUs' total memory (Example: 4gb) If --gpu-memory-total-min is not specified, the lower bound will be 0
-      --gpu-memory-total-min string    Minimum Number of GPUs' total memory (Example: 4gb) If --gpu-memory-total-max is not specified, the upper bound will be infinity
+      --gpu-memory string              Number of GPUs' total memory (Example: 4gb) (sets --gpu-memory-min and -max to the same value)
+      --gpu-memory-max string          Maximum Number of GPUs' total memory (Example: 4gb) If --gpu-memory-min is not specified, the lower bound will be 0
+      --gpu-memory-min string          Minimum Number of GPUs' total memory (Example: 4gb) If --gpu-memory-max is not specified, the upper bound will be infinity
       --gpus int                       Total number of GPUs (Example: 4) (sets --gpus-min and -max to the same value)
       --gpus-max int                   Maximum Total number of GPUs (Example: 4) If --gpus-min is not specified, the lower bound will be 0
       --gpus-min int                   Minimum Total number of GPUs (Example: 4) If --gpus-max is not specified, the upper bound will be infinity
@@ -53,8 +53,8 @@ kops toolbox instance-selector [flags]
       --network-interfaces int         Number of network interfaces (ENIs) that can be attached to the instance (sets --network-interfaces-min and -max to the same value)
       --network-interfaces-max int     Maximum Number of network interfaces (ENIs) that can be attached to the instance If --network-interfaces-min is not specified, the lower bound will be 0
       --network-interfaces-min int     Minimum Number of network interfaces (ENIs) that can be attached to the instance If --network-interfaces-max is not specified, the upper bound will be infinity
-      --node-count-max int             Set the maximum number of nodes (default 15)
-      --node-count-min int             Set the minimum number of nodes (default 2)
+      --node-count-max int             Set the maximum number of nodes (default 10)
+      --node-count-min int             Set the minimum number of nodes (default 1)
       --node-security-groups strings   Add precreated additional security groups to nodes
       --node-volume-size int           Set instance volume size (in GiB) for nodes
  -o, --output string                   Output format. One of json|yaml. Used with the --dry-run flag. (default "yaml")
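
Read together with the new defaults, a hedged usage sketch (only flags shown in the help text above are used; whatever cluster and instance-group identification the command otherwise requires is omitted):

    kops toolbox instance-selector --gpu-memory 4gb --dry-run -o yaml

Unless overridden with --cluster-autoscaler=false or explicit --node-count-min/--node-count-max values, the generated instance group now carries cluster-autoscaler auto-discovery tags and a node count range of 1 to 10.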