From 7654a923f16bc9ab65becc6a42919a45f27f2260 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Thu, 11 Oct 2018 17:36:46 +0300 Subject: [PATCH 01/17] feature: new integration: spotinst --- cmd/kops/create_cluster.go | 23 + pkg/apis/kops/componentconfig.go | 3 + pkg/apis/kops/validation/aws.go | 6 +- pkg/featureflag/featureflag.go | 3 + pkg/model/spotinstmodel/elastigroup.go | 368 +++++ pkg/resources/aws/aws.go | 17 +- pkg/resources/spotinst/aws.go | 137 ++ pkg/resources/spotinst/interfaces.go | 60 + pkg/resources/spotinst/resources.go | 193 +++ pkg/resources/spotinst/spotinst.go | 98 ++ .../v1.8.0.yaml.template | 129 ++ .../v1.9.0.yaml.template | 129 ++ upup/pkg/fi/cloudup/apply_cluster.go | 47 +- upup/pkg/fi/cloudup/awsup/aws_cloud.go | 28 + upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go | 9 + .../pkg/fi/cloudup/bootstrapchannelbuilder.go | 35 + .../fi/cloudup/spotinsttasks/elastigroup.go | 1345 +++++++++++++++++ .../spotinsttasks/elastigroup_fitask.go | 75 + upup/pkg/fi/cloudup/template_functions.go | 9 + 19 files changed, 2693 insertions(+), 21 deletions(-) create mode 100644 pkg/model/spotinstmodel/elastigroup.go create mode 100644 pkg/resources/spotinst/aws.go create mode 100644 pkg/resources/spotinst/interfaces.go create mode 100644 pkg/resources/spotinst/resources.go create mode 100644 pkg/resources/spotinst/spotinst.go create mode 100644 upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template create mode 100644 upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template create mode 100644 upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go create mode 100644 upup/pkg/fi/cloudup/spotinsttasks/elastigroup_fitask.go diff --git a/cmd/kops/create_cluster.go b/cmd/kops/create_cluster.go index dacbf20b39..165ffa7ceb 100644 --- a/cmd/kops/create_cluster.go +++ b/cmd/kops/create_cluster.go @@ -139,6 +139,10 @@ type CreateClusterOptions struct { // We can remove this 
once we support higher versions. VSphereDatastore string + // Spotinst options + SpotinstProduct string + SpotinstOrientation string + // ConfigBase is the location where we will store the configuration, it defaults to the state store ConfigBase string @@ -353,6 +357,13 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command { cmd.Flags().StringVar(&options.VSphereCoreDNSServer, "vsphere-coredns-server", options.VSphereCoreDNSServer, "vsphere-coredns-server is required for vSphere.") cmd.Flags().StringVar(&options.VSphereDatastore, "vsphere-datastore", options.VSphereDatastore, "vsphere-datastore is required for vSphere. Set a valid datastore in which to store dynamic provision volumes.") } + + if featureflag.SpotinstIntegration.Enabled() { + // Spotinst flags + cmd.Flags().StringVar(&options.SpotinstProduct, "spotinst-product", options.SpotinstProduct, "Set the product code.") + cmd.Flags().StringVar(&options.SpotinstOrientation, "spotinst-orientation", options.SpotinstOrientation, "Set the group orientation.") + } + return cmd } @@ -840,6 +851,18 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e } cluster.Spec.CloudConfig.VSphereDatastore = fi.String(c.VSphereDatastore) } + + if featureflag.SpotinstIntegration.Enabled() { + if cluster.Spec.CloudConfig == nil { + cluster.Spec.CloudConfig = &api.CloudConfiguration{} + } + if c.SpotinstProduct != "" { + cluster.Spec.CloudConfig.SpotinstProduct = fi.String(c.SpotinstProduct) + } + if c.SpotinstOrientation != "" { + cluster.Spec.CloudConfig.SpotinstOrientation = fi.String(c.SpotinstOrientation) + } + } } // Populate project diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index c7f1d638b5..8bcb39fe10 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -493,6 +493,9 @@ type CloudConfiguration struct { VSphereResourcePool *string `json:"vSphereResourcePool,omitempty"` VSphereDatastore *string 
`json:"vSphereDatastore,omitempty"` VSphereCoreDNSServer *string `json:"vSphereCoreDNSServer,omitempty"` + // Spotinst cloud-config specs + SpotinstProduct *string `json:"spotinstProduct,omitempty"` + SpotinstOrientation *string `json:"spotinstOrientation,omitempty"` } // HasAdmissionController checks if a specific admission controller is enabled diff --git a/pkg/apis/kops/validation/aws.go b/pkg/apis/kops/validation/aws.go index 572356d3ae..bb63048a4c 100644 --- a/pkg/apis/kops/validation/aws.go +++ b/pkg/apis/kops/validation/aws.go @@ -76,8 +76,10 @@ func awsValidateMachineType(fieldPath *field.Path, machineType string) field.Err allErrs := field.ErrorList{} if machineType != "" { - if _, err := awsup.GetMachineTypeInfo(machineType); err != nil { - allErrs = append(allErrs, field.Invalid(fieldPath, machineType, "machine type specified is invalid")) + for _, typ := range strings.Split(machineType, ",") { + if _, err := awsup.GetMachineTypeInfo(typ); err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath, typ, "machine type specified is invalid")) + } } } diff --git a/pkg/featureflag/featureflag.go b/pkg/featureflag/featureflag.go index 4acb614644..66d935b95b 100644 --- a/pkg/featureflag/featureflag.go +++ b/pkg/featureflag/featureflag.go @@ -77,6 +77,9 @@ var GoogleCloudBucketAcl = New("GoogleCloudBucketAcl", Bool(false)) // EnableNodeAuthorization enables the node authorization features var EnableNodeAuthorization = New("EnableNodeAuthorization", Bool(false)) +// SpotinstIntegration toggles the use of Spotinst integration. +var SpotinstIntegration = New("SpotinstIntegration", Bool(false)) + var flags = make(map[string]*FeatureFlag) var flagsMutex sync.Mutex diff --git a/pkg/model/spotinstmodel/elastigroup.go b/pkg/model/spotinstmodel/elastigroup.go new file mode 100644 index 0000000000..a5151cae28 --- /dev/null +++ b/pkg/model/spotinstmodel/elastigroup.go @@ -0,0 +1,368 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spotinstmodel + +import ( + "fmt" + "strings" + + "github.com/golang/glog" + "k8s.io/kops/pkg/apis/kops" + "k8s.io/kops/pkg/model" + "k8s.io/kops/pkg/model/awsmodel" + "k8s.io/kops/pkg/model/defaults" + "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/upup/pkg/fi/cloudup/awstasks" + "k8s.io/kops/upup/pkg/fi/cloudup/awsup" + "k8s.io/kops/upup/pkg/fi/cloudup/spotinsttasks" +) + +const ( + // InstanceGroupLabelOrientation is the metadata label used on the + // instance group to specify which orientation should be used. + InstanceGroupLabelOrientation = "spotinst.io/orientation" + + // InstanceGroupLabelUtilizeReservedInstances is the metadata label used + // on the instance group to specify whether reserved instances should be + // utilized. + InstanceGroupLabelUtilizeReservedInstances = "spotinst.io/utilize-reserved-instances" + + // InstanceGroupLabelFallbackToOnDemand is the metadata label used on the + // instance group to specify whether fallback to on-demand instances should + // be enabled. + InstanceGroupLabelFallbackToOnDemand = "spotinst.io/fallback-to-ondemand" + + // InstanceGroupLabelAutoScalerDisabled is the metadata label used on the + // instance group to specify whether the auto-scaler should be enabled. 
+ InstanceGroupLabelAutoScalerDisabled = "spotinst.io/autoscaler-disabled" + + // InstanceGroupLabelAutoScalerNodeLabels is the metadata label used on the + // instance group to specify whether default node labels should be set for + // the auto-scaler. + InstanceGroupLabelAutoScalerNodeLabels = "spotinst.io/autoscaler-node-labels" +) + +// ElastigroupModelBuilder configures Elastigroup objects +type ElastigroupModelBuilder struct { + *awsmodel.AWSModelContext + + BootstrapScript *model.BootstrapScript + Lifecycle *fi.Lifecycle + SecurityLifecycle *fi.Lifecycle +} + +var _ fi.ModelBuilder = &ElastigroupModelBuilder{} + +func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { + for _, ig := range b.InstanceGroups { + glog.V(2).Infof("Building instance group %q", b.AutoscalingGroupName(ig)) + + group := &spotinsttasks.Elastigroup{ + Lifecycle: b.Lifecycle, + Name: fi.String(b.AutoscalingGroupName(ig)), + ImageID: fi.String(ig.Spec.Image), + Monitoring: fi.Bool(false), + OnDemandInstanceType: fi.String(strings.Split(ig.Spec.MachineType, ",")[0]), + SpotInstanceTypes: strings.Split(ig.Spec.MachineType, ","), + SecurityGroups: []*awstasks.SecurityGroup{ + b.LinkToSecurityGroup(ig.Spec.Role), + }, + } + + // Cloud config. + { + if cfg := b.Cluster.Spec.CloudConfig; cfg != nil { + // Product. + if cfg.SpotinstProduct != nil { + group.Product = cfg.SpotinstProduct + } + + // Orientation. + if cfg.SpotinstOrientation != nil { + group.Orientation = cfg.SpotinstOrientation + } + } + } + + // Strategy. 
+ { + for k, v := range ig.ObjectMeta.Labels { + switch k { + case InstanceGroupLabelOrientation: + group.Orientation = fi.String(v) + break + + case InstanceGroupLabelUtilizeReservedInstances: + if v == "true" { + group.UtilizeReservedInstances = fi.Bool(true) + } else if v == "false" { + group.UtilizeReservedInstances = fi.Bool(false) + } + break + + case InstanceGroupLabelFallbackToOnDemand: + if v == "true" { + group.FallbackToOnDemand = fi.Bool(true) + } else if v == "false" { + group.FallbackToOnDemand = fi.Bool(false) + } + break + } + } + } + + // Instance profile. + { + iprof, err := b.LinkToIAMInstanceProfile(ig) + if err != nil { + return err + } + group.IAMInstanceProfile = iprof + } + + // Root volume. + { + volumeSize := fi.Int32Value(ig.Spec.RootVolumeSize) + if volumeSize == 0 { + var err error + volumeSize, err = defaults.DefaultInstanceGroupVolumeSize(ig.Spec.Role) + if err != nil { + return err + } + } + + volumeType := fi.StringValue(ig.Spec.RootVolumeType) + if volumeType == "" { + volumeType = awsmodel.DefaultVolumeType + } + + group.RootVolumeSize = fi.Int64(int64(volumeSize)) + group.RootVolumeType = fi.String(volumeType) + group.RootVolumeOptimization = ig.Spec.RootVolumeOptimization + } + + // Tenancy. + { + if ig.Spec.Tenancy != "" { + group.Tenancy = fi.String(ig.Spec.Tenancy) + } + } + + // Risk. + { + var risk float64 + switch ig.Spec.Role { + case kops.InstanceGroupRoleMaster: + risk = 0 + case kops.InstanceGroupRoleNode: + risk = 100 + case kops.InstanceGroupRoleBastion: + risk = 0 + default: + return fmt.Errorf("spotinst: kops.Role not found %s", ig.Spec.Role) + } + group.Risk = &risk + } + + // Security groups. + { + for _, id := range ig.Spec.AdditionalSecurityGroups { + sgTask := &awstasks.SecurityGroup{ + Name: fi.String(id), + ID: fi.String(id), + Shared: fi.Bool(true), + } + if err := c.EnsureTask(sgTask); err != nil { + return err + } + group.SecurityGroups = append(group.SecurityGroups, sgTask) + } + } + + // SSH Key. 
+ { + sshKey, err := b.LinkToSSHKey() + if err != nil { + return err + } + group.SSHKey = sshKey + } + + // Load balancer. + { + var lb *awstasks.LoadBalancer + switch ig.Spec.Role { + case kops.InstanceGroupRoleMaster: + if b.UseLoadBalancerForAPI() { + lb = b.LinkToELB("api") + } + case kops.InstanceGroupRoleBastion: + lb = b.LinkToELB(model.BastionELBSecurityGroupPrefix) + } + if lb != nil { + group.LoadBalancer = lb + } + } + + // User data. + { + userData, err := b.BootstrapScript.ResourceNodeUp(ig, b.Cluster) + if err != nil { + return err + } + group.UserData = userData + } + + // Public IP. + { + subnetMap := make(map[string]*kops.ClusterSubnetSpec) + for i := range b.Cluster.Spec.Subnets { + subnet := &b.Cluster.Spec.Subnets[i] + subnetMap[subnet.Name] = subnet + } + + var subnetType kops.SubnetType + for _, subnetName := range ig.Spec.Subnets { + subnet := subnetMap[subnetName] + if subnet == nil { + return fmt.Errorf("spotinst: InstanceGroup %q uses subnet %q that does not exist", ig.ObjectMeta.Name, subnetName) + } + if subnetType != "" && subnetType != subnet.Type { + return fmt.Errorf("spotinst: InstanceGroup %q cannot be in subnets of different Type", ig.ObjectMeta.Name) + } + subnetType = subnet.Type + } + + associatePublicIP := true + switch subnetType { + case kops.SubnetTypePublic, kops.SubnetTypeUtility: + associatePublicIP = true + if ig.Spec.AssociatePublicIP != nil { + associatePublicIP = *ig.Spec.AssociatePublicIP + } + case kops.SubnetTypePrivate: + associatePublicIP = false + if ig.Spec.AssociatePublicIP != nil { + if *ig.Spec.AssociatePublicIP { + glog.Warningf("Ignoring AssociatePublicIP=true for private InstanceGroup %q", ig.ObjectMeta.Name) + } + } + default: + return fmt.Errorf("spotinst: unknown subnet type %q", subnetType) + } + group.AssociatePublicIP = &associatePublicIP + } + + // Subnets. 
+ { + subnets, err := b.GatherSubnets(ig) + if err != nil { + return err + } + if len(subnets) == 0 { + return fmt.Errorf("spotinst: could not determine any subnets for InstanceGroup %q; subnets was %s", ig.ObjectMeta.Name, ig.Spec.Subnets) + } + for _, subnet := range subnets { + group.Subnets = append(group.Subnets, b.LinkToSubnet(subnet)) + } + } + + // Capacity. + { + minSize := int32(1) + if ig.Spec.MinSize != nil { + minSize = fi.Int32Value(ig.Spec.MinSize) + } else if ig.Spec.Role == kops.InstanceGroupRoleNode { + minSize = 2 + } + + maxSize := int32(1) + if ig.Spec.MaxSize != nil { + maxSize = *ig.Spec.MaxSize + } else if ig.Spec.Role == kops.InstanceGroupRoleNode { + maxSize = 10 + } + + group.MinSize = fi.Int64(int64(minSize)) + group.MaxSize = fi.Int64(int64(maxSize)) + } + + // Tags. + { + tags, err := b.CloudTagsForInstanceGroup(ig) + if err != nil { + return fmt.Errorf("spotinst: error building cloud tags: %v", err) + } + tags[awsup.TagClusterName] = b.ClusterName() + tags["Name"] = b.AutoscalingGroupName(ig) + group.Tags = tags + } + + // Auto Scaler. + { + if ig.Spec.Role != kops.InstanceGroupRoleBastion { + group.ClusterIdentifier = fi.String(b.ClusterName()) + + // Toggle auto scaler's features. + var autoScalerDisabled bool + var autoScalerNodeLabels bool + { + for k, v := range ig.ObjectMeta.Labels { + switch k { + case InstanceGroupLabelAutoScalerDisabled: + if v == "true" { + autoScalerDisabled = true + } else if v == "false" { + autoScalerDisabled = false + } + break + + case InstanceGroupLabelAutoScalerNodeLabels: + if v == "true" { + autoScalerNodeLabels = true + } else if v == "false" { + autoScalerNodeLabels = false + } + break + } + } + } + + // Toggle the auto scaler. + group.AutoScalerEnabled = fi.Bool(!autoScalerDisabled) + + // Set the node labels. 
+ if ig.Spec.Role == kops.InstanceGroupRoleNode { + nodeLabels := make(map[string]string) + for k, v := range ig.Spec.NodeLabels { + if strings.HasPrefix(k, kops.NodeLabelInstanceGroup) && !autoScalerNodeLabels { + continue + } + nodeLabels[k] = v + } + if len(nodeLabels) > 0 { + group.AutoScalerNodeLabels = nodeLabels + } + } + } + } + + c.AddTask(group) + } + + return nil +} diff --git a/pkg/resources/aws/aws.go b/pkg/resources/aws/aws.go index 03b837436d..16dbb3b7b3 100644 --- a/pkg/resources/aws/aws.go +++ b/pkg/resources/aws/aws.go @@ -34,7 +34,9 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kops/pkg/dns" + "k8s.io/kops/pkg/featureflag" "k8s.io/kops/pkg/resources" + "k8s.io/kops/pkg/resources/spotinst" "k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi/cloudup/awsup" ) @@ -74,8 +76,6 @@ func ListResourcesAWS(cloud awsup.AWSCloud, clusterName string) (map[string]*res ListELBs, ListELBV2s, ListTargetGroups, - // ASG - ListAutoScalingGroups, // Route 53 ListRoute53Records, @@ -83,6 +83,15 @@ func ListResourcesAWS(cloud awsup.AWSCloud, clusterName string) (map[string]*res ListIAMInstanceProfiles, ListIAMRoles, } + + if featureflag.SpotinstIntegration.Enabled() { + // Spotinst Elastigroups + listFunctions = append(listFunctions, ListSpotinstElastigroups) + } else { + // AutoScaling Groups + listFunctions = append(listFunctions, ListAutoScalingGroups) + } + for _, fn := range listFunctions { rt, err := fn(cloud, clusterName) if err != nil { @@ -2024,6 +2033,10 @@ func ListIAMInstanceProfiles(cloud fi.Cloud, clusterName string) ([]*resources.R return resourceTrackers, nil } +func ListSpotinstElastigroups(cloud fi.Cloud, clusterName string) ([]*resources.Resource, error) { + return spotinst.ListGroups(cloud.(awsup.AWSCloud).Spotinst(), clusterName) +} + func FindName(tags []*ec2.Tag) string { if name, found := awsup.FindEC2Tag(tags, "Name"); found { return name diff --git a/pkg/resources/spotinst/aws.go 
b/pkg/resources/spotinst/aws.go new file mode 100644 index 0000000000..cc75d65779 --- /dev/null +++ b/pkg/resources/spotinst/aws.go @@ -0,0 +1,137 @@ +package spotinst + +import ( + "context" + + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" + "k8s.io/kops/upup/pkg/fi" +) + +type awsService struct { + svc aws.Service +} + +// List returns a list of Elastigroups. +func (s *awsService) List(ctx context.Context) ([]Elastigroup, error) { + output, err := s.svc.List(ctx, nil) + if err != nil { + return nil, err + } + + groups := make([]Elastigroup, len(output.Groups)) + for i, group := range output.Groups { + groups[i] = &awsElastigroup{group} + } + + return groups, nil +} + +// Create creates a new Elastigroup and returns its ID. +func (s *awsService) Create(ctx context.Context, group Elastigroup) (string, error) { + input := &aws.CreateGroupInput{ + Group: group.Obj().(*aws.Group), + } + + output, err := s.svc.Create(ctx, input) + if err != nil { + return "", err + } + + return fi.StringValue(output.Group.ID), nil +} + +// Read returns an existing Elastigroup by ID. +func (s *awsService) Read(ctx context.Context, groupID string) (Elastigroup, error) { + input := &aws.ReadGroupInput{ + GroupID: fi.String(groupID), + } + + output, err := s.svc.Read(ctx, input) + if err != nil { + return nil, err + } + + return &awsElastigroup{output.Group}, nil +} + +// Update updates an existing Elastigroup. +func (s *awsService) Update(ctx context.Context, group Elastigroup) error { + input := &aws.UpdateGroupInput{ + Group: group.Obj().(*aws.Group), + } + + _, err := s.svc.Update(ctx, input) + return err + +} + +// Delete deletes an existing Elastigroup by ID. +func (s *awsService) Delete(ctx context.Context, groupID string) error { + input := &aws.DeleteGroupInput{ + GroupID: fi.String(groupID), + } + + _, err := s.svc.Delete(ctx, input) + return err +} + +// Detach removes one or more instances from the specified Elastigroup. 
+func (s *awsService) Detach(ctx context.Context, groupID string, instanceIDs []string) error { + input := &aws.DetachGroupInput{ + GroupID: fi.String(groupID), + InstanceIDs: instanceIDs, + ShouldDecrementTargetCapacity: fi.Bool(false), + ShouldTerminateInstances: fi.Bool(true), + } + + _, err := s.svc.Detach(ctx, input) + return err +} + +// Instances returns a list of all instances that belong to specified Elastigroup. +func (s *awsService) Instances(ctx context.Context, groupID string) ([]Instance, error) { + input := &aws.StatusGroupInput{ + GroupID: fi.String(groupID), + } + + output, err := s.svc.Status(ctx, input) + if err != nil { + return nil, err + } + + instances := make([]Instance, len(output.Instances)) + for i, instance := range output.Instances { + instances[i] = &awsInstance{instance} + } + + return instances, nil +} + +type awsElastigroup struct { + obj *aws.Group +} + +// Id returns the ID of the Elastigroup. +func (e *awsElastigroup) Id() string { return fi.StringValue(e.obj.ID) } + +// Name returns the name of the Elastigroup. +func (e *awsElastigroup) Name() string { return fi.StringValue(e.obj.Name) } + +// MinSize returns the minimum size of the Elastigroup. +func (e *awsElastigroup) MinSize() int { return fi.IntValue(e.obj.Capacity.Minimum) } + +// MaxSize returns the maximum size of the Elastigroup. +func (e *awsElastigroup) MaxSize() int { return fi.IntValue(e.obj.Capacity.Maximum) } + +// Obj returns the underlying object which is a cloud-specific implementation. +func (e *awsElastigroup) Obj() interface{} { return e.obj } + +type awsInstance struct { + obj *aws.Instance +} + +// Id returns the ID of the instance. +func (i *awsInstance) Id() string { return fi.StringValue(i.obj.ID) } + +// Obj returns the underlying object which is a cloud-specific implementation. 
+func (i *awsInstance) Obj() interface{} { return i.obj } diff --git a/pkg/resources/spotinst/interfaces.go b/pkg/resources/spotinst/interfaces.go new file mode 100644 index 0000000000..549f46bed8 --- /dev/null +++ b/pkg/resources/spotinst/interfaces.go @@ -0,0 +1,60 @@ +package spotinst + +import ( + "context" +) + +type ( + // Elastigroup contains configuration info and functions to control a set + // of instances. + Elastigroup interface { + // Id returns the ID of the Elastigroup. + Id() string + + // Name returns the name of the Elastigroup. + Name() string + + // MinSize returns the minimum size of the Elastigroup. + MinSize() int + + // MaxSize returns the maximum size of the Elastigroup. + MaxSize() int + + // Obj returns the raw object which is a cloud-specific implementation. + Obj() interface{} + } + + // Instance wraps a cloud-specific instance object. + Instance interface { + // Id returns the ID of the instance. + Id() string + + // Obj returns the raw object which is a cloud-specific implementation. + Obj() interface{} + } + + // Service is an interface that a cloud provider that is supported + // by Spotinst MUST implement to manage its Elastigroups. + Service interface { + // List returns a list of Elastigroups. + List(ctx context.Context) ([]Elastigroup, error) + + // Create creates a new Elastigroup and returns its ID. + Create(ctx context.Context, group Elastigroup) (string, error) + + // Read returns an existing Elastigroup by ID. + Read(ctx context.Context, groupID string) (Elastigroup, error) + + // Update updates an existing Elastigroup. + Update(ctx context.Context, group Elastigroup) error + + // Delete deletes an existing Elastigroup by ID. + Delete(ctx context.Context, groupID string) error + + // Detach removes one or more instances from the specified Elastigroup. + Detach(ctx context.Context, groupID string, instanceIDs []string) error + + // Instances returns a list of all instances that belong to specified Elastigroup. 
+ Instances(ctx context.Context, groupID string) ([]Instance, error) + } +) diff --git a/pkg/resources/spotinst/resources.go b/pkg/resources/spotinst/resources.go new file mode 100644 index 0000000000..f5ad6011e5 --- /dev/null +++ b/pkg/resources/spotinst/resources.go @@ -0,0 +1,193 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spotinst + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/kops/pkg/apis/kops" + "k8s.io/kops/pkg/cloudinstances" + "k8s.io/kops/pkg/resources" + "k8s.io/kops/upup/pkg/fi" +) + +// ListGroups returns a list of all Elastigroups as Resource objects. +func ListGroups(svc Service, clusterName string) ([]*resources.Resource, error) { + glog.V(2).Info("Listing all Elastigroups") + + groups, err := svc.List(context.Background()) + if err != nil { + return nil, err + } + + var resourceTrackers []*resources.Resource + for _, group := range groups { + if strings.HasSuffix(group.Name(), "."+clusterName) { + resource := &resources.Resource{ + ID: group.Id(), + Name: group.Name(), + Obj: group, + Deleter: deleter(svc, group), + Dumper: dumper, + } + resourceTrackers = append(resourceTrackers, resource) + } + } + + return resourceTrackers, nil +} + +// DeleteGroup deletes an existing Elastigroup. 
+func DeleteGroup(svc Service, group *cloudinstances.CloudInstanceGroup) error { + glog.V(2).Infof("Deleting Elastigroup %q", group.HumanName) + + return svc.Delete( + context.Background(), + group.Raw.(Elastigroup).Id()) +} + +// DeleteInstance removes an instance from its Elastigroup. +func DeleteInstance(svc Service, instance *cloudinstances.CloudInstanceGroupMember) error { + glog.V(2).Infof("Detaching instance %q from Elastigroup", instance.ID) + + return svc.Detach( + context.Background(), + instance.CloudInstanceGroup.Raw.(Elastigroup).Id(), + []string{instance.ID}) +} + +// GetCloudGroups returns a list of Elastigroups as CloudInstanceGroup objects. +func GetCloudGroups(svc Service, cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, + warnUnmatched bool, nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) { + glog.V(2).Info("Listing all Elastigroups") + + groups, err := svc.List(context.Background()) + if err != nil { + return nil, err + } + + instanceGroups := make(map[string]*cloudinstances.CloudInstanceGroup) + nodeMap := cloudinstances.GetNodeMap(nodes, cluster) + + for _, group := range groups { + // Find matching instance group. + var instancegroup *kops.InstanceGroup + for _, ig := range instancegroups { + name := getGroupNameByRole(cluster, ig) + if name == "" { + continue + } + if name == group.Name() { + if instancegroup != nil { + return nil, fmt.Errorf("spotinst: found multiple instance groups matching group %q", group.Name()) + } + instancegroup = ig + } + } + + if instancegroup == nil { + if warnUnmatched { + glog.Warningf("Found group with no corresponding instance group %q", group.Name()) + } + continue + } + + // Build the instance group. 
+ ig, err := buildCloudInstanceGroup(svc, instancegroup, group, nodeMap) + if err != nil { + return nil, fmt.Errorf("spotinst: failed to build instance group: %v", err) + } + + instanceGroups[instancegroup.ObjectMeta.Name] = ig + } + + return instanceGroups, nil +} + +func getGroupNameByRole(cluster *kops.Cluster, ig *kops.InstanceGroup) string { + var groupName string + + switch ig.Spec.Role { + case kops.InstanceGroupRoleMaster: + groupName = ig.ObjectMeta.Name + ".masters." + cluster.ObjectMeta.Name + case kops.InstanceGroupRoleNode: + groupName = ig.ObjectMeta.Name + "." + cluster.ObjectMeta.Name + case kops.InstanceGroupRoleBastion: + groupName = ig.ObjectMeta.Name + "." + cluster.ObjectMeta.Name + default: + glog.Warningf("Ignoring InstanceGroup of unknown role %q", ig.Spec.Role) + } + + return groupName +} + +func buildCloudInstanceGroup(svc Service, ig *kops.InstanceGroup, group Elastigroup, + nodeMap map[string]*v1.Node) (*cloudinstances.CloudInstanceGroup, error) { + + instances, err := svc.Instances(context.Background(), group.Id()) + if err != nil { + return nil, err + } + + instanceGroup := &cloudinstances.CloudInstanceGroup{ + HumanName: group.Name(), + InstanceGroup: ig, + MinSize: group.MinSize(), + MaxSize: group.MaxSize(), + Raw: group, + } + + currentName := group.Name() + newName := fmt.Sprintf("%s:%d", group.Name(), time.Now().Nanosecond()) + + for _, instance := range instances { + if instance.Id() == "" { + glog.Warningf("Ignoring instance with no ID: %v", instance) + continue + } + + if err := instanceGroup.NewCloudInstanceGroupMember( + instance.Id(), currentName, newName, nodeMap); err != nil { + return nil, fmt.Errorf("spotinst: error creating cloud instance group member: %v", err) + } + } + + return instanceGroup, nil +} + +func deleter(svc Service, group Elastigroup) func(fi.Cloud, *resources.Resource) error { + return func(cloud fi.Cloud, resource *resources.Resource) error { + glog.V(2).Infof("Deleting Elastigroup %q", group.Id()) + 
return svc.Delete(context.Background(), group.Id()) + } +} + +func dumper(op *resources.DumpOperation, resource *resources.Resource) error { + data := make(map[string]interface{}) + + data["id"] = resource.ID + data["type"] = resource.Type + data["raw"] = resource.Obj + + op.Dump.Resources = append(op.Dump.Resources, data) + return nil +} diff --git a/pkg/resources/spotinst/spotinst.go b/pkg/resources/spotinst/spotinst.go new file mode 100644 index 0000000000..5ba9c8403b --- /dev/null +++ b/pkg/resources/spotinst/spotinst.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spotinst + +import ( + "fmt" + + "github.com/golang/glog" + "github.com/spotinst/spotinst-sdk-go/service/elastigroup" + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/credentials" + "github.com/spotinst/spotinst-sdk-go/spotinst/log" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" + kopsv "k8s.io/kops" + "k8s.io/kops/pkg/apis/kops" +) + +// NewService returns a Service interface for the specified cloud provider. 
+func NewService(cloudProviderID kops.CloudProviderID) (Service, error) { + svc := elastigroup.New(session.New(NewConfig())) + + switch cloudProviderID { + case kops.CloudProviderAWS: + return &awsService{svc.CloudProviderAWS()}, nil + default: + return nil, fmt.Errorf("spotinst: unsupported cloud provider: %s", cloudProviderID) + } +} + +// NewConfig returns a new configuration object. +func NewConfig() *spotinst.Config { + config := spotinst.DefaultConfig() + + config.WithCredentials(NewCredentials()) + config.WithLogger(NewStdLogger()) + config.WithUserAgent("kubernetes-kops/" + kopsv.Version) + + return config +} + +// NewCredentials returns a new chain-credentials object. +func NewCredentials() *credentials.Credentials { + return credentials.NewChainCredentials( + new(credentials.EnvProvider), + new(credentials.FileProvider), + ) +} + +// NewStdLogger returns a new Logger. +func NewStdLogger() log.Logger { + return log.LoggerFunc(func(format string, args ...interface{}) { + glog.V(2).Infof(format, args...) + }) +} + +// NewElastigroup returns an Elastigroup wrapper for the specified cloud provider. +func NewElastigroup(cloudProviderID kops.CloudProviderID, + obj interface{}) (Elastigroup, error) { + + switch cloudProviderID { + case kops.CloudProviderAWS: + return &awsElastigroup{obj.(*aws.Group)}, nil + default: + return nil, fmt.Errorf("spotinst: unsupported cloud provider: %s", cloudProviderID) + } +} + +// LoadCredentials attempts to load credentials using the default chain. +func LoadCredentials() (credentials.Value, error) { + var ( + chain = NewCredentials() + creds credentials.Value + err error + ) + + // Attempt to load the credentials. 
+ creds, err = chain.Get() + if err != nil { + return creds, fmt.Errorf("spotinst: unable to load credentials: %v", err) + } + + return creds, nil +} diff --git a/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template b/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template new file mode 100644 index 0000000000..f8146d93c2 --- /dev/null +++ b/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.8.0.yaml.template @@ -0,0 +1,129 @@ +# ------------------------------------------ +# Config Map +# ------------------------------------------ +apiVersion: v1 +kind: ConfigMap +metadata: + name: spotinst-kubernetes-cluster-controller-config + namespace: kube-system +data: + spotinst.token: {{ SpotinstToken }} + spotinst.account: {{ SpotinstAccount }} + spotinst.cluster-identifier: {{ ClusterName }} +--- +# ------------------------------------------ +# Secret +# ------------------------------------------ +apiVersion: v1 +kind: Secret +metadata: + name: spotinst-kubernetes-cluster-controller-certs + namespace: kube-system +type: Opaque +--- +# ------------------------------------------ +# Service Account +# ------------------------------------------ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +--- +# ------------------------------------------ +# Cluster Role +# ------------------------------------------ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +rules: +- apiGroups: [""] + resources: ["pods", "nodes", "replicationcontrollers", "events", "limitranges", "services", "persistentvolumes", "persistentvolumeclaims"] + verbs: ["get", "delete", "list", "patch", "update"] +- apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get","list","patch"] +- 
apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get","list"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles"] + verbs: ["patch"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["list"] +- nonResourceURLs: ["/version/", "/version"] + verbs: ["get"] +--- +# ------------------------------------------ +# Cluster Role Binding +# ------------------------------------------ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: spotinst-kubernetes-cluster-controller +subjects: +- kind: ServiceAccount + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +--- +# ------------------------------------------ +# Deployment +# ------------------------------------------ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + labels: + k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io + template: + metadata: + labels: + k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io + spec: + containers: + - name: spotinst-kubernetes-cluster-controller + image: spotinst/kubernetes-cluster-controller:1.0.16 + imagePullPolicy: Always + env: + - name: SPOTINST_TOKEN + valueFrom: + configMapKeyRef: + name: spotinst-kubernetes-cluster-controller-config + key: spotinst.token + - name: SPOTINST_ACCOUNT + valueFrom: + configMapKeyRef: + name: spotinst-kubernetes-cluster-controller-config + key: spotinst.account + - name: CLUSTER_IDENTIFIER + valueFrom: + configMapKeyRef: + name: spotinst-kubernetes-cluster-controller-config + key: spotinst.cluster-identifier + volumeMounts: + - name: 
spotinst-kubernetes-cluster-controller-certs + mountPath: /certs + volumes: + - name: spotinst-kubernetes-cluster-controller-certs + secret: + secretName: spotinst-kubernetes-cluster-controller-certs + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: spotinst-kubernetes-cluster-controller +--- diff --git a/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template b/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template new file mode 100644 index 0000000000..832a4e60e0 --- /dev/null +++ b/upup/models/cloudup/resources/addons/spotinst-kubernetes-cluster-controller.addons.k8s.io/v1.9.0.yaml.template @@ -0,0 +1,129 @@ +# ------------------------------------------ +# Config Map +# ------------------------------------------ +apiVersion: v1 +kind: ConfigMap +metadata: + name: spotinst-kubernetes-cluster-controller-config + namespace: kube-system +data: + spotinst.token: {{ SpotinstToken }} + spotinst.account: {{ SpotinstAccount }} + spotinst.cluster-identifier: {{ ClusterName }} +--- +# ------------------------------------------ +# Secret +# ------------------------------------------ +apiVersion: v1 +kind: Secret +metadata: + name: spotinst-kubernetes-cluster-controller-certs + namespace: kube-system +type: Opaque +--- +# ------------------------------------------ +# Service Account +# ------------------------------------------ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +--- +# ------------------------------------------ +# Cluster Role +# ------------------------------------------ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +rules: +- apiGroups: [""] + resources: ["pods", "nodes", "replicationcontrollers", "events", "limitranges", "services", 
"persistentvolumes", "persistentvolumeclaims"] + verbs: ["get", "delete", "list", "patch", "update"] +- apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get","list","patch"] +- apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get","list"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles"] + verbs: ["patch"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["list"] +- nonResourceURLs: ["/version/", "/version"] + verbs: ["get"] +--- +# ------------------------------------------ +# Cluster Role Binding +# ------------------------------------------ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: spotinst-kubernetes-cluster-controller +subjects: +- kind: ServiceAccount + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +--- +# ------------------------------------------ +# Deployment +# ------------------------------------------ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io + name: spotinst-kubernetes-cluster-controller + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io + template: + metadata: + labels: + k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io + spec: + containers: + - name: spotinst-kubernetes-cluster-controller + image: spotinst/kubernetes-cluster-controller:1.0.16 + imagePullPolicy: Always + env: + - name: SPOTINST_TOKEN + valueFrom: + configMapKeyRef: + name: spotinst-kubernetes-cluster-controller-config + key: spotinst.token + - name: SPOTINST_ACCOUNT + valueFrom: + configMapKeyRef: + name: spotinst-kubernetes-cluster-controller-config + key: spotinst.account + - name: 
CLUSTER_IDENTIFIER + valueFrom: + configMapKeyRef: + name: spotinst-kubernetes-cluster-controller-config + key: spotinst.cluster-identifier + volumeMounts: + - name: spotinst-kubernetes-cluster-controller-certs + mountPath: /certs + volumes: + - name: spotinst-kubernetes-cluster-controller-certs + secret: + secretName: spotinst-kubernetes-cluster-controller-certs + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: spotinst-kubernetes-cluster-controller +--- diff --git a/upup/pkg/fi/cloudup/apply_cluster.go b/upup/pkg/fi/cloudup/apply_cluster.go index 159864d2a7..1d22b0bfbf 100644 --- a/upup/pkg/fi/cloudup/apply_cluster.go +++ b/upup/pkg/fi/cloudup/apply_cluster.go @@ -45,6 +45,7 @@ import ( "k8s.io/kops/pkg/model/domodel" "k8s.io/kops/pkg/model/gcemodel" "k8s.io/kops/pkg/model/openstackmodel" + "k8s.io/kops/pkg/model/spotinstmodel" "k8s.io/kops/pkg/model/vspheremodel" "k8s.io/kops/pkg/resources/digitalocean" "k8s.io/kops/pkg/templates" @@ -62,6 +63,7 @@ import ( "k8s.io/kops/upup/pkg/fi/cloudup/gcetasks" "k8s.io/kops/upup/pkg/fi/cloudup/openstack" "k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks" + "k8s.io/kops/upup/pkg/fi/cloudup/spotinsttasks" "k8s.io/kops/upup/pkg/fi/cloudup/terraform" "k8s.io/kops/upup/pkg/fi/cloudup/vsphere" "k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks" @@ -380,16 +382,16 @@ func (c *ApplyClusterCmd) Run() error { "iamRolePolicy": &awstasks.IAMRolePolicy{}, // VPC / Networking - "dhcpOptions": &awstasks.DHCPOptions{}, - "internetGateway": &awstasks.InternetGateway{}, - "route": &awstasks.Route{}, - "routeTable": &awstasks.RouteTable{}, - "routeTableAssociation": &awstasks.RouteTableAssociation{}, - "securityGroup": &awstasks.SecurityGroup{}, - "securityGroupRule": &awstasks.SecurityGroupRule{}, - "subnet": &awstasks.Subnet{}, - "vpc": &awstasks.VPC{}, - "ngw": &awstasks.NatGateway{}, + "dhcpOptions": &awstasks.DHCPOptions{}, + "internetGateway": &awstasks.InternetGateway{}, + "route": 
&awstasks.Route{}, + "routeTable": &awstasks.RouteTable{}, + "routeTableAssociation": &awstasks.RouteTableAssociation{}, + "securityGroup": &awstasks.SecurityGroup{}, + "securityGroupRule": &awstasks.SecurityGroupRule{}, + "subnet": &awstasks.Subnet{}, + "vpc": &awstasks.VPC{}, + "ngw": &awstasks.NatGateway{}, "vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{}, // ELB @@ -399,6 +401,9 @@ func (c *ApplyClusterCmd) Run() error { // Autoscaling "autoscalingGroup": &awstasks.AutoscalingGroup{}, "launchConfiguration": &awstasks.LaunchConfiguration{}, + + // Spotinst + "spotinstElastigroup": &spotinsttasks.Elastigroup{}, }) if len(sshPublicKeys) == 0 { @@ -673,13 +678,21 @@ func (c *ApplyClusterCmd) Run() error { KopsModelContext: modelContext, } - l.Builders = append(l.Builders, &awsmodel.AutoscalingGroupModelBuilder{ - AWSModelContext: awsModelContext, - BootstrapScript: bootstrapScriptBuilder, - Lifecycle: &clusterLifecycle, - - SecurityLifecycle: &securityLifecycle, - }) + if featureflag.SpotinstIntegration.Enabled() { + l.Builders = append(l.Builders, &spotinstmodel.ElastigroupModelBuilder{ + AWSModelContext: awsModelContext, + BootstrapScript: bootstrapScriptBuilder, + Lifecycle: &clusterLifecycle, + SecurityLifecycle: &securityLifecycle, + }) + } else { + l.Builders = append(l.Builders, &awsmodel.AutoscalingGroupModelBuilder{ + AWSModelContext: awsModelContext, + BootstrapScript: bootstrapScriptBuilder, + Lifecycle: &clusterLifecycle, + SecurityLifecycle: &securityLifecycle, + }) + } case kops.CloudProviderDO: doModelContext := &domodel.DOModelContext{ KopsModelContext: modelContext, diff --git a/upup/pkg/fi/cloudup/awsup/aws_cloud.go b/upup/pkg/fi/cloudup/awsup/aws_cloud.go index f53496d667..bb8e53b044 100644 --- a/upup/pkg/fi/cloudup/awsup/aws_cloud.go +++ b/upup/pkg/fi/cloudup/awsup/aws_cloud.go @@ -48,6 +48,8 @@ import ( "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/apis/kops/model" "k8s.io/kops/pkg/cloudinstances" + 
"k8s.io/kops/pkg/featureflag" + "k8s.io/kops/pkg/resources/spotinst" "k8s.io/kops/upup/pkg/fi" k8s_aws "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" ) @@ -102,6 +104,7 @@ type AWSCloud interface { ELBV2() elbv2iface.ELBV2API Autoscaling() autoscalingiface.AutoScalingAPI Route53() route53iface.Route53API + Spotinst() spotinst.Service // TODO: Document and rationalize these tags/filters methods AddTags(name *string, tags map[string]string) @@ -157,6 +160,7 @@ type awsCloudImplementation struct { elbv2 *elbv2.ELBV2 autoscaling *autoscaling.AutoScaling route53 *route53.Route53 + spotinst spotinst.Service region string @@ -265,6 +269,13 @@ func NewAWSCloud(region string, tags map[string]string) (AWSCloud, error) { c.route53.Handlers.Send.PushFront(requestLogger) c.addHandlers(region, &c.route53.Handlers) + if featureflag.SpotinstIntegration.Enabled() { + c.spotinst, err = spotinst.NewService(kops.CloudProviderAWS) + if err != nil { + return c, err + } + } + awsCloudInstances[region] = c raw = c } @@ -325,6 +336,10 @@ func NewEC2Filter(name string, values ...string) *ec2.Filter { // DeleteGroup deletes an aws autoscaling group func (c *awsCloudImplementation) DeleteGroup(g *cloudinstances.CloudInstanceGroup) error { + if c.spotinst != nil { + return spotinst.DeleteGroup(c.spotinst, g) + } + return deleteGroup(c, g) } @@ -366,6 +381,10 @@ func deleteGroup(c AWSCloud, g *cloudinstances.CloudInstanceGroup) error { // DeleteInstance deletes an aws instance func (c *awsCloudImplementation) DeleteInstance(i *cloudinstances.CloudInstanceGroupMember) error { + if c.spotinst != nil { + return spotinst.DeleteInstance(c.spotinst, i) + } + return deleteInstance(c, i) } @@ -393,6 +412,11 @@ func deleteInstance(c AWSCloud, i *cloudinstances.CloudInstanceGroupMember) erro // GetCloudGroups returns a groups of instances that back a kops instance groups func (c *awsCloudImplementation) GetCloudGroups(cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, warnUnmatched bool, 
nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) { + if c.spotinst != nil { + return spotinst.GetCloudGroups(c.spotinst, cluster, + instancegroups, warnUnmatched, nodes) + } + return getCloudGroups(c, cluster, instancegroups, warnUnmatched, nodes) } @@ -1140,6 +1164,10 @@ func (c *awsCloudImplementation) Route53() route53iface.Route53API { return c.route53 } +func (c *awsCloudImplementation) Spotinst() spotinst.Service { + return c.spotinst +} + func (c *awsCloudImplementation) FindVPCInfo(vpcID string) (*fi.VPCInfo, error) { return findVPCInfo(c, vpcID) } diff --git a/upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go b/upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go index c16efdf39d..da1c161d8a 100644 --- a/upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go +++ b/upup/pkg/fi/cloudup/awsup/mock_aws_cloud.go @@ -34,6 +34,7 @@ import ( dnsproviderroute53 "k8s.io/kops/dnsprovider/pkg/dnsprovider/providers/aws/route53" "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/cloudinstances" + "k8s.io/kops/pkg/resources/spotinst" "k8s.io/kops/upup/pkg/fi" ) @@ -78,6 +79,7 @@ type MockCloud struct { MockRoute53 route53iface.Route53API MockELB elbiface.ELBAPI MockELBV2 elbv2iface.ELBV2API + MockSpotinst spotinst.Service } func (c *MockAWSCloud) DeleteGroup(g *cloudinstances.CloudInstanceGroup) error { @@ -236,6 +238,13 @@ func (c *MockAWSCloud) Route53() route53iface.Route53API { return c.MockRoute53 } +func (c *MockAWSCloud) Spotinst() spotinst.Service { + if c.MockSpotinst == nil { + glog.Fatalf("MockSpotinst not set") + } + return c.MockSpotinst +} + func (c *MockAWSCloud) FindVPCInfo(id string) (*fi.VPCInfo, error) { return findVPCInfo(c, id) } diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index 51920c102d..43194737dd 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -445,6 +445,41 @@ func (b *BootstrapChannelBuilder) buildManifest() 
(*channelsapi.Addons, map[stri } } + if featureflag.SpotinstIntegration.Enabled() { + key := "spotinst-kubernetes-cluster-controller.addons.k8s.io" + version := "1.0.16" + + { + id := "v1.8.0" + location := key + "/" + id + ".yaml" + + addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + KubernetesVersion: "<1.9.0", + Id: id, + }) + manifests[key+"-"+id] = "addons/" + location + } + + { + id := "v1.9.0" + location := key + "/" + id + ".yaml" + + addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + KubernetesVersion: ">=1.9.0", + Id: id, + }) + manifests[key+"-"+id] = "addons/" + location + } + } + // The role.kubernetes.io/networking is used to label anything related to a networking addin, // so that if we switch networking plugins (e.g. calico -> weave or vice-versa), we'll replace the // old networking plugin, and there won't be old pods "floating around". diff --git a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go new file mode 100644 index 0000000000..4b1f1c1851 --- /dev/null +++ b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go @@ -0,0 +1,1345 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spotinsttasks + +import ( + "context" + "encoding/base64" + "fmt" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/golang/glog" + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kops/pkg/resources/spotinst" + "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/upup/pkg/fi/cloudup/awstasks" + "k8s.io/kops/upup/pkg/fi/cloudup/awsup" + "k8s.io/kops/upup/pkg/fi/cloudup/terraform" + "k8s.io/kops/upup/pkg/fi/utils" +) + +//go:generate fitask -type=Elastigroup +type Elastigroup struct { + Name *string + Lifecycle *fi.Lifecycle + + ID *string + MinSize *int64 + MaxSize *int64 + Risk *float64 + UtilizeReservedInstances *bool + FallbackToOnDemand *bool + Product *string + Orientation *string + Tags map[string]string + UserData *fi.ResourceHolder + ImageID *string + OnDemandInstanceType *string + SpotInstanceTypes []string + IAMInstanceProfile *awstasks.IAMInstanceProfile + LoadBalancer *awstasks.LoadBalancer + SSHKey *awstasks.SSHKey + Subnets []*awstasks.Subnet + SecurityGroups []*awstasks.SecurityGroup + Monitoring *bool + AssociatePublicIP *bool + RootVolumeSize *int64 + RootVolumeType *string + RootVolumeIOPS *int64 + RootVolumeOptimization *bool + Tenancy *string + AutoScalerEnabled *bool + AutoScalerNodeLabels map[string]string + ClusterIdentifier *string +} + +var _ fi.CompareWithID = &Elastigroup{} + +func (e *Elastigroup) CompareWithID() *string { + return e.Name +} + +func (e *Elastigroup) find(svc spotinst.Service, name string) (*aws.Group, error) { + groups, err := svc.List(context.Background()) + if err != nil { + return nil, fmt.Errorf("spotinst: failed to find elastigroup %s: %v", name, err) + } + + var out *aws.Group + for _, 
group := range groups { + if group.Name() == name { + out = group.Obj().(*aws.Group) + break + } + } + if out == nil { + return nil, fmt.Errorf("spotinst: failed to find elastigroup %q", name) + } + + return out, nil +} + +var _ fi.HasCheckExisting = &Elastigroup{} + +func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) { + cloud := c.Cloud.(awsup.AWSCloud) + + group, err := e.find(cloud.Spotinst(), *e.Name) + if err != nil { + return nil, err + } + if group == nil { + return nil, nil + } + + actual := &Elastigroup{} + actual.ID = group.ID + actual.Name = group.Name + actual.MinSize = fi.Int64(int64(fi.IntValue(group.Capacity.Minimum))) + actual.MaxSize = fi.Int64(int64(fi.IntValue(group.Capacity.Maximum))) + actual.Orientation = group.Strategy.AvailabilityVsCost + + // Compute. + { + compute := group.Compute + actual.Product = compute.Product + + // Instance types. + { + actual.OnDemandInstanceType = compute.InstanceTypes.OnDemand + actual.SpotInstanceTypes = compute.InstanceTypes.Spot + } + + // Subnets. + { + for _, zone := range compute.AvailabilityZones { + if zone.SubnetID != nil { + actual.Subnets = append(actual.Subnets, &awstasks.Subnet{ID: zone.SubnetID}) + } + } + if subnetSlicesEqualIgnoreOrder(actual.Subnets, e.Subnets) { + actual.Subnets = e.Subnets + } + } + } + + // Launch Specification. + { + lc := group.Compute.LaunchSpecification + + // Image. + { + image, err := resolveImage(cloud, fi.StringValue(lc.ImageID)) + if err != nil { + return nil, err + } + actual.ImageID = image.Name + } + + // Tags. + { + if len(lc.Tags) > 0 { + actual.Tags = make(map[string]string) + for _, tag := range lc.Tags { + actual.Tags[*tag.Key] = *tag.Value + } + } + } + + // Security groups. + { + for _, sg := range lc.SecurityGroupIDs { + actual.SecurityGroups = append(actual.SecurityGroups, &awstasks.SecurityGroup{ID: fi.String(sg)}) + } + } + + // Block device mappings. 
+ { + for _, b := range lc.BlockDeviceMappings { + if b.EBS == nil || b.EBS.SnapshotID != nil { + // Not the root. + continue + } + actual.RootVolumeType = b.EBS.VolumeType + actual.RootVolumeSize = fi.Int64(int64(fi.IntValue(b.EBS.VolumeSize))) + actual.RootVolumeIOPS = fi.Int64(int64(fi.IntValue(b.EBS.IOPS))) + } + } + + // User data. + { + if lc.UserData != nil { + userData, err := base64.StdEncoding.DecodeString(*lc.UserData) + if err != nil { + return nil, fmt.Errorf("spotinst: error decoding user data: %v", err) + } + actual.UserData = fi.WrapResource(fi.NewStringResource(string(userData))) + } + } + + // Network interfaces. + { + associatePublicIP := false + if len(lc.NetworkInterfaces) > 0 { + for _, iface := range lc.NetworkInterfaces { + if fi.BoolValue(iface.AssociatePublicIPAddress) { + associatePublicIP = true + break + } + } + } + actual.AssociatePublicIP = fi.Bool(associatePublicIP) + } + + if lc.LoadBalancersConfig != nil { + if lbs := lc.LoadBalancersConfig.LoadBalancers; len(lbs) > 0 { + actual.LoadBalancer = &awstasks.LoadBalancer{ + Name: lbs[0].Name, + LoadBalancerName: lbs[0].Name, + } + } + } + + if lc.IAMInstanceProfile != nil { + actual.IAMInstanceProfile = &awstasks.IAMInstanceProfile{Name: lc.IAMInstanceProfile.Name} + } + + if lc.KeyPair != nil { + actual.SSHKey = &awstasks.SSHKey{Name: lc.KeyPair} + } + + if lc.Tenancy != nil { + actual.Tenancy = lc.Tenancy + } + } + + // Avoid spurious changes + actual.Lifecycle = e.Lifecycle + + return actual, nil +} + +func (e *Elastigroup) CheckExisting(c *fi.Context) bool { + cloud := c.Cloud.(awsup.AWSCloud) + group, err := e.find(cloud.Spotinst(), *e.Name) + return err == nil && group != nil +} + +func (e *Elastigroup) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *Elastigroup) CheckChanges(a, e, changes *Elastigroup) error { + if e.ImageID == nil { + return fi.RequiredField("ImageID") + } + if e.OnDemandInstanceType == nil { + return 
fi.RequiredField("OnDemandInstanceType") + } + if a != nil { + if e.Name == nil { + return fi.RequiredField("Name") + } + } + return nil +} + +func (eg *Elastigroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Elastigroup) error { + return eg.createOrUpdate(t.Cloud.(awsup.AWSCloud), a, e, changes) +} + +func (eg *Elastigroup) createOrUpdate(cloud awsup.AWSCloud, a, e, changes *Elastigroup) error { + if a == nil { + return eg.create(cloud, a, e, changes) + } else { + return eg.update(cloud, a, e, changes) + } +} + +func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) error { + glog.V(2).Infof("Creating elastigroup %q", *e.Name) + e.applyDefaults() + + group := &aws.Group{ + Capacity: new(aws.Capacity), + Strategy: new(aws.Strategy), + Compute: &aws.Compute{ + LaunchSpecification: new(aws.LaunchSpecification), + InstanceTypes: new(aws.InstanceTypes), + }, + } + + // General. + { + group.SetName(e.Name) + group.SetDescription(e.Name) + } + + // Capacity. + { + group.Capacity.SetTarget(fi.Int(int(*e.MinSize))) + group.Capacity.SetMinimum(fi.Int(int(*e.MinSize))) + group.Capacity.SetMaximum(fi.Int(int(*e.MaxSize))) + } + + // Strategy. + { + group.Strategy.SetRisk(e.Risk) + group.Strategy.SetAvailabilityVsCost(fi.String(string(normalizeOrientation(e.Orientation)))) + group.Strategy.SetFallbackToOnDemand(e.FallbackToOnDemand) + group.Strategy.SetUtilizeReservedInstances(e.UtilizeReservedInstances) + } + + // Compute. + { + group.Compute.SetProduct(e.Product) + + // Instance types. + { + group.Compute.InstanceTypes.SetOnDemand(e.OnDemandInstanceType) + group.Compute.InstanceTypes.SetSpot(e.SpotInstanceTypes) + } + + // Availability zones. + { + zones := make([]*aws.AvailabilityZone, len(e.Subnets)) + for i, subnet := range e.Subnets { + zone := new(aws.AvailabilityZone) + zone.SetName(subnet.AvailabilityZone) + zone.SetSubnetId(subnet.ID) + zones[i] = zone + } + group.Compute.SetAvailabilityZones(zones) + } + + // Launch Specification. 
+ { + group.Compute.LaunchSpecification.SetMonitoring(e.Monitoring) + group.Compute.LaunchSpecification.SetKeyPair(e.SSHKey.Name) + + if e.Tenancy != nil { + group.Compute.LaunchSpecification.SetTenancy(e.Tenancy) + } + + // Block device mappings. + { + rootDevices, err := e.buildRootDevice(cloud) + if err != nil { + return err + } + + ephemeralDevices, err := e.buildEphemeralDevices(e.OnDemandInstanceType) + if err != nil { + return err + } + + if len(rootDevices) != 0 || len(ephemeralDevices) != 0 { + var mappings []*aws.BlockDeviceMapping + for device, bdm := range rootDevices { + mappings = append(mappings, e.buildBlockDeviceMapping(device, bdm)) + } + for device, bdm := range ephemeralDevices { + mappings = append(mappings, e.buildBlockDeviceMapping(device, bdm)) + } + if len(mappings) > 0 { + group.Compute.LaunchSpecification.SetBlockDeviceMappings(mappings) + } + } + } + + // Image ID. + { + image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + if err != nil { + return err + } + group.Compute.LaunchSpecification.SetImageId(image.ImageId) + } + + // User data. + { + if e.UserData != nil { + userData, err := e.UserData.AsString() + if err != nil { + return err + } + encoded := base64.StdEncoding.EncodeToString([]byte(userData)) + group.Compute.LaunchSpecification.SetUserData(fi.String(encoded)) + } + } + + // IAM instance profile. + { + iprof := new(aws.IAMInstanceProfile) + iprof.SetName(e.IAMInstanceProfile.GetName()) + group.Compute.LaunchSpecification.SetIAMInstanceProfile(iprof) + } + + // Security groups. + { + securityGroupIDs := make([]string, len(e.SecurityGroups)) + for i, sg := range e.SecurityGroups { + securityGroupIDs[i] = *sg.ID + } + group.Compute.LaunchSpecification.SetSecurityGroupIDs(securityGroupIDs) + } + + // Public IP. 
+ { + if *e.AssociatePublicIP { + iface := new(aws.NetworkInterface) + iface.SetDeviceIndex(fi.Int(0)) + iface.SetAssociatePublicIPAddress(fi.Bool(true)) + iface.SetDeleteOnTermination(fi.Bool(true)) + group.Compute.LaunchSpecification.SetNetworkInterfaces( + []*aws.NetworkInterface{iface}, + ) + } + } + + // Load balancer. + { + if e.LoadBalancer != nil { + elb, err := awstasks.FindLoadBalancerByNameTag(cloud, fi.StringValue(e.LoadBalancer.Name)) + if err != nil { + return err + } + if elb != nil { + lb := new(aws.LoadBalancer) + lb.SetName(elb.LoadBalancerName) + lb.SetType(fi.String("CLASSIC")) + + cfg := new(aws.LoadBalancersConfig) + cfg.SetLoadBalancers([]*aws.LoadBalancer{lb}) + + group.Compute.LaunchSpecification.SetLoadBalancersConfig(cfg) + } + } + } + + // Tags. + { + tags := e.buildTags() + group.Compute.LaunchSpecification.SetTags(tags) + } + } + } + + // Integration. + { + if e.ClusterIdentifier != nil { + k8s := new(aws.KubernetesIntegration) + k8s.SetClusterIdentifier(e.ClusterIdentifier) + k8s.SetIntegrationMode(fi.String("pod")) + + if e.AutoScalerEnabled != nil { + autoScale := new(aws.AutoScaleKubernetes) + autoScale.SetIsEnabled(e.AutoScalerEnabled) + + labelsMap := e.AutoScalerNodeLabels + if labelsMap != nil && len(labelsMap) > 0 { + labels := e.buildAutoScaleLabels(labelsMap) + autoScale.SetLabels(labels) + } + + k8s.SetAutoScale(autoScale) + } + + integration := new(aws.Integration) + integration.SetKubernetes(k8s) + + group.SetIntegration(integration) + } + } + + attempt := 0 + maxAttempts := 10 + +readyLoop: + for { + attempt++ + glog.V(2).Infof("(%d/%d) Attempting to create elastigroup: %s, config: %s", + attempt, maxAttempts, *e.Name, stringutil.Stringify(group)) + + // Wait for IAM instance profile to be ready. + time.Sleep(10 * time.Second) + + // Wrap the raw object as an Elastigroup. + eg, err := spotinst.NewElastigroup(cloud.ProviderID(), group) + if err != nil { + return err + } + + // Create the Elastigroup. 
+ id, err := cloud.Spotinst().Create(context.Background(), eg) + if err == nil { + e.ID = fi.String(id) + break + } + + if errs, ok := err.(client.Errors); ok { + for _, err := range errs { + if strings.Contains(err.Message, "Invalid IAM Instance Profile name") { + if attempt > maxAttempts { + return fmt.Errorf("IAM instance profile not yet created/propagated (original error: %v)", err) + } + + glog.V(4).Infof("Got an error indicating that the IAM instance profile %q is not ready %q", fi.StringValue(e.IAMInstanceProfile.Name), err) + glog.Infof("Waiting for IAM instance profile %q to be ready", fi.StringValue(e.IAMInstanceProfile.Name)) + goto readyLoop + } + } + + return fmt.Errorf("spotinst: failed to create elastigroup: %v", err) + } + } + + return nil +} + +func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) error { + glog.V(2).Infof("Updating elastigroup %q", *e.Name) + + actual, err := e.find(cloud.Spotinst(), *e.Name) + if err != nil { + glog.Errorf("Unable to resolve elastigroup %q, error: %s", *e.Name, err) + return err + } + + group := new(aws.Group) + group.SetId(actual.ID) + + // Strategy. + { + // Orientation. + if changes.Orientation != nil { + if group.Strategy == nil { + group.Strategy = new(aws.Strategy) + } + + group.Strategy.SetAvailabilityVsCost(fi.String(string(normalizeOrientation(e.Orientation)))) + changes.Orientation = nil + } + + // Fallback to on-demand. + if changes.FallbackToOnDemand != nil { + if group.Strategy == nil { + group.Strategy = new(aws.Strategy) + } + + group.Strategy.SetFallbackToOnDemand(e.FallbackToOnDemand) + changes.FallbackToOnDemand = nil + } + + // Utilize reserved instances. + if changes.UtilizeReservedInstances != nil { + if group.Strategy == nil { + group.Strategy = new(aws.Strategy) + } + + group.Strategy.SetUtilizeReservedInstances(e.UtilizeReservedInstances) + changes.UtilizeReservedInstances = nil + } + } + + // Compute. + { + // Product. 
+ if changes.Product != nil { + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + + group.Compute.SetProduct(e.Product) + changes.Product = nil + } + + // OnDemand instance type. + { + if changes.OnDemandInstanceType != nil { + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.InstanceTypes == nil { + group.Compute.InstanceTypes = new(aws.InstanceTypes) + } + + group.Compute.InstanceTypes.SetOnDemand(e.OnDemandInstanceType) + changes.OnDemandInstanceType = nil + } + } + + // Spot instance types. + { + if changes.SpotInstanceTypes != nil { + types := make([]string, len(e.SpotInstanceTypes)) + for i, typ := range e.SpotInstanceTypes { + types[i] = typ + } + + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.InstanceTypes == nil { + group.Compute.InstanceTypes = new(aws.InstanceTypes) + } + + group.Compute.InstanceTypes.SetSpot(types) + changes.SpotInstanceTypes = nil + } + } + + // Availability zones. + { + if changes.Subnets != nil { + zones := make([]*aws.AvailabilityZone, len(e.Subnets)) + for i, subnet := range e.Subnets { + zone := new(aws.AvailabilityZone) + zone.SetName(subnet.AvailabilityZone) + zone.SetSubnetId(subnet.ID) + zones[i] = zone + } + + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + + group.Compute.SetAvailabilityZones(zones) + changes.Subnets = nil + } + } + + // Launch specification. + { + // Security groups. + { + if changes.SecurityGroups != nil { + securityGroupIDs := make([]string, len(e.SecurityGroups)) + for i, sg := range e.SecurityGroups { + securityGroupIDs[i] = *sg.ID + } + + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + + group.Compute.LaunchSpecification.SetSecurityGroupIDs(securityGroupIDs) + changes.SecurityGroups = nil + } + } + + // User data. 
+ { + if changes.UserData != nil { + userData, err := e.UserData.AsString() + if err != nil { + return err + } + encoded := base64.StdEncoding.EncodeToString([]byte(userData)) + + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + + group.Compute.LaunchSpecification.SetUserData(fi.String(encoded)) + changes.UserData = nil + } + } + + // Network interfaces. + { + if changes.AssociatePublicIP != nil { + if *changes.AssociatePublicIP { + iface := new(aws.NetworkInterface) + iface.SetDeviceIndex(fi.Int(0)) + iface.SetAssociatePublicIPAddress(fi.Bool(true)) + iface.SetDeleteOnTermination(fi.Bool(true)) + + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + group.Compute.LaunchSpecification.SetNetworkInterfaces( + []*aws.NetworkInterface{iface}, + ) + } + + changes.AssociatePublicIP = nil + } + } + + // Image ID. + { + if changes.ImageID != nil { + image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + if err != nil { + return err + } + + if *actual.Compute.LaunchSpecification.ImageID != *image.ImageId { + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + group.Compute.LaunchSpecification.SetImageId(image.ImageId) + } + + changes.ImageID = nil + } + } + + // Tags. + { + if changes.Tags != nil { + tags := e.buildTags() + + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + + group.Compute.LaunchSpecification.SetTags(tags) + changes.Tags = nil + } + } + + // IAM instance profile. 
+ { + if changes.IAMInstanceProfile != nil { + iprof := new(aws.IAMInstanceProfile) + iprof.SetName(e.IAMInstanceProfile.GetName()) + + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + + group.Compute.LaunchSpecification.SetIAMInstanceProfile(iprof) + changes.IAMInstanceProfile = nil + } + } + + // SSH key. + { + if changes.SSHKey != nil { + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + + group.Compute.LaunchSpecification.SetKeyPair(e.SSHKey.Name) + changes.SSHKey = nil + } + } + + // Load balancer. + { + if changes.LoadBalancer != nil { + elb, err := awstasks.FindLoadBalancerByNameTag(cloud, fi.StringValue(e.LoadBalancer.Name)) + if err != nil { + return err + } + if elb != nil { + lb := new(aws.LoadBalancer) + lb.SetName(elb.LoadBalancerName) + lb.SetType(fi.String("CLASSIC")) + + cfg := new(aws.LoadBalancersConfig) + cfg.SetLoadBalancers([]*aws.LoadBalancer{lb}) + + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + + group.Compute.LaunchSpecification.SetLoadBalancersConfig(cfg) + changes.LoadBalancer = nil + } + } + } + + // Tenancy. + { + if changes.Tenancy != nil { + if group.Compute == nil { + group.Compute = new(aws.Compute) + } + if group.Compute.LaunchSpecification == nil { + group.Compute.LaunchSpecification = new(aws.LaunchSpecification) + } + + group.Compute.LaunchSpecification.SetTenancy(e.Tenancy) + changes.Tenancy = nil + } + } + } + } + + // Capacity. 
+ { + if changes.MinSize != nil { + if group.Capacity == nil { + group.Capacity = new(aws.Capacity) + } + + group.Capacity.SetMinimum(fi.Int(int(*e.MinSize))) + changes.MinSize = nil + + // Scale up the target capacity, if needed. + actual, err := e.find(cloud.Spotinst(), *e.Name) + if err == nil && actual != nil { + if int64(*actual.Capacity.Target) < *e.MinSize { + group.Capacity.SetTarget(fi.Int(int(*e.MinSize))) + } + } + } + if changes.MaxSize != nil { + if group.Capacity == nil { + group.Capacity = new(aws.Capacity) + } + + group.Capacity.SetMaximum(fi.Int(int(*e.MaxSize))) + changes.MaxSize = nil + } + } + + // Auto Scaler. + { + if changes.AutoScalerEnabled != nil { + if group.Integration == nil { + group.Integration = new(aws.Integration) + } + if group.Integration.Kubernetes == nil { + group.Integration.Kubernetes = new(aws.KubernetesIntegration) + } + if group.Integration.Kubernetes.AutoScale == nil { + group.Integration.Kubernetes.AutoScale = new(aws.AutoScaleKubernetes) + } + + group.Integration.Kubernetes.AutoScale.SetIsEnabled(e.AutoScalerEnabled) + changes.AutoScalerEnabled = nil + } + + if nodeLabels := changes.AutoScalerNodeLabels; nodeLabels != nil && len(nodeLabels) > 0 { + if group.Integration == nil { + group.Integration = new(aws.Integration) + } + if group.Integration.Kubernetes == nil { + group.Integration.Kubernetes = new(aws.KubernetesIntegration) + } + if group.Integration.Kubernetes.AutoScale == nil { + group.Integration.Kubernetes.AutoScale = new(aws.AutoScaleKubernetes) + } + + group.Integration.Kubernetes.AutoScale.SetLabels(e.buildAutoScaleLabels(nodeLabels)) + changes.AutoScalerNodeLabels = nil + } + } + + empty := &Elastigroup{} + if !reflect.DeepEqual(empty, changes) { + glog.Warningf("Not all changes applied to elastigroup %q: %v", *group.ID, changes) + } + + if group.Compute == nil && + group.Capacity == nil && + group.Strategy == nil && + group.Integration == nil { + glog.V(2).Infof("No changes detected in elastigroup %q", 
*group.ID) + return nil + } + + glog.V(2).Infof("Updating elastigroup %q (config: %s)", *group.ID, stringutil.Stringify(group)) + + // Wrap the raw object as an Elastigroup. + eg, err := spotinst.NewElastigroup(cloud.ProviderID(), group) + if err != nil { + return err + } + + // Update the Elastigroup. + if err := cloud.Spotinst().Update(context.Background(), eg); err != nil { + return fmt.Errorf("spotinst: failed to update elastigroup: %v", err) + } + + return nil +} + +type terraformElastigroup struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Product *string `json:"product,omitempty"` + Region *string `json:"region,omitempty"` + SubnetIDs []*terraform.Literal `json:"subnet_ids,omitempty"` + LoadBalancers []*terraform.Literal `json:"elastic_load_balancers,omitempty"` + NetworkInterfaces []*terraformElastigroupNetworkInterface `json:"network_interface,omitempty"` + RootBlockDevice *terraformElastigroupBlockDevice `json:"ebs_block_device,omitempty"` + EphemeralBlockDevice []*terraformElastigroupBlockDevice `json:"ephemeral_block_device,omitempty"` + Integration *terraformElastigroupIntegration `json:"integration_kubernetes,omitempty"` + Tags []*terraformElastigroupTag `json:"tags,omitempty"` + + *terraformElastigroupCapacity + *terraformElastigroupStrategy + *terraformElastigroupInstanceTypes + *terraformElastigroupLaunchSpec +} + +type terraformElastigroupCapacity struct { + MinSize *int64 `json:"min_size,omitempty"` + MaxSize *int64 `json:"max_size,omitempty"` + DesiredCapacity *int64 `json:"desired_capacity,omitempty"` + CapacityUnit *string `json:"capacity_unit,omitempty"` +} + +type terraformElastigroupStrategy struct { + SpotPercentage *float64 `json:"spot_percentage,omitempty"` + Orientation *string `json:"orientation,omitempty"` + FallbackToOnDemand *bool `json:"fallback_to_ondemand,omitempty"` + UtilizeReservedInstances *bool `json:"utilize_reserved_instances,omitempty"` +} + +type 
terraformElastigroupInstanceTypes struct { + OnDemand *string `json:"instance_types_ondemand,omitempty"` + Spot []string `json:"instance_types_spot,omitempty"` +} + +type terraformElastigroupLaunchSpec struct { + Monitoring *bool `json:"enable_monitoring,omitempty"` + EBSOptimized *bool `json:"ebs_optimized,omitempty"` + ImageID *string `json:"image_id,omitempty"` + SecurityGroups []*terraform.Literal `json:"security_groups,omitempty"` + UserData *terraform.Literal `json:"user_data,omitempty"` + IAMInstanceProfile *terraform.Literal `json:"iam_instance_profile,omitempty"` + KeyName *terraform.Literal `json:"key_name,omitempty"` +} + +type terraformElastigroupBlockDevice struct { + DeviceName *string `json:"device_name,omitempty"` + VirtualName *string `json:"virtual_name,omitempty"` + VolumeType *string `json:"volume_type,omitempty"` + VolumeSize *int64 `json:"volume_size,omitempty"` + DeleteOnTermination *bool `json:"delete_on_termination,omitempty"` +} + +type terraformElastigroupNetworkInterface struct { + Description *string `json:"description,omitempty"` + DeviceIndex *int `json:"device_index,omitempty"` + AssociatePublicIPAddress *bool `json:"associate_public_ip_address,omitempty"` + DeleteOnTermination *bool `json:"delete_on_termination,omitempty"` +} + +type terraformElastigroupIntegration struct { + IntegrationMode *string `json:"integration_mode,omitempty"` + ClusterIdentifier *string `json:"cluster_identifier,omitempty"` + AutoScaleIsEnabled *bool `json:"autoscale_is_enabled,omitempty"` +} + +type terraformElastigroupTag struct { + Key *string `json:"key"` + Value *string `json:"value"` +} + +func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Elastigroup) error { + cloud := t.Cloud.(awsup.AWSCloud) + e.applyDefaults() + + tf := &terraformElastigroup{ + Name: e.Name, + Description: e.Name, + Product: e.Product, + Region: fi.String(cloud.Region()), + terraformElastigroupCapacity: &terraformElastigroupCapacity{ + 
DesiredCapacity: e.MinSize, + MinSize: e.MinSize, + MaxSize: e.MaxSize, + CapacityUnit: fi.String("instance"), + }, + terraformElastigroupStrategy: &terraformElastigroupStrategy{ + SpotPercentage: e.Risk, + Orientation: fi.String(string(normalizeOrientation(e.Orientation))), + FallbackToOnDemand: e.FallbackToOnDemand, + UtilizeReservedInstances: e.UtilizeReservedInstances, + }, + terraformElastigroupInstanceTypes: &terraformElastigroupInstanceTypes{ + OnDemand: e.OnDemandInstanceType, + Spot: e.SpotInstanceTypes, + }, + terraformElastigroupLaunchSpec: &terraformElastigroupLaunchSpec{}, + } + + // Image. + { + image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + if err != nil { + return err + } + tf.ImageID = image.ImageId + } + + var role string + for key := range e.Tags { + if strings.HasPrefix(key, awstasks.CloudTagInstanceGroupRolePrefix) { + suffix := strings.TrimPrefix(key, awstasks.CloudTagInstanceGroupRolePrefix) + if role != "" && role != suffix { + return fmt.Errorf("spotinst: found multiple role tags %q vs %q", role, suffix) + } + role = suffix + } + } + + // Security Groups. + { + for _, sg := range e.SecurityGroups { + tf.SecurityGroups = append(tf.SecurityGroups, sg.TerraformLink()) + if role != "" { + if err := t.AddOutputVariableArray(role+"_security_groups", sg.TerraformLink()); err != nil { + return err + } + } + } + } + + // User data. + { + if e.UserData != nil { + var err error + tf.UserData, err = t.AddFile("spotinst_elastigroup_aws", *e.Name, "user_data", e.UserData) + if err != nil { + return err + } + } + } + + // IAM Instance Profile. + { + if e.IAMInstanceProfile != nil { + tf.IAMInstanceProfile = e.IAMInstanceProfile.TerraformLink() + } + } + + // Monitoring. + { + if e.Monitoring != nil { + tf.Monitoring = e.Monitoring + } else { + tf.Monitoring = fi.Bool(false) + } + } + + // EBS Optimization. 
+ { + if e.RootVolumeOptimization != nil { + tf.EBSOptimized = e.RootVolumeOptimization + } else { + tf.EBSOptimized = fi.Bool(false) + } + } + + // SSH Key pair. + { + if e.SSHKey != nil { + tf.KeyName = e.SSHKey.TerraformLink() + } + } + + // Subnets. + { + for _, subnet := range e.Subnets { + tf.SubnetIDs = append(tf.SubnetIDs, subnet.TerraformLink()) + if role != "" { + if err := t.AddOutputVariableArray(role+"_subnet_ids", subnet.TerraformLink()); err != nil { + return err + } + } + } + } + + // Load balancer. + { + if e.LoadBalancer != nil { + tf.LoadBalancers = append(tf.LoadBalancers, e.LoadBalancer.TerraformLink()) + } + } + + // Public IP. + { + if e.AssociatePublicIP != nil && *e.AssociatePublicIP { + tf.NetworkInterfaces = append(tf.NetworkInterfaces, &terraformElastigroupNetworkInterface{ + Description: fi.String("eth0"), + DeviceIndex: fi.Int(0), + AssociatePublicIPAddress: fi.Bool(true), + DeleteOnTermination: fi.Bool(true), + }) + } + } + + // Block Devices. + { + rootDevices, err := e.buildRootDevice(t.Cloud.(awsup.AWSCloud)) + if err != nil { + return err + } + + ephemeralDevices, err := e.buildEphemeralDevices(e.OnDemandInstanceType) + if err != nil { + return err + } + + if len(rootDevices) != 0 { + if len(rootDevices) != 1 { + return fmt.Errorf("unexpectedly found multiple root devices") + } + + for name, bdm := range rootDevices { + tf.RootBlockDevice = &terraformElastigroupBlockDevice{ + DeviceName: fi.String(name), + VolumeType: bdm.EbsVolumeType, + VolumeSize: bdm.EbsVolumeSize, + DeleteOnTermination: fi.Bool(true), + } + } + } + + if len(ephemeralDevices) != 0 { + tf.EphemeralBlockDevice = []*terraformElastigroupBlockDevice{} + for _, deviceName := range sets.StringKeySet(ephemeralDevices).List() { + bdm := ephemeralDevices[deviceName] + tf.EphemeralBlockDevice = append(tf.EphemeralBlockDevice, &terraformElastigroupBlockDevice{ + VirtualName: bdm.VirtualName, + DeviceName: fi.String(deviceName), + }) + } + } + } + + // Integration. 
+ { + if e.ClusterIdentifier != nil { + tf.Integration = &terraformElastigroupIntegration{ + IntegrationMode: fi.String("pod"), + ClusterIdentifier: e.ClusterIdentifier, + } + if e.AutoScalerEnabled != nil { + tf.Integration.AutoScaleIsEnabled = e.AutoScalerEnabled + } + } + } + + // Tags. + { + tags := e.buildTags() + for _, tag := range tags { + tf.Tags = append(tf.Tags, &terraformElastigroupTag{ + Key: tag.Key, + Value: tag.Value, + }) + } + } + + return t.RenderResource("spotinst_elastigroup_aws", *e.Name, tf) +} + +func (e *Elastigroup) TerraformLink() *terraform.Literal { + return terraform.LiteralProperty("spotinst_elastigroup_aws", *e.Name, "id") +} + +func (e *Elastigroup) buildTags() []*aws.Tag { + tags := make([]*aws.Tag, 0, len(e.Tags)) + + for key, value := range e.Tags { + tags = append(tags, &aws.Tag{ + Key: fi.String(key), + Value: fi.String(value), + }) + } + + return tags +} + +func (e *Elastigroup) buildAutoScaleLabels(labelsMap map[string]string) []*aws.AutoScaleLabel { + labels := make([]*aws.AutoScaleLabel, 0, len(labelsMap)) + + for key, value := range labelsMap { + labels = append(labels, &aws.AutoScaleLabel{ + Key: fi.String(key), + Value: fi.String(value), + }) + } + + return labels +} + +func (e *Elastigroup) buildEphemeralDevices(instanceTypeName *string) (map[string]*awstasks.BlockDeviceMapping, error) { + if instanceTypeName == nil { + return nil, fi.RequiredField("InstanceType") + } + + instanceType, err := awsup.GetMachineTypeInfo(*instanceTypeName) + if err != nil { + return nil, err + } + + blockDeviceMappings := make(map[string]*awstasks.BlockDeviceMapping) + for _, ed := range instanceType.EphemeralDevices() { + m := &awstasks.BlockDeviceMapping{ + VirtualName: fi.String(ed.VirtualName), + } + blockDeviceMappings[ed.DeviceName] = m + } + + return blockDeviceMappings, nil +} + +func (e *Elastigroup) buildRootDevice(cloud awsup.AWSCloud) (map[string]*awstasks.BlockDeviceMapping, error) { + image, err := resolveImage(cloud, 
fi.StringValue(e.ImageID)) + if err != nil { + return nil, err + } + + rootDeviceName := fi.StringValue(image.RootDeviceName) + blockDeviceMappings := make(map[string]*awstasks.BlockDeviceMapping) + + rootDeviceMapping := &awstasks.BlockDeviceMapping{ + EbsDeleteOnTermination: fi.Bool(true), + EbsVolumeSize: e.RootVolumeSize, + EbsVolumeType: e.RootVolumeType, + EbsVolumeIops: e.RootVolumeIOPS, + } + blockDeviceMappings[rootDeviceName] = rootDeviceMapping + + return blockDeviceMappings, nil +} + +func (e *Elastigroup) buildBlockDeviceMapping(deviceName string, i *awstasks.BlockDeviceMapping) *aws.BlockDeviceMapping { + o := &aws.BlockDeviceMapping{} + o.DeviceName = fi.String(deviceName) + o.VirtualName = i.VirtualName + + if i.EbsDeleteOnTermination != nil || i.EbsVolumeSize != nil || i.EbsVolumeType != nil { + o.EBS = &aws.EBS{} + o.EBS.DeleteOnTermination = i.EbsDeleteOnTermination + o.EBS.VolumeSize = fi.Int(int(fi.Int64Value(i.EbsVolumeSize))) + o.EBS.VolumeType = i.EbsVolumeType + + // The parameter IOPS is not supported for gp2 volumes. 
+ if fi.StringValue(i.EbsVolumeType) != "gp2" { + o.EBS.IOPS = fi.Int(int(fi.Int64Value(i.EbsVolumeIops))) + } + } + + return o +} + +func (e *Elastigroup) applyDefaults() { + if e.FallbackToOnDemand == nil { + e.FallbackToOnDemand = fi.Bool(true) + } + + if e.UtilizeReservedInstances == nil { + e.UtilizeReservedInstances = fi.Bool(true) + } + + if e.Product == nil || (e.Product != nil && fi.StringValue(e.Product) == "") { + e.Product = fi.String("Linux/UNIX") + } + + if e.Orientation == nil || (e.Orientation != nil && fi.StringValue(e.Orientation) == "") { + e.Orientation = fi.String("balanced") + } +} + +func resolveImage(cloud awsup.AWSCloud, name string) (*ec2.Image, error) { + image, err := cloud.ResolveImage(name) + if err != nil { + return nil, fmt.Errorf("spotinst: unable to resolve image %q: %v", name, err) + } else if image == nil { + return nil, fmt.Errorf("spotinst: unable to resolve image %q: not found", name) + } + + return image, nil +} + +func subnetSlicesEqualIgnoreOrder(l, r []*awstasks.Subnet) bool { + var lIDs []string + for _, s := range l { + lIDs = append(lIDs, *s.ID) + } + + var rIDs []string + for _, s := range r { + if s.ID == nil { + glog.V(4).Infof("Subnet ID not set; returning not-equal: %v", s) + return false + } + rIDs = append(rIDs, *s.ID) + } + + return utils.StringSlicesEqualIgnoreOrder(lIDs, rIDs) +} + +type Orientation string + +const ( + OrientationBalanced Orientation = "balanced" + OrientationCost Orientation = "costOriented" + OrientationAvailability Orientation = "availabilityOriented" + OrientationEqualZoneDistribution Orientation = "equalAzDistribution" +) + +func normalizeOrientation(orientation *string) Orientation { + out := OrientationBalanced + + // Fast path. 
+ if orientation == nil { + return out + } + + switch *orientation { + case "cost": + out = OrientationCost + case "availability": + out = OrientationAvailability + case "equal-distribution": + out = OrientationEqualZoneDistribution + } + + return out +} diff --git a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup_fitask.go b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup_fitask.go new file mode 100644 index 0000000000..aaee0d1c84 --- /dev/null +++ b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup_fitask.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by ""fitask" -type=Elastigroup"; DO NOT EDIT + +package spotinsttasks + +import ( + "encoding/json" + + "k8s.io/kops/upup/pkg/fi" +) + +// Elastigroup + +// JSON marshalling boilerplate +type realElastigroup Elastigroup + +// UnmarshalJSON implements conversion to JSON, supporitng an alternate specification of the object as a string +func (o *Elastigroup) UnmarshalJSON(data []byte) error { + var jsonName string + if err := json.Unmarshal(data, &jsonName); err == nil { + o.Name = &jsonName + return nil + } + + var r realElastigroup + if err := json.Unmarshal(data, &r); err != nil { + return err + } + *o = Elastigroup(r) + return nil +} + +var _ fi.HasLifecycle = &Elastigroup{} + +// GetLifecycle returns the Lifecycle of the object, implementing fi.HasLifecycle +func (o *Elastigroup) GetLifecycle() *fi.Lifecycle { + return o.Lifecycle +} + +// SetLifecycle sets the Lifecycle of the object, implementing fi.SetLifecycle +func (o *Elastigroup) SetLifecycle(lifecycle fi.Lifecycle) { + o.Lifecycle = &lifecycle +} + +var _ fi.HasName = &Elastigroup{} + +// GetName returns the Name of the object, implementing fi.HasName +func (o *Elastigroup) GetName() *string { + return o.Name +} + +// SetName sets the Name of the object, implementing fi.SetName +func (o *Elastigroup) SetName(name string) { + o.Name = &name +} + +// String is the stringer function for the task, producing readable output using fi.TaskAsString +func (o *Elastigroup) String() string { + return fi.TaskAsString(o) +} diff --git a/upup/pkg/fi/cloudup/template_functions.go b/upup/pkg/fi/cloudup/template_functions.go index b23ad8df43..97d9ebd276 100644 --- a/upup/pkg/fi/cloudup/template_functions.go +++ b/upup/pkg/fi/cloudup/template_functions.go @@ -37,7 +37,9 @@ import ( "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/dns" + "k8s.io/kops/pkg/featureflag" "k8s.io/kops/pkg/model" + "k8s.io/kops/pkg/resources/spotinst" "k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi/cloudup/gce" @@ -102,6 
+104,13 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS return os.Getenv("DIGITALOCEAN_ACCESS_TOKEN") } + if featureflag.SpotinstIntegration.Enabled() { + if creds, err := spotinst.LoadCredentials(); err == nil { + dest["SpotinstToken"] = func() string { return creds.Token } + dest["SpotinstAccount"] = func() string { return creds.Account } + } + } + if tf.cluster.Spec.Networking != nil && tf.cluster.Spec.Networking.Flannel != nil { flannelBackendType := tf.cluster.Spec.Networking.Flannel.Backend if flannelBackendType == "" { From 4c8ac60bf3869175afe52eff76ffe2cae6d51639 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Thu, 11 Oct 2018 17:40:02 +0300 Subject: [PATCH 02/17] deps: vendor dependencies --- Gopkg.lock | 40 +- pkg/model/spotinstmodel/BUILD.bazel | 19 + pkg/resources/aws/BUILD.bazel | 2 + pkg/resources/spotinst/BUILD.bazel | 28 + upup/pkg/fi/cloudup/BUILD.bazel | 3 + upup/pkg/fi/cloudup/awsup/BUILD.bazel | 2 + upup/pkg/fi/cloudup/spotinsttasks/BUILD.bazel | 25 + .../service/elastigroup/BUILD.bazel | 17 + .../service/elastigroup/elastigroup.go | 47 + .../elastigroup/providers/aws/BUILD.bazel | 20 + .../service/elastigroup/providers/aws/aws.go | 3226 +++++++++++++++++ .../elastigroup/providers/aws/service.go | 44 + .../service/elastigroup/providers/aws/tag.go | 31 + .../elastigroup/providers/azure/BUILD.bazel | 20 + .../elastigroup/providers/azure/azure.go | 1486 ++++++++ .../elastigroup/providers/azure/service.go | 39 + .../elastigroup/providers/azure/tag.go | 31 + .../elastigroup/providers/gce/BUILD.bazel | 18 + .../elastigroup/providers/gce/service.go | 28 + .../service/elastigroup/providers/gce/tag.go | 31 + .../spotinst-sdk-go/spotinst/BUILD.bazel | 17 + .../spotinst/client/BUILD.bazel | 14 + .../spotinst-sdk-go/spotinst/client/client.go | 81 + .../spotinst/client/request.go | 76 + .../spotinst/client/response.go | 110 + .../spotinst-sdk-go/spotinst/config.go | 161 + .../spotinst/credentials/BUILD.bazel | 15 + 
.../spotinst/credentials/credentials.go | 86 + .../spotinst/credentials/provider_chain.go | 102 + .../spotinst/credentials/provider_env.go | 62 + .../spotinst/credentials/provider_file.go | 129 + .../spotinst/credentials/provider_static.go | 41 + .../spotinst-sdk-go/spotinst/log/BUILD.bazel | 9 + .../spotinst-sdk-go/spotinst/log/log.go | 26 + .../spotinst/session/BUILD.bazel | 10 + .../spotinst/session/session.go | 22 + .../spotinst-sdk-go/spotinst/types.go | 357 ++ .../spotinst/util/jsonutil/BUILD.bazel | 9 + .../spotinst/util/jsonutil/json.go | 184 + .../spotinst/util/stringutil/BUILD.bazel | 9 + .../spotinst/util/stringutil/stringutil.go | 69 + .../spotinst/util/uritemplates/BUILD.bazel | 12 + .../spotinst/util/uritemplates/LICENSE | 18 + .../util/uritemplates/uritemplates.go | 361 ++ .../spotinst/util/uritemplates/utils.go | 9 + .../spotinst-sdk-go/spotinst/version.go | 7 + .../client-go/util/workqueue/BUILD.bazel | 22 - .../util/workqueue/default_rate_limiters.go | 211 -- .../util/workqueue/delaying_queue.go | 255 -- vendor/k8s.io/client-go/util/workqueue/doc.go | 26 - .../client-go/util/workqueue/metrics.go | 195 - .../client-go/util/workqueue/parallelizer.go | 52 - .../k8s.io/client-go/util/workqueue/queue.go | 172 - .../util/workqueue/rate_limitting_queue.go | 69 - 54 files changed, 7144 insertions(+), 1011 deletions(-) create mode 100644 pkg/model/spotinstmodel/BUILD.bazel create mode 100644 pkg/resources/spotinst/BUILD.bazel create mode 100644 upup/pkg/fi/cloudup/spotinsttasks/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go create mode 100644 
vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/azure.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/service.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/tag.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/service.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/tag.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go create mode 100644 
vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/json.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/BUILD.bazel create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/LICENSE create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go delete mode 100644 vendor/k8s.io/client-go/util/workqueue/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go delete mode 100644 vendor/k8s.io/client-go/util/workqueue/delaying_queue.go delete mode 100644 vendor/k8s.io/client-go/util/workqueue/doc.go delete mode 100644 vendor/k8s.io/client-go/util/workqueue/metrics.go delete mode 100644 vendor/k8s.io/client-go/util/workqueue/parallelizer.go delete mode 100644 vendor/k8s.io/client-go/util/workqueue/queue.go delete mode 100644 vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go diff --git a/Gopkg.lock 
b/Gopkg.lock index 72ca484d2b..fd4d671ef7 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1074,6 +1074,27 @@ pruneopts = "UT" revision = "7fb2782df3d83e0036cc89f461ed0422628776f4" +[[projects]] + branch = "master" + digest = "1:b410da742bf89fa5f255ef1d95bdf42d2a263a0c8f35272b8d5b4faff49017cf" + name = "github.com/spotinst/spotinst-sdk-go" + packages = [ + "service/elastigroup", + "service/elastigroup/providers/aws", + "service/elastigroup/providers/azure", + "service/elastigroup/providers/gce", + "spotinst", + "spotinst/client", + "spotinst/credentials", + "spotinst/log", + "spotinst/session", + "spotinst/util/jsonutil", + "spotinst/util/stringutil", + "spotinst/util/uritemplates", + ] + pruneopts = "UT" + revision = "da4a02277b0a4c7c4a007c9265082237ef1170e5" + [[projects]] digest = "1:67ba0f5b63fa937e1e78273904a1fa0f7c2358c4dac967ac16e678f8e50e8aa5" name = "github.com/stretchr/testify" @@ -1615,7 +1636,7 @@ version = "kubernetes-1.11.3" [[projects]] - digest = "1:b1d67a042544a528d86e37e7ace303dc74313d18c3aab360037db5b797ddde0b" + digest = "1:004509a1d109aec0996f8ea494a216f5e74268469661689178ca06134c9d6f02" name = "k8s.io/client-go" packages = [ "discovery", @@ -1794,7 +1815,6 @@ "util/integer", "util/jsonpath", "util/retry", - "util/workqueue", ] pruneopts = "UT" revision = "2cefa64ff137e128daeddbd1775cd775708a05bf" @@ -2178,6 +2198,14 @@ "github.com/spf13/cobra/doc", "github.com/spf13/pflag", "github.com/spf13/viper", + "github.com/spotinst/spotinst-sdk-go/service/elastigroup", + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws", + "github.com/spotinst/spotinst-sdk-go/spotinst", + "github.com/spotinst/spotinst-sdk-go/spotinst/client", + "github.com/spotinst/spotinst-sdk-go/spotinst/credentials", + "github.com/spotinst/spotinst-sdk-go/spotinst/log", + "github.com/spotinst/spotinst-sdk-go/spotinst/session", + "github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil", "github.com/stretchr/testify/assert", "github.com/urfave/cli", 
"github.com/vmware/govmomi", @@ -2203,7 +2231,6 @@ "google.golang.org/api/storage/v1", "gopkg.in/gcfg.v1", "gopkg.in/yaml.v2", - "k8s.io/api/apps/v1", "k8s.io/api/core/v1", "k8s.io/api/extensions/v1beta1", "k8s.io/api/rbac/v1beta1", @@ -2228,6 +2255,7 @@ "k8s.io/apimachinery/pkg/util/validation", "k8s.io/apimachinery/pkg/util/validation/field", "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/util/yaml", "k8s.io/apimachinery/pkg/version", "k8s.io/apimachinery/pkg/watch", "k8s.io/apiserver/pkg/authentication/user", @@ -2243,23 +2271,17 @@ "k8s.io/apiserver/pkg/util/logs", "k8s.io/client-go/discovery", "k8s.io/client-go/discovery/fake", - "k8s.io/client-go/informers", "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/fake", "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/listers/apps/v1", "k8s.io/client-go/plugin/pkg/client/auth", "k8s.io/client-go/rest", "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", "k8s.io/client-go/tools/clientcmd", "k8s.io/client-go/tools/clientcmd/api", "k8s.io/client-go/tools/clientcmd/api/v1", - "k8s.io/client-go/tools/record", "k8s.io/client-go/util/flowcontrol", "k8s.io/client-go/util/homedir", - "k8s.io/client-go/util/workqueue", "k8s.io/code-generator/cmd/client-gen", "k8s.io/code-generator/cmd/conversion-gen", "k8s.io/code-generator/cmd/deepcopy-gen", diff --git a/pkg/model/spotinstmodel/BUILD.bazel b/pkg/model/spotinstmodel/BUILD.bazel new file mode 100644 index 0000000000..ce34943af2 --- /dev/null +++ b/pkg/model/spotinstmodel/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["elastigroup.go"], + importpath = "k8s.io/kops/pkg/model/spotinstmodel", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/kops:go_default_library", + "//pkg/model:go_default_library", + "//pkg/model/awsmodel:go_default_library", + 
"//pkg/model/defaults:go_default_library", + "//upup/pkg/fi:go_default_library", + "//upup/pkg/fi/cloudup/awstasks:go_default_library", + "//upup/pkg/fi/cloudup/awsup:go_default_library", + "//upup/pkg/fi/cloudup/spotinsttasks:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + ], +) diff --git a/pkg/resources/aws/BUILD.bazel b/pkg/resources/aws/BUILD.bazel index c334ac80f2..e35e7542bf 100644 --- a/pkg/resources/aws/BUILD.bazel +++ b/pkg/resources/aws/BUILD.bazel @@ -18,7 +18,9 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/dns:go_default_library", + "//pkg/featureflag:go_default_library", "//pkg/resources:go_default_library", + "//pkg/resources/spotinst:go_default_library", "//upup/pkg/fi:go_default_library", "//upup/pkg/fi/cloudup/awsup:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", diff --git a/pkg/resources/spotinst/BUILD.bazel b/pkg/resources/spotinst/BUILD.bazel new file mode 100644 index 0000000000..c496ede08b --- /dev/null +++ b/pkg/resources/spotinst/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "aws.go", + "interfaces.go", + "resources.go", + "spotinst.go", + ], + importpath = "k8s.io/kops/pkg/resources/spotinst", + visibility = ["//visibility:public"], + deps = [ + "//:go_default_library", + "//pkg/apis/kops:go_default_library", + "//pkg/cloudinstances:go_default_library", + "//pkg/resources:go_default_library", + "//upup/pkg/fi:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials:go_default_library", + 
"//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + ], +) diff --git a/upup/pkg/fi/cloudup/BUILD.bazel b/upup/pkg/fi/cloudup/BUILD.bazel index 5699f2b8f7..8d70ce10b6 100644 --- a/upup/pkg/fi/cloudup/BUILD.bazel +++ b/upup/pkg/fi/cloudup/BUILD.bazel @@ -48,8 +48,10 @@ go_library( "//pkg/model/domodel:go_default_library", "//pkg/model/gcemodel:go_default_library", "//pkg/model/openstackmodel:go_default_library", + "//pkg/model/spotinstmodel:go_default_library", "//pkg/model/vspheremodel:go_default_library", "//pkg/resources/digitalocean:go_default_library", + "//pkg/resources/spotinst:go_default_library", "//pkg/templates:go_default_library", "//upup/models:go_default_library", "//upup/pkg/fi:go_default_library", @@ -66,6 +68,7 @@ go_library( "//upup/pkg/fi/cloudup/gcetasks:go_default_library", "//upup/pkg/fi/cloudup/openstack:go_default_library", "//upup/pkg/fi/cloudup/openstacktasks:go_default_library", + "//upup/pkg/fi/cloudup/spotinsttasks:go_default_library", "//upup/pkg/fi/cloudup/terraform:go_default_library", "//upup/pkg/fi/cloudup/vsphere:go_default_library", "//upup/pkg/fi/cloudup/vspheretasks:go_default_library", diff --git a/upup/pkg/fi/cloudup/awsup/BUILD.bazel b/upup/pkg/fi/cloudup/awsup/BUILD.bazel index 01e9c8881c..7230ba48af 100644 --- a/upup/pkg/fi/cloudup/awsup/BUILD.bazel +++ b/upup/pkg/fi/cloudup/awsup/BUILD.bazel @@ -21,6 +21,8 @@ go_library( "//pkg/apis/kops:go_default_library", "//pkg/apis/kops/model:go_default_library", "//pkg/cloudinstances:go_default_library", + "//pkg/featureflag:go_default_library", + "//pkg/resources/spotinst:go_default_library", "//protokube/pkg/etcd:go_default_library", "//upup/pkg/fi:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", diff --git a/upup/pkg/fi/cloudup/spotinsttasks/BUILD.bazel 
b/upup/pkg/fi/cloudup/spotinsttasks/BUILD.bazel new file mode 100644 index 0000000000..3807d6334c --- /dev/null +++ b/upup/pkg/fi/cloudup/spotinsttasks/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "elastigroup.go", + "elastigroup_fitask.go", + ], + importpath = "k8s.io/kops/upup/pkg/fi/cloudup/spotinsttasks", + visibility = ["//visibility:public"], + deps = [ + "//pkg/resources/spotinst:go_default_library", + "//upup/pkg/fi:go_default_library", + "//upup/pkg/fi/cloudup/awstasks:go_default_library", + "//upup/pkg/fi/cloudup/awsup:go_default_library", + "//upup/pkg/fi/cloudup/terraform:go_default_library", + "//upup/pkg/fi/utils:go_default_library", + "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/BUILD.bazel new file mode 100644 index 0000000000..4df44a3777 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["elastigroup.go"], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup", + importpath = "github.com/spotinst/spotinst-sdk-go/service/elastigroup", + visibility = ["//visibility:public"], + deps = [ + 
"//vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session:go_default_library", + ], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go new file mode 100644 index 0000000000..6bab0322ae --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go @@ -0,0 +1,47 @@ +package elastigroup + +import ( + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure" + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce" + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" +) + +// Service provides the API operation methods for making requests to +// endpoints of the Spotinst API. See this package's package overview docs +// for details on the service. +type Service interface { + CloudProviderAWS() aws.Service + CloudProviderAzure() azure.Service + CloudProviderGCE() gce.Service +} + +type ServiceOp struct { + Client *client.Client +} + +var _ Service = &ServiceOp{} + +func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { + cfg := &spotinst.Config{} + cfg.Merge(sess.Config) + cfg.Merge(cfgs...) 
+ + return &ServiceOp{ + Client: client.New(cfg), + } +} + +func (s *ServiceOp) CloudProviderAWS() aws.Service { + return &aws.ServiceOp{s.Client} +} + +func (s *ServiceOp) CloudProviderAzure() azure.Service { + return &azure.ServiceOp{s.Client} +} + +func (s *ServiceOp) CloudProviderGCE() gce.Service { + return &gce.ServiceOp{s.Client} +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/BUILD.bazel new file mode 100644 index 0000000000..5096dc9187 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "aws.go", + "service.go", + "tag.go", + ], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws", + importpath = "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates:go_default_library", + ], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go new file mode 100644 index 0000000000..30bd2bd3e4 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go @@ -0,0 +1,3226 @@ +package aws + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + 
"strconv" + "time" + + "fmt" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates" +) + +// A Product represents the type of an operating system. +type Product int + +const ( + // ProductWindows represents the Windows product. + ProductWindows Product = iota + + // ProductWindowsVPC represents the Windows (Amazon VPC) product. + ProductWindowsVPC + + // ProductLinuxUnix represents the Linux/Unix product. + ProductLinuxUnix + + // ProductLinuxUnixVPC represents the Linux/Unix (Amazon VPC) product. + ProductLinuxUnixVPC + + // ProductSUSELinux represents the SUSE Linux product. + ProductSUSELinux + + // ProductSUSELinuxVPC represents the SUSE Linux (Amazon VPC) product. + ProductSUSELinuxVPC +) + +var ProductName = map[Product]string{ + ProductWindows: "Windows", + ProductWindowsVPC: "Windows (Amazon VPC)", + ProductLinuxUnix: "Linux/UNIX", + ProductLinuxUnixVPC: "Linux/UNIX (Amazon VPC)", + ProductSUSELinux: "SUSE Linux", + ProductSUSELinuxVPC: "SUSE Linux (Amazon VPC)", +} + +var ProductValue = map[string]Product{ + "Windows": ProductWindows, + "Windows (Amazon VPC)": ProductWindowsVPC, + "Linux/UNIX": ProductLinuxUnix, + "Linux/UNIX (Amazon VPC)": ProductLinuxUnixVPC, + "SUSE Linux": ProductSUSELinux, + "SUSE Linux (Amazon VPC)": ProductSUSELinuxVPC, +} + +func (p Product) String() string { + return ProductName[p] +} + +type Group struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Region *string `json:"region,omitempty"` + Capacity *Capacity `json:"capacity,omitempty"` + Compute *Compute `json:"compute,omitempty"` + Strategy *Strategy `json:"strategy,omitempty"` + Scaling *Scaling `json:"scaling,omitempty"` + Scheduling *Scheduling `json:"scheduling,omitempty"` + Integration *Integration 
`json:"thirdPartiesIntegration,omitempty"` + + // forceSendFields is a list of field names (e.g. "Keys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + forceSendFields []string + + // nullFields is a list of field names (e.g. "Keys") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + nullFields []string +} + +type Integration struct { + EC2ContainerService *EC2ContainerServiceIntegration `json:"ecs,omitempty"` + ElasticBeanstalk *ElasticBeanstalkIntegration `json:"elasticBeanstalk,omitempty"` + CodeDeploy *CodeDeployIntegration `json:"codeDeploy,omitempty"` + OpsWorks *OpsWorksIntegration `json:"opsWorks,omitempty"` + Rancher *RancherIntegration `json:"rancher,omitempty"` + Kubernetes *KubernetesIntegration `json:"kubernetes,omitempty"` + Mesosphere *MesosphereIntegration `json:"mesosphere,omitempty"` + Multai *MultaiIntegration `json:"mlbRuntime,omitempty"` + Nomad *NomadIntegration `json:"nomad,omitempty"` + Chef *ChefIntegration `json:"chef,omitempty"` + Gitlab *GitlabIntegration `json:"gitlab,omitempty"` + Route53 *Route53Integration `json:"route53,omitempty"` + DockerSwarm *DockerSwarmIntegration `json:"dockerSwarm,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScale struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + IsAutoConfig *bool `json:"isAutoConfig,omitempty"` + Cooldown *int `json:"cooldown,omitempty"` + 
Headroom *AutoScaleHeadroom `json:"headroom,omitempty"` + Down *AutoScaleDown `json:"down,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleECS struct { + AutoScale // embedding + Attributes []*AutoScaleAttributes `json:"attributes,omitempty"` + ShouldScaleDownNonServiceTasks *bool `json:"shouldScaleDownNonServiceTasks,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleKubernetes struct { + AutoScale // embedding + Labels []*AutoScaleLabel `json:"labels,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleNomad struct { + AutoScale // embedding + Constraints []*AutoScaleConstraint `json:"constraints,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleDockerSwarm struct { + AutoScale // embedding + + forceSendFields []string + nullFields []string +} + +type AutoScaleHeadroom struct { + CPUPerUnit *int `json:"cpuPerUnit,omitempty"` + MemoryPerUnit *int `json:"memoryPerUnit,omitempty"` + NumOfUnits *int `json:"numOfUnits,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleDown struct { + EvaluationPeriods *int `json:"evaluationPeriods,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleConstraint struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleLabel struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleAttributes struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ElasticBeanstalkIntegration struct { + EnvironmentID *string `json:"environmentId,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CodeDeployIntegration struct { + DeploymentGroups 
[]*DeploymentGroup `json:"deploymentGroups,omitempty"` + CleanUpOnFailure *bool `json:"cleanUpOnFailure,omitempty"` + TerminateInstanceOnFailure *bool `json:"terminateInstanceOnFailure,omitempty"` + + forceSendFields []string + nullFields []string +} + +type DeploymentGroup struct { + ApplicationName *string `json:"applicationName,omitempty"` + DeploymentGroupName *string `json:"deploymentGroupName,omitempty"` + + forceSendFields []string + nullFields []string +} + +type OpsWorksIntegration struct { + LayerID *string `json:"layerId,omitempty"` + StackType *string `json:"stackType,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RancherIntegration struct { + MasterHost *string `json:"masterHost,omitempty"` + AccessKey *string `json:"accessKey,omitempty"` + SecretKey *string `json:"secretKey,omitempty"` + Version *string `json:"version,omitempty"` + + forceSendFields []string + nullFields []string +} + +type EC2ContainerServiceIntegration struct { + ClusterName *string `json:"clusterName,omitempty"` + AutoScale *AutoScaleECS `json:"autoScale,omitempty"` + + forceSendFields []string + nullFields []string +} + +type KubernetesIntegration struct { + IntegrationMode *string `json:"integrationMode,omitempty"` + ClusterIdentifier *string `json:"clusterIdentifier,omitempty"` + Server *string `json:"apiServer,omitempty"` + Token *string `json:"token,omitempty"` + AutoScale *AutoScaleKubernetes `json:"autoScale,omitempty"` + + forceSendFields []string + nullFields []string +} + +type MesosphereIntegration struct { + Server *string `json:"apiServer,omitempty"` + + forceSendFields []string + nullFields []string +} + +type MultaiIntegration struct { + DeploymentID *string `json:"deploymentId,omitempty"` + + forceSendFields []string + nullFields []string +} + +type NomadIntegration struct { + MasterHost *string `json:"masterHost,omitempty"` + MasterPort *int `json:"masterPort,omitempty"` + ACLToken *string `json:"aclToken,omitempty"` + AutoScale 
*AutoScaleNomad `json:"autoScale,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ChefIntegration struct { + Server *string `json:"chefServer,omitempty"` + Organization *string `json:"organization,omitempty"` + User *string `json:"user,omitempty"` + PEMKey *string `json:"pemKey,omitempty"` + Version *string `json:"chefVersion,omitempty"` + + forceSendFields []string + nullFields []string +} + +type DockerSwarmIntegration struct { + MasterHost *string `json:"masterHost,omitempty"` + MasterPort *int `json:"masterPort,omitempty"` + AutoScale *AutoScaleDockerSwarm `json:"autoScale,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Route53Integration struct { + Domains []*Domain `json:"domains,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Domain struct { + HostedZoneID *string `json:"hostedZoneId,omitempty"` + RecordSets []*RecordSet `json:"recordSets,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RecordSet struct { + UsePublicIP *bool `json:"usePublicIp,omitempty"` + Name *string `json:"name,omitempty"` + + forceSendFields []string + nullFields []string +} + +type GitlabIntegration struct { + Runner *GitlabRunner `json:"runner,omitempty"` + + forceSendFields []string + nullFields []string +} + +type GitlabRunner struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Scheduling struct { + Tasks []*Task `json:"tasks,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Task struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + Type *string `json:"taskType,omitempty"` + Frequency *string `json:"frequency,omitempty"` + CronExpression *string `json:"cronExpression,omitempty"` + StartTime *string `json:"startTime,omitempty"` + ScaleTargetCapacity *int `json:"scaleTargetCapacity,omitempty"` + ScaleMinCapacity *int `json:"scaleMinCapacity,omitempty"` + ScaleMaxCapacity *int 
`json:"scaleMaxCapacity,omitempty"` + BatchSizePercentage *int `json:"batchSizePercentage,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + TargetCapacity *int `json:"targetCapacity,omitempty"` + MinCapacity *int `json:"minCapacity,omitempty"` + MaxCapacity *int `json:"maxCapacity,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Scaling struct { + Up []*ScalingPolicy `json:"up,omitempty"` + Down []*ScalingPolicy `json:"down,omitempty"` + Target []*ScalingPolicy `json:"target,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ScalingPolicy struct { + PolicyName *string `json:"policyName,omitempty"` + MetricName *string `json:"metricName,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Source *string `json:"source,omitempty"` + Statistic *string `json:"statistic,omitempty"` + Unit *string `json:"unit,omitempty"` + Threshold *float64 `json:"threshold,omitempty"` + Adjustment *int `json:"adjustment,omitempty"` + MinTargetCapacity *int `json:"minTargetCapacity,omitempty"` + MaxTargetCapacity *int `json:"maxTargetCapacity,omitempty"` + EvaluationPeriods *int `json:"evaluationPeriods,omitempty"` + Period *int `json:"period,omitempty"` + Cooldown *int `json:"cooldown,omitempty"` + Operator *string `json:"operator,omitempty"` + Dimensions []*Dimension `json:"dimensions,omitempty"` + Action *Action `json:"action,omitempty"` + Target *float64 `json:"target,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Action struct { + Type *string `json:"type,omitempty"` + Adjustment *string `json:"adjustment,omitempty"` + MinTargetCapacity *string `json:"minTargetCapacity,omitempty"` + MaxTargetCapacity *string `json:"maxTargetCapacity,omitempty"` + Maximum *string `json:"maximum,omitempty"` + Minimum *string `json:"minimum,omitempty"` + Target *string `json:"target,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Dimension struct { + Name *string 
`json:"name,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Strategy struct { + Risk *float64 `json:"risk,omitempty"` + OnDemandCount *int `json:"onDemandCount,omitempty"` + DrainingTimeout *int `json:"drainingTimeout,omitempty"` + AvailabilityVsCost *string `json:"availabilityVsCost,omitempty"` + LifetimePeriod *string `json:"lifetimePeriod,omitempty"` + UtilizeReservedInstances *bool `json:"utilizeReservedInstances,omitempty"` + FallbackToOnDemand *bool `json:"fallbackToOd,omitempty"` + SpinUpTime *int `json:"spinUpTime,omitempty"` + Signals []*Signal `json:"signals,omitempty"` + Persistence *Persistence `json:"persistence,omitempty"` + RevertToSpot *RevertToSpot `json:"revertToSpot,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Persistence struct { + ShouldPersistPrivateIP *bool `json:"shouldPersistPrivateIp,omitempty"` + ShouldPersistBlockDevices *bool `json:"shouldPersistBlockDevices,omitempty"` + ShouldPersistRootDevice *bool `json:"shouldPersistRootDevice,omitempty"` + BlockDevicesMode *string `json:"blockDevicesMode,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RevertToSpot struct { + PerformAt *string `json:"performAt,omitempty"` + TimeWindows []string `json:"timeWindows,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Signal struct { + Name *string `json:"name,omitempty"` + Timeout *int `json:"timeout,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Capacity struct { + Minimum *int `json:"minimum,omitempty"` + Maximum *int `json:"maximum,omitempty"` + Target *int `json:"target,omitempty"` + Unit *string `json:"unit,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Compute struct { + Product *string `json:"product,omitempty"` + InstanceTypes *InstanceTypes `json:"instanceTypes,omitempty"` + LaunchSpecification *LaunchSpecification 
`json:"launchSpecification,omitempty"` + AvailabilityZones []*AvailabilityZone `json:"availabilityZones,omitempty"` + PreferredAvailabilityZones []string `json:"preferredAvailabilityZones,omitempty"` + ElasticIPs []string `json:"elasticIps,omitempty"` + EBSVolumePool []*EBSVolume `json:"ebsVolumePool,omitempty"` + PrivateIPs []string `json:"privateIps,omitempty"` + SubnetIDs []string `json:"subnetIds,omitempty"` + + forceSendFields []string + nullFields []string +} + +type EBSVolume struct { + DeviceName *string `json:"deviceName,omitempty"` + VolumeIDs []string `json:"volumeIds,omitempty"` + + forceSendFields []string + nullFields []string +} + +type InstanceTypes struct { + OnDemand *string `json:"ondemand,omitempty"` + Spot []string `json:"spot,omitempty"` + PreferredSpot []string `json:"preferredSpot,omitempty"` + Weights []*InstanceTypeWeight `json:"weights,omitempty"` + + forceSendFields []string + nullFields []string +} + +type InstanceTypeWeight struct { + InstanceType *string `json:"instanceType,omitempty"` + Weight *int `json:"weightedCapacity,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AvailabilityZone struct { + Name *string `json:"name,omitempty"` + SubnetID *string `json:"subnetId,omitempty"` + PlacementGroupName *string `json:"placementGroupName,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LaunchSpecification struct { + LoadBalancerNames []string `json:"loadBalancerNames,omitempty"` + LoadBalancersConfig *LoadBalancersConfig `json:"loadBalancersConfig,omitempty"` + SecurityGroupIDs []string `json:"securityGroupIds,omitempty"` + HealthCheckType *string `json:"healthCheckType,omitempty"` + HealthCheckGracePeriod *int `json:"healthCheckGracePeriod,omitempty"` + HealthCheckUnhealthyDurationBeforeReplacement *int `json:"healthCheckUnhealthyDurationBeforeReplacement,omitempty"` + ImageID *string `json:"imageId,omitempty"` + KeyPair *string `json:"keyPair,omitempty"` + UserData *string 
`json:"userData,omitempty"` + ShutdownScript *string `json:"shutdownScript,omitempty"` + Tenancy *string `json:"tenancy,omitempty"` + Monitoring *bool `json:"monitoring,omitempty"` + EBSOptimized *bool `json:"ebsOptimized,omitempty"` + IAMInstanceProfile *IAMInstanceProfile `json:"iamRole,omitempty"` + BlockDeviceMappings []*BlockDeviceMapping `json:"blockDeviceMappings,omitempty"` + NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + Tags []*Tag `json:"tags,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LoadBalancersConfig struct { + LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LoadBalancer struct { + Name *string `json:"name,omitempty"` + Arn *string `json:"arn,omitempty"` + Type *string `json:"type,omitempty"` + BalancerID *string `json:"balancerId,omitempty"` + TargetSetID *string `json:"targetSetId,omitempty"` + ZoneAwareness *bool `json:"azAwareness,omitempty"` + AutoWeight *bool `json:"autoWeight,omitempty"` + + forceSendFields []string + nullFields []string +} + +type NetworkInterface struct { + ID *string `json:"networkInterfaceId,omitempty"` + Description *string `json:"description,omitempty"` + DeviceIndex *int `json:"deviceIndex,omitempty"` + SecondaryPrivateIPAddressCount *int `json:"secondaryPrivateIpAddressCount,omitempty"` + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty"` + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + SecurityGroupsIDs []string `json:"groups,omitempty"` + PrivateIPAddress *string `json:"privateIpAddress,omitempty"` + SubnetID *string `json:"subnetId,omitempty"` + + forceSendFields []string + nullFields []string +} + +type BlockDeviceMapping struct { + DeviceName *string `json:"deviceName,omitempty"` + VirtualName *string `json:"virtualName,omitempty"` + EBS *EBS `json:"ebs,omitempty"` + + forceSendFields []string + nullFields []string +} + +type EBS 
struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + Encrypted *bool `json:"encrypted,omitempty"` + KmsKeyId *string `json:"kmsKeyId,omitempty"` + SnapshotID *string `json:"snapshotId,omitempty"` + VolumeType *string `json:"volumeType,omitempty"` + VolumeSize *int `json:"volumeSize,omitempty"` + IOPS *int `json:"iops,omitempty"` + + forceSendFields []string + nullFields []string +} + +type IAMInstanceProfile struct { + Name *string `json:"name,omitempty"` + Arn *string `json:"arn,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Instance struct { + ID *string `json:"instanceId,omitempty"` + SpotRequestID *string `json:"spotInstanceRequestId,omitempty"` + InstanceType *string `json:"instanceType,omitempty"` + Status *string `json:"status,omitempty"` + Product *string `json:"product,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + PrivateIP *string `json:"privateIp,omitempty"` + PublicIP *string `json:"publicIp,omitempty"` + CreatedAt *time.Time `json:"createdAt,omitempty"` +} + +type RollStrategy struct { + Action *string `json:"action,omitempty"` + ShouldDrainInstances *bool `json:"shouldDrainInstances,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ListGroupsInput struct{} + +type ListGroupsOutput struct { + Groups []*Group `json:"groups,omitempty"` +} + +type CreateGroupInput struct { + Group *Group `json:"group,omitempty"` +} + +type CreateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type ReadGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type ReadGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type UpdateGroupInput struct { + Group *Group `json:"group,omitempty"` + ShouldResumeStateful *bool `json:"-"` +} + +type UpdateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type DeleteGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + StatefulDeallocation 
*StatefulDeallocation `json:"statefulDeallocation,omitempty"` +} + +type StatefulDeallocation struct { + ShouldDeleteImages *bool `json:"shouldDeleteImages,omitempty"` + ShouldDeleteNetworkInterfaces *bool `json:"shouldDeleteNetworkInterfaces,omitempty"` + ShouldDeleteVolumes *bool `json:"shouldDeleteVolumes,omitempty"` + ShouldDeleteSnapshots *bool `json:"shouldDeleteSnapshots,omitempty"` +} + +type DeleteGroupOutput struct{} + +type StatusGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type StatusGroupOutput struct { + Instances []*Instance `json:"instances,omitempty"` +} + +type DetachGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + InstanceIDs []string `json:"instancesToDetach,omitempty"` + ShouldDecrementTargetCapacity *bool `json:"shouldDecrementTargetCapacity,omitempty"` + ShouldTerminateInstances *bool `json:"shouldTerminateInstances,omitempty"` + DrainingTimeout *int `json:"drainingTimeout,omitempty"` +} + +type DetachGroupOutput struct{} + +type RollGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + BatchSizePercentage *int `json:"batchSizePercentage,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + HealthCheckType *string `json:"healthCheckType,omitempty"` + Strategy *RollStrategy `json:"strategy,omitempty"` +} + +type RollGroupOutput struct{} + +func groupFromJSON(in []byte) (*Group, error) { + b := new(Group) + if err := json.Unmarshal(in, b); err != nil { + return nil, err + } + return b, nil +} + +func groupsFromJSON(in []byte) ([]*Group, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + out := make([]*Group, len(rw.Response.Items)) + if len(out) == 0 { + return out, nil + } + for i, rb := range rw.Response.Items { + b, err := groupFromJSON(rb) + if err != nil { + return nil, err + } + out[i] = b + } + return out, nil +} + +func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) { + body, err := 
ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return groupsFromJSON(body) +} + +func instanceFromJSON(in []byte) (*Instance, error) { + b := new(Instance) + if err := json.Unmarshal(in, b); err != nil { + return nil, err + } + return b, nil +} + +func instancesFromJSON(in []byte) ([]*Instance, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + out := make([]*Instance, len(rw.Response.Items)) + if len(out) == 0 { + return out, nil + } + for i, rb := range rw.Response.Items { + b, err := instanceFromJSON(rb) + if err != nil { + return nil, err + } + out[i] = b + } + return out, nil +} + +func instancesFromHttpResponse(resp *http.Response) ([]*Instance, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return instancesFromJSON(body) +} + +func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) { + r := client.NewRequest(http.MethodGet, "/aws/ec2/group") + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &ListGroupsOutput{Groups: gs}, nil +} + +func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) { + r := client.NewRequest(http.MethodPost, "/aws/ec2/group") + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(CreateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{ + "groupId": 
spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(ReadGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.Group.ID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.Group.ID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + if input.ShouldResumeStateful != nil { + r.Params.Set("shouldResumeStateful", + strconv.FormatBool(spotinst.BoolValue(input.ShouldResumeStateful))) + } + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(UpdateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodDelete, path) + + if input.StatefulDeallocation != nil { + r.Obj = &DeleteGroupInput{ + StatefulDeallocation: input.StatefulDeallocation, + } + } + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DeleteGroupOutput{}, nil +} + +func (s *ServiceOp) 
Status(ctx context.Context, input *StatusGroupInput) (*StatusGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/status", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + is, err := instancesFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &StatusGroupOutput{Instances: is}, nil +} + +func (s *ServiceOp) Detach(ctx context.Context, input *DetachGroupInput) (*DetachGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/detachInstances", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DetachGroupOutput{}, nil +} + +func (s *ServiceOp) Roll(ctx context.Context, input *RollGroupInput) (*RollGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/roll", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. 
+ input.GroupID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &RollGroupOutput{}, nil +} + +// region: Elastic Beanstalk + +type ImportBeanstalkInput struct { + EnvironmentName *string `json:"environmentName,omitempty"` + Region *string `json:"region,omitempty"` +} + +type ImportBeanstalkOutput struct { + Group *Group `json:"group,omitempty"` +} + +type BeanstalkMaintenanceInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type BeanstalkMaintenanceItem struct { + Status *string `json:"status,omitempty"` +} + +type BeanstalkMaintenanceOutput struct { + Items []*BeanstalkMaintenanceItem `json:"items,omitempty"` + Status *string `json:"status,omitempty"` +} + +func beanstalkMaintResponseFromJSON(in []byte) (*BeanstalkMaintenanceOutput, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + + var retVal BeanstalkMaintenanceOutput + retVal.Items = make([]*BeanstalkMaintenanceItem, len(rw.Response.Items)) + for i, rb := range rw.Response.Items { + b, err := beanstalkMaintItemFromJSON(rb) + if err != nil { + return nil, err + } + retVal.Items[i] = b + retVal.Status = b.Status + } + return &retVal, nil +} + +func beanstalkMaintItemFromJSON(in []byte) (*BeanstalkMaintenanceItem, error) { + var rw *BeanstalkMaintenanceItem + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + return rw, nil +} + +func beanstalkMaintFromHttpResponse(resp *http.Response) (*BeanstalkMaintenanceOutput, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return beanstalkMaintResponseFromJSON(body) +} + +func (s *ServiceOp) ImportBeanstalkEnv(ctx context.Context, input *ImportBeanstalkInput) (*ImportBeanstalkOutput, error) { + path := "/aws/ec2/group/beanstalk/import" + r := client.NewRequest(http.MethodGet, path) + + 
r.Params["environmentName"] = []string{spotinst.StringValue(input.EnvironmentName)} + r.Params["region"] = []string{spotinst.StringValue(input.Region)} + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(ImportBeanstalkOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) StartBeanstalkMaintenance(ctx context.Context, input *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/start", uritemplates.Values{ + "groupID": spotinst.StringValue(input.GroupID), + }) + + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodPut, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + fmt.Printf("Status: %v\n", resp.Status) + + return &BeanstalkMaintenanceOutput{}, nil +} + +func (s *ServiceOp) GetBeanstalkMaintenanceStatus(ctx context.Context, input *BeanstalkMaintenanceInput) (*string, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/status", uritemplates.Values{ + "groupID": spotinst.StringValue(input.GroupID), + }) + + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + output, err := beanstalkMaintFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return output.Status, nil +} + +func (s *ServiceOp) FinishBeanstalkMaintenance(ctx context.Context, input *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/finish", uritemplates.Values{ + "groupID": 
spotinst.StringValue(input.GroupID), + }) + + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodPut, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + fmt.Printf("Status: %v\n", resp.Status) + + return &BeanstalkMaintenanceOutput{}, nil +} + +// endregion + +// region Group + +func (o *Group) MarshalJSON() ([]byte, error) { + type noMethod Group + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Group) SetId(v *string) *Group { + if o.ID = v; o.ID == nil { + o.nullFields = append(o.nullFields, "ID") + } + return o +} + +func (o *Group) SetName(v *string) *Group { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Group) SetDescription(v *string) *Group { + if o.Description = v; o.Description == nil { + o.nullFields = append(o.nullFields, "Description") + } + return o +} + +func (o *Group) SetCapacity(v *Capacity) *Group { + if o.Capacity = v; o.Capacity == nil { + o.nullFields = append(o.nullFields, "Capacity") + } + return o +} + +func (o *Group) SetCompute(v *Compute) *Group { + if o.Compute = v; o.Compute == nil { + o.nullFields = append(o.nullFields, "Compute") + } + return o +} + +func (o *Group) SetStrategy(v *Strategy) *Group { + if o.Strategy = v; o.Strategy == nil { + o.nullFields = append(o.nullFields, "Strategy") + } + return o +} + +func (o *Group) SetScaling(v *Scaling) *Group { + if o.Scaling = v; o.Scaling == nil { + o.nullFields = append(o.nullFields, "Scaling") + } + return o +} + +func (o *Group) SetScheduling(v *Scheduling) *Group { + if o.Scheduling = v; o.Scheduling == nil { + o.nullFields = append(o.nullFields, "Scheduling") + } + return o +} + +func (o *Group) SetIntegration(v *Integration) *Group { + if o.Integration = v; o.Integration == nil { + o.nullFields = append(o.nullFields, "Integration") + } + return o +} + +func (o 
*Group) SetRegion(v *string) *Group { + if o.Region = v; o.Region == nil { + o.nullFields = append(o.nullFields, "Region") + } + return o +} + +// endregion + +// region Integration + +func (o *Integration) MarshalJSON() ([]byte, error) { + type noMethod Integration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Integration) SetRoute53(v *Route53Integration) *Integration { + if o.Route53 = v; o.Route53 == nil { + o.nullFields = append(o.nullFields, "Route53") + } + return o +} + +func (o *Integration) SetDockerSwarm(v *DockerSwarmIntegration) *Integration { + if o.DockerSwarm = v; o.DockerSwarm == nil { + o.nullFields = append(o.nullFields, "DockerSwarm") + } + return o +} + +func (o *Integration) SetEC2ContainerService(v *EC2ContainerServiceIntegration) *Integration { + if o.EC2ContainerService = v; o.EC2ContainerService == nil { + o.nullFields = append(o.nullFields, "EC2ContainerService") + } + return o +} + +func (o *Integration) SetElasticBeanstalk(v *ElasticBeanstalkIntegration) *Integration { + if o.ElasticBeanstalk = v; o.ElasticBeanstalk == nil { + o.nullFields = append(o.nullFields, "ElasticBeanstalk") + } + return o +} + +func (o *Integration) SetCodeDeploy(v *CodeDeployIntegration) *Integration { + if o.CodeDeploy = v; o.CodeDeploy == nil { + o.nullFields = append(o.nullFields, "CodeDeploy") + } + return o +} + +func (o *Integration) SetOpsWorks(v *OpsWorksIntegration) *Integration { + if o.OpsWorks = v; o.OpsWorks == nil { + o.nullFields = append(o.nullFields, "OpsWorks") + } + return o +} + +func (o *Integration) SetRancher(v *RancherIntegration) *Integration { + if o.Rancher = v; o.Rancher == nil { + o.nullFields = append(o.nullFields, "Rancher") + } + return o +} + +func (o *Integration) SetKubernetes(v *KubernetesIntegration) *Integration { + if o.Kubernetes = v; o.Kubernetes == nil { + o.nullFields = append(o.nullFields, "Kubernetes") + } + return o +} + +func (o *Integration) 
SetMesosphere(v *MesosphereIntegration) *Integration { + if o.Mesosphere = v; o.Mesosphere == nil { + o.nullFields = append(o.nullFields, "Mesosphere") + } + return o +} + +func (o *Integration) SetMultai(v *MultaiIntegration) *Integration { + if o.Multai = v; o.Multai == nil { + o.nullFields = append(o.nullFields, "Multai") + } + return o +} + +func (o *Integration) SetNomad(v *NomadIntegration) *Integration { + if o.Nomad = v; o.Nomad == nil { + o.nullFields = append(o.nullFields, "Nomad") + } + return o +} + +func (o *Integration) SetChef(v *ChefIntegration) *Integration { + if o.Chef = v; o.Chef == nil { + o.nullFields = append(o.nullFields, "Chef") + } + return o +} + +func (o *Integration) SetGitlab(v *GitlabIntegration) *Integration { + if o.Gitlab = v; o.Gitlab == nil { + o.nullFields = append(o.nullFields, "Gitlab") + } + return o +} + +// endregion + +// region RancherIntegration + +func (o *RancherIntegration) MarshalJSON() ([]byte, error) { + type noMethod RancherIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RancherIntegration) SetMasterHost(v *string) *RancherIntegration { + if o.MasterHost = v; o.MasterHost == nil { + o.nullFields = append(o.nullFields, "MasterHost") + } + return o +} + +func (o *RancherIntegration) SetAccessKey(v *string) *RancherIntegration { + if o.AccessKey = v; o.AccessKey == nil { + o.nullFields = append(o.nullFields, "AccessKey") + } + return o +} + +func (o *RancherIntegration) SetSecretKey(v *string) *RancherIntegration { + if o.SecretKey = v; o.SecretKey == nil { + o.nullFields = append(o.nullFields, "SecretKey") + } + return o +} + +func (o *RancherIntegration) SetVersion(v *string) *RancherIntegration { + if o.Version = v; o.Version == nil { + o.nullFields = append(o.nullFields, "Version") + } + return o +} + +// endregion + +// region ElasticBeanstalkIntegration + +func (o *ElasticBeanstalkIntegration) MarshalJSON() ([]byte, error) { + type noMethod 
ElasticBeanstalkIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ElasticBeanstalkIntegration) SetEnvironmentID(v *string) *ElasticBeanstalkIntegration { + if o.EnvironmentID = v; o.EnvironmentID == nil { + o.nullFields = append(o.nullFields, "EnvironmentID") + } + return o +} + +// endregion + +// region EC2ContainerServiceIntegration + +func (o *EC2ContainerServiceIntegration) MarshalJSON() ([]byte, error) { + type noMethod EC2ContainerServiceIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *EC2ContainerServiceIntegration) SetClusterName(v *string) *EC2ContainerServiceIntegration { + if o.ClusterName = v; o.ClusterName == nil { + o.nullFields = append(o.nullFields, "ClusterName") + } + return o +} + +func (o *AutoScaleECS) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleECS + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *EC2ContainerServiceIntegration) SetAutoScale(v *AutoScaleECS) *EC2ContainerServiceIntegration { + if o.AutoScale = v; o.AutoScale == nil { + o.nullFields = append(o.nullFields, "AutoScale") + } + return o +} + +func (o *AutoScaleECS) SetAttributes(v []*AutoScaleAttributes) *AutoScaleECS { + if o.Attributes = v; o.Attributes == nil { + o.nullFields = append(o.nullFields, "Attributes") + } + return o +} + +func (o *AutoScaleECS) SetShouldScaleDownNonServiceTasks(v *bool) *AutoScaleECS { + if o.ShouldScaleDownNonServiceTasks = v; o.ShouldScaleDownNonServiceTasks == nil { + o.nullFields = append(o.nullFields, "ShouldScaleDownNonServiceTasks") + } + return o +} + +// endregion + +// region Docker Swarm + +func (o *DockerSwarmIntegration) MarshalJSON() ([]byte, error) { + type noMethod DockerSwarmIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *DockerSwarmIntegration) SetMasterHost(v 
*string) *DockerSwarmIntegration { + if o.MasterHost = v; o.MasterHost == nil { + o.nullFields = append(o.nullFields, "MasterHost") + } + return o +} + +func (o *DockerSwarmIntegration) SetMasterPort(v *int) *DockerSwarmIntegration { + if o.MasterPort = v; o.MasterPort == nil { + o.nullFields = append(o.nullFields, "MasterPort") + } + return o +} + +func (o *DockerSwarmIntegration) SetAutoScale(v *AutoScaleDockerSwarm) *DockerSwarmIntegration { + if o.AutoScale = v; o.AutoScale == nil { + o.nullFields = append(o.nullFields, "AutoScale") + } + return o +} + +func (o *AutoScaleDockerSwarm) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleDockerSwarm + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// end region + +// region Route53 + +func (o *Route53Integration) MarshalJSON() ([]byte, error) { + type noMethod Route53Integration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Route53Integration) SetDomains(v []*Domain) *Route53Integration { + if o.Domains = v; o.Domains == nil { + o.nullFields = append(o.nullFields, "Domains") + } + return o +} + +// endregion + +// region Domain + +func (o *Domain) MarshalJSON() ([]byte, error) { + type noMethod Domain + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Domain) SetHostedZoneID(v *string) *Domain { + if o.HostedZoneID = v; o.HostedZoneID == nil { + o.nullFields = append(o.nullFields, "HostedZoneID") + } + return o +} + +func (o *Domain) SetRecordSets(v []*RecordSet) *Domain { + if o.RecordSets = v; o.RecordSets == nil { + o.nullFields = append(o.nullFields, "RecordSets") + } + return o +} + +// endregion + +// region RecordSets + +func (o *RecordSet) MarshalJSON() ([]byte, error) { + type noMethod RecordSet + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RecordSet) SetUsePublicIP(v *bool) *RecordSet 
{ + if o.UsePublicIP = v; o.UsePublicIP == nil { + o.nullFields = append(o.nullFields, "UsePublicIP") + } + return o +} + +func (o *RecordSet) SetName(v *string) *RecordSet { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +// endregion + +// region AutoScale + +func (o *AutoScale) MarshalJSON() ([]byte, error) { + type noMethod AutoScale + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScale) SetIsEnabled(v *bool) *AutoScale { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +func (o *AutoScale) SetIsAutoConfig(v *bool) *AutoScale { + if o.IsAutoConfig = v; o.IsAutoConfig == nil { + o.nullFields = append(o.nullFields, "IsAutoConfig") + } + return o +} + +func (o *AutoScale) SetCooldown(v *int) *AutoScale { + if o.Cooldown = v; o.Cooldown == nil { + o.nullFields = append(o.nullFields, "Cooldown") + } + return o +} + +func (o *AutoScale) SetHeadroom(v *AutoScaleHeadroom) *AutoScale { + if o.Headroom = v; o.Headroom == nil { + o.nullFields = append(o.nullFields, "Headroom") + } + return o +} + +func (o *AutoScale) SetDown(v *AutoScaleDown) *AutoScale { + if o.Down = v; o.Down == nil { + o.nullFields = append(o.nullFields, "Down") + } + return o +} + +// endregion + +// region AutoScaleHeadroom + +func (o *AutoScaleHeadroom) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleHeadroom + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleHeadroom) SetCPUPerUnit(v *int) *AutoScaleHeadroom { + if o.CPUPerUnit = v; o.CPUPerUnit == nil { + o.nullFields = append(o.nullFields, "CPUPerUnit") + } + return o +} + +func (o *AutoScaleHeadroom) SetMemoryPerUnit(v *int) *AutoScaleHeadroom { + if o.MemoryPerUnit = v; o.MemoryPerUnit == nil { + o.nullFields = append(o.nullFields, "MemoryPerUnit") + } + return o +} + +func (o *AutoScaleHeadroom) 
SetNumOfUnits(v *int) *AutoScaleHeadroom { + if o.NumOfUnits = v; o.NumOfUnits == nil { + o.nullFields = append(o.nullFields, "NumOfUnits") + } + return o +} + +// endregion + +// region AutoScaleDown + +func (o *AutoScaleDown) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleDown + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleDown) SetEvaluationPeriods(v *int) *AutoScaleDown { + if o.EvaluationPeriods = v; o.EvaluationPeriods == nil { + o.nullFields = append(o.nullFields, "EvaluationPeriods") + } + return o +} + +// endregion + +// region AutoScaleConstraint + +func (o *AutoScaleConstraint) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleConstraint + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleConstraint) SetKey(v *string) *AutoScaleConstraint { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *AutoScaleConstraint) SetValue(v *string) *AutoScaleConstraint { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// region AutoScaleLabel + +func (o *AutoScaleLabel) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleLabel + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleLabel) SetKey(v *string) *AutoScaleLabel { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *AutoScaleLabel) SetValue(v *string) *AutoScaleLabel { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// region KubernetesIntegration + +func (o *KubernetesIntegration) MarshalJSON() ([]byte, error) { + type noMethod KubernetesIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o 
*KubernetesIntegration) SetIntegrationMode(v *string) *KubernetesIntegration { + if o.IntegrationMode = v; o.IntegrationMode == nil { + o.nullFields = append(o.nullFields, "IntegrationMode") + } + return o +} + +func (o *KubernetesIntegration) SetClusterIdentifier(v *string) *KubernetesIntegration { + if o.ClusterIdentifier = v; o.ClusterIdentifier == nil { + o.nullFields = append(o.nullFields, "ClusterIdentifier") + } + return o +} + +func (o *KubernetesIntegration) SetServer(v *string) *KubernetesIntegration { + if o.Server = v; o.Server == nil { + o.nullFields = append(o.nullFields, "Server") + } + return o +} + +func (o *KubernetesIntegration) SetToken(v *string) *KubernetesIntegration { + if o.Token = v; o.Token == nil { + o.nullFields = append(o.nullFields, "Token") + } + return o +} + +func (o *KubernetesIntegration) SetAutoScale(v *AutoScaleKubernetes) *KubernetesIntegration { + if o.AutoScale = v; o.AutoScale == nil { + o.nullFields = append(o.nullFields, "AutoScale") + } + return o +} + +func (o *AutoScaleKubernetes) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleKubernetes + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleKubernetes) SetLabels(v []*AutoScaleLabel) *AutoScaleKubernetes { + if o.Labels = v; o.Labels == nil { + o.nullFields = append(o.nullFields, "Labels") + } + return o +} + +// endregion + +// region MesosphereIntegration + +func (o *MesosphereIntegration) MarshalJSON() ([]byte, error) { + type noMethod MesosphereIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *MesosphereIntegration) SetServer(v *string) *MesosphereIntegration { + if o.Server = v; o.Server == nil { + o.nullFields = append(o.nullFields, "Server") + } + return o +} + +// endregion + +// region MultaiIntegration + +func (o *MultaiIntegration) MarshalJSON() ([]byte, error) { + type noMethod MultaiIntegration + raw := noMethod(*o) + 
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *MultaiIntegration) SetDeploymentId(v *string) *MultaiIntegration { + if o.DeploymentID = v; o.DeploymentID == nil { + o.nullFields = append(o.nullFields, "DeploymentID") + } + return o +} + +// endregion + +// region NomadIntegration + +func (o *NomadIntegration) MarshalJSON() ([]byte, error) { + type noMethod NomadIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *NomadIntegration) SetMasterHost(v *string) *NomadIntegration { + if o.MasterHost = v; o.MasterHost == nil { + o.nullFields = append(o.nullFields, "MasterHost") + } + return o +} + +func (o *NomadIntegration) SetMasterPort(v *int) *NomadIntegration { + if o.MasterPort = v; o.MasterPort == nil { + o.nullFields = append(o.nullFields, "MasterPort") + } + return o +} + +func (o *NomadIntegration) SetAclToken(v *string) *NomadIntegration { + if o.ACLToken = v; o.ACLToken == nil { + o.nullFields = append(o.nullFields, "ACLToken") + } + return o +} + +func (o *NomadIntegration) SetAutoScale(v *AutoScaleNomad) *NomadIntegration { + if o.AutoScale = v; o.AutoScale == nil { + o.nullFields = append(o.nullFields, "AutoScale") + } + return o +} + +func (o *AutoScaleNomad) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleNomad + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleNomad) SetConstraints(v []*AutoScaleConstraint) *AutoScaleNomad { + if o.Constraints = v; o.Constraints == nil { + o.nullFields = append(o.nullFields, "Constraints") + } + return o +} + +// endregion + +// region ChefIntegration + +func (o *ChefIntegration) MarshalJSON() ([]byte, error) { + type noMethod ChefIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ChefIntegration) SetServer(v *string) *ChefIntegration { + if o.Server = v; o.Server == nil { + o.nullFields = 
append(o.nullFields, "Server") + } + return o +} + +func (o *ChefIntegration) SetOrganization(v *string) *ChefIntegration { + if o.Organization = v; o.Organization == nil { + o.nullFields = append(o.nullFields, "Organization") + } + return o +} + +func (o *ChefIntegration) SetUser(v *string) *ChefIntegration { + if o.User = v; o.User == nil { + o.nullFields = append(o.nullFields, "User") + } + return o +} + +func (o *ChefIntegration) SetPEMKey(v *string) *ChefIntegration { + if o.PEMKey = v; o.PEMKey == nil { + o.nullFields = append(o.nullFields, "PEMKey") + } + return o +} + +func (o *ChefIntegration) SetVersion(v *string) *ChefIntegration { + if o.Version = v; o.Version == nil { + o.nullFields = append(o.nullFields, "Version") + } + return o +} + +// endregion + +//region Gitlab +func (o *GitlabIntegration) MarshalJSON() ([]byte, error) { + type noMethod GitlabIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *GitlabIntegration) SetRunner(v *GitlabRunner) *GitlabIntegration { + if o.Runner = v; o.Runner == nil { + o.nullFields = append(o.nullFields, "Runner") + } + return o +} + +func (o *GitlabRunner) MarshalJSON() ([]byte, error) { + type noMethod GitlabRunner + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *GitlabRunner) SetIsEnabled(v *bool) *GitlabRunner { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +//endregion + +// region Scheduling + +func (o *Scheduling) MarshalJSON() ([]byte, error) { + type noMethod Scheduling + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Scheduling) SetTasks(v []*Task) *Scheduling { + if o.Tasks = v; o.Tasks == nil { + o.nullFields = append(o.nullFields, "Tasks") + } + return o +} + +// endregion + +// region Task + +func (o *Task) MarshalJSON() ([]byte, error) { + type noMethod Task + 
raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Task) SetIsEnabled(v *bool) *Task { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +func (o *Task) SetType(v *string) *Task { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +func (o *Task) SetFrequency(v *string) *Task { + if o.Frequency = v; o.Frequency == nil { + o.nullFields = append(o.nullFields, "Frequency") + } + return o +} + +func (o *Task) SetCronExpression(v *string) *Task { + if o.CronExpression = v; o.CronExpression == nil { + o.nullFields = append(o.nullFields, "CronExpression") + } + return o +} + +func (o *Task) SetStartTime(v *string) *Task { + if o.StartTime = v; o.StartTime == nil { + o.nullFields = append(o.nullFields, "StartTime") + } + return o +} + +func (o *Task) SetScaleTargetCapacity(v *int) *Task { + if o.ScaleTargetCapacity = v; o.ScaleTargetCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleTargetCapacity") + } + return o +} + +func (o *Task) SetScaleMinCapacity(v *int) *Task { + if o.ScaleMinCapacity = v; o.ScaleMinCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleMinCapacity") + } + return o +} + +func (o *Task) SetScaleMaxCapacity(v *int) *Task { + if o.ScaleMaxCapacity = v; o.ScaleMaxCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleMaxCapacity") + } + return o +} + +func (o *Task) SetBatchSizePercentage(v *int) *Task { + if o.BatchSizePercentage = v; o.BatchSizePercentage == nil { + o.nullFields = append(o.nullFields, "BatchSizePercentage") + } + return o +} + +func (o *Task) SetGracePeriod(v *int) *Task { + if o.GracePeriod = v; o.GracePeriod == nil { + o.nullFields = append(o.nullFields, "GracePeriod") + } + return o +} + +func (o *Task) SetTargetCapacity(v *int) *Task { + if o.TargetCapacity = v; o.TargetCapacity == nil { + o.nullFields = append(o.nullFields, "TargetCapacity") 
+ } + return o +} + +func (o *Task) SetMinCapacity(v *int) *Task { + if o.MinCapacity = v; o.MinCapacity == nil { + o.nullFields = append(o.nullFields, "MinCapacity") + } + return o +} + +func (o *Task) SetMaxCapacity(v *int) *Task { + if o.MaxCapacity = v; o.MaxCapacity == nil { + o.nullFields = append(o.nullFields, "MaxCapacity") + } + return o +} + +// endregion + +// region Scaling + +func (o *Scaling) MarshalJSON() ([]byte, error) { + type noMethod Scaling + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Scaling) SetUp(v []*ScalingPolicy) *Scaling { + if o.Up = v; o.Up == nil { + o.nullFields = append(o.nullFields, "Up") + } + return o +} + +func (o *Scaling) SetDown(v []*ScalingPolicy) *Scaling { + if o.Down = v; o.Down == nil { + o.nullFields = append(o.nullFields, "Down") + } + return o +} + +func (o *Scaling) SetTarget(v []*ScalingPolicy) *Scaling { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +// endregion + +// region ScalingPolicy + +func (o *ScalingPolicy) MarshalJSON() ([]byte, error) { + type noMethod ScalingPolicy + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ScalingPolicy) SetPolicyName(v *string) *ScalingPolicy { + if o.PolicyName = v; o.PolicyName == nil { + o.nullFields = append(o.nullFields, "PolicyName") + } + return o +} + +func (o *ScalingPolicy) SetMetricName(v *string) *ScalingPolicy { + if o.MetricName = v; o.MetricName == nil { + o.nullFields = append(o.nullFields, "MetricName") + } + return o +} + +func (o *ScalingPolicy) SetNamespace(v *string) *ScalingPolicy { + if o.Namespace = v; o.Namespace == nil { + o.nullFields = append(o.nullFields, "Namespace") + } + return o +} + +func (o *ScalingPolicy) SetSource(v *string) *ScalingPolicy { + if o.Source = v; o.Source == nil { + o.nullFields = append(o.nullFields, "Source") + } + return o +} + +func (o *ScalingPolicy) 
SetStatistic(v *string) *ScalingPolicy { + if o.Statistic = v; o.Statistic == nil { + o.nullFields = append(o.nullFields, "Statistic") + } + return o +} + +func (o *ScalingPolicy) SetUnit(v *string) *ScalingPolicy { + if o.Unit = v; o.Unit == nil { + o.nullFields = append(o.nullFields, "Unit") + } + return o +} + +func (o *ScalingPolicy) SetThreshold(v *float64) *ScalingPolicy { + if o.Threshold = v; o.Threshold == nil { + o.nullFields = append(o.nullFields, "Threshold") + } + return o +} + +func (o *ScalingPolicy) SetAdjustment(v *int) *ScalingPolicy { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +func (o *ScalingPolicy) SetMinTargetCapacity(v *int) *ScalingPolicy { + if o.MinTargetCapacity = v; o.MinTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MinTargetCapacity") + } + return o +} + +func (o *ScalingPolicy) SetMaxTargetCapacity(v *int) *ScalingPolicy { + if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MaxTargetCapacity") + } + return o +} + +func (o *ScalingPolicy) SetEvaluationPeriods(v *int) *ScalingPolicy { + if o.EvaluationPeriods = v; o.EvaluationPeriods == nil { + o.nullFields = append(o.nullFields, "EvaluationPeriods") + } + return o +} + +func (o *ScalingPolicy) SetPeriod(v *int) *ScalingPolicy { + if o.Period = v; o.Period == nil { + o.nullFields = append(o.nullFields, "Period") + } + return o +} + +func (o *ScalingPolicy) SetCooldown(v *int) *ScalingPolicy { + if o.Cooldown = v; o.Cooldown == nil { + o.nullFields = append(o.nullFields, "Cooldown") + } + return o +} + +func (o *ScalingPolicy) SetOperator(v *string) *ScalingPolicy { + if o.Operator = v; o.Operator == nil { + o.nullFields = append(o.nullFields, "Operator") + } + return o +} + +func (o *ScalingPolicy) SetDimensions(v []*Dimension) *ScalingPolicy { + if o.Dimensions = v; o.Dimensions == nil { + o.nullFields = append(o.nullFields, "Dimensions") + } + return 
o +} + +func (o *ScalingPolicy) SetAction(v *Action) *ScalingPolicy { + if o.Action = v; o.Action == nil { + o.nullFields = append(o.nullFields, "Action") + } + return o +} + +func (o *ScalingPolicy) SetTarget(v *float64) *ScalingPolicy { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +// endregion + +// region Action + +func (o *Action) MarshalJSON() ([]byte, error) { + type noMethod Action + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Action) SetType(v *string) *Action { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +func (o *Action) SetAdjustment(v *string) *Action { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +func (o *Action) SetMinTargetCapacity(v *string) *Action { + if o.MinTargetCapacity = v; o.MinTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MinTargetCapacity") + } + return o +} + +func (o *Action) SetMaxTargetCapacity(v *string) *Action { + if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MaxTargetCapacity") + } + return o +} + +func (o *Action) SetMaximum(v *string) *Action { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *Action) SetMinimum(v *string) *Action { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o *Action) SetTarget(v *string) *Action { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +// endregion + +// region Dimension + +func (o *Dimension) MarshalJSON() ([]byte, error) { + type noMethod Dimension + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Dimension) SetName(v *string) *Dimension { + 
if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Dimension) SetValue(v *string) *Dimension { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// region Strategy + +func (o *Strategy) MarshalJSON() ([]byte, error) { + type noMethod Strategy + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Strategy) SetRisk(v *float64) *Strategy { + if o.Risk = v; o.Risk == nil { + o.nullFields = append(o.nullFields, "Risk") + } + return o +} + +func (o *Strategy) SetOnDemandCount(v *int) *Strategy { + if o.OnDemandCount = v; o.OnDemandCount == nil { + o.nullFields = append(o.nullFields, "OnDemandCount") + } + return o +} + +func (o *Strategy) SetDrainingTimeout(v *int) *Strategy { + if o.DrainingTimeout = v; o.DrainingTimeout == nil { + o.nullFields = append(o.nullFields, "DrainingTimeout") + } + return o +} + +func (o *Strategy) SetAvailabilityVsCost(v *string) *Strategy { + if o.AvailabilityVsCost = v; o.AvailabilityVsCost == nil { + o.nullFields = append(o.nullFields, "AvailabilityVsCost") + } + return o +} + +func (o *Strategy) SetLifetimePeriod(v *string) *Strategy { + if o.LifetimePeriod = v; o.LifetimePeriod == nil { + o.nullFields = append(o.nullFields, "LifetimePeriod") + } + return o +} + +func (o *Strategy) SetUtilizeReservedInstances(v *bool) *Strategy { + if o.UtilizeReservedInstances = v; o.UtilizeReservedInstances == nil { + o.nullFields = append(o.nullFields, "UtilizeReservedInstances") + } + return o +} + +func (o *Strategy) SetFallbackToOnDemand(v *bool) *Strategy { + if o.FallbackToOnDemand = v; o.FallbackToOnDemand == nil { + o.nullFields = append(o.nullFields, "FallbackToOnDemand") + } + return o +} + +func (o *Strategy) SetSpinUpTime(v *int) *Strategy { + if o.SpinUpTime = v; o.SpinUpTime == nil { + o.nullFields = append(o.nullFields, "SpinUpTime") + } + return o +} + +func (o 
*Strategy) SetSignals(v []*Signal) *Strategy { + if o.Signals = v; o.Signals == nil { + o.nullFields = append(o.nullFields, "Signals") + } + return o +} + +func (o *Strategy) SetPersistence(v *Persistence) *Strategy { + if o.Persistence = v; o.Persistence == nil { + o.nullFields = append(o.nullFields, "Persistence") + } + return o +} + +func (o *Strategy) SetRevertToSpot(v *RevertToSpot) *Strategy { + if o.RevertToSpot = v; o.RevertToSpot == nil { + o.nullFields = append(o.nullFields, "RevertToSpot") + } + return o +} + +// endregion + +// region Persistence + +func (o *Persistence) MarshalJSON() ([]byte, error) { + type noMethod Persistence + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Persistence) SetShouldPersistPrivateIP(v *bool) *Persistence { + if o.ShouldPersistPrivateIP = v; o.ShouldPersistPrivateIP == nil { + o.nullFields = append(o.nullFields, "ShouldPersistPrivateIP") + } + return o +} + +func (o *Persistence) SetShouldPersistBlockDevices(v *bool) *Persistence { + if o.ShouldPersistBlockDevices = v; o.ShouldPersistBlockDevices == nil { + o.nullFields = append(o.nullFields, "ShouldPersistBlockDevices") + } + return o +} + +func (o *Persistence) SetShouldPersistRootDevice(v *bool) *Persistence { + if o.ShouldPersistRootDevice = v; o.ShouldPersistRootDevice == nil { + o.nullFields = append(o.nullFields, "ShouldPersistRootDevice") + } + return o +} + +func (o *Persistence) SetBlockDevicesMode(v *string) *Persistence { + if o.BlockDevicesMode = v; o.BlockDevicesMode == nil { + o.nullFields = append(o.nullFields, "BlockDevicesMode") + } + return o +} + +// endregion + +// region RevertToSpot + +func (o *RevertToSpot) MarshalJSON() ([]byte, error) { + type noMethod RevertToSpot + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RevertToSpot) SetPerformAt(v *string) *RevertToSpot { + if o.PerformAt = v; o.PerformAt == nil { + o.nullFields = 
append(o.nullFields, "PerformAt") + } + return o +} + +func (o *RevertToSpot) SetTimeWindows(v []string) *RevertToSpot { + if o.TimeWindows = v; o.TimeWindows == nil { + o.nullFields = append(o.nullFields, "TimeWindows") + } + return o +} + +// endregion + +// region Signal + +func (o *Signal) MarshalJSON() ([]byte, error) { + type noMethod Signal + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Signal) SetName(v *string) *Signal { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Signal) SetTimeout(v *int) *Signal { + if o.Timeout = v; o.Timeout == nil { + o.nullFields = append(o.nullFields, "Timeout") + } + return o +} + +// endregion + +// region Capacity + +func (o *Capacity) MarshalJSON() ([]byte, error) { + type noMethod Capacity + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Capacity) SetMinimum(v *int) *Capacity { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o *Capacity) SetMaximum(v *int) *Capacity { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *Capacity) SetTarget(v *int) *Capacity { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +func (o *Capacity) SetUnit(v *string) *Capacity { + if o.Unit = v; o.Unit == nil { + o.nullFields = append(o.nullFields, "Unit") + } + return o +} + +// endregion + +// region Compute + +func (o *Compute) MarshalJSON() ([]byte, error) { + type noMethod Compute + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Compute) SetProduct(v *string) *Compute { + if o.Product = v; o.Product == nil { + o.nullFields = append(o.nullFields, "Product") + } + + return o +} + +func (o *Compute) SetPrivateIPs(v []string) 
*Compute { + if o.PrivateIPs = v; o.PrivateIPs == nil { + o.nullFields = append(o.nullFields, "PrivateIPs") + } + + return o +} + +func (o *Compute) SetInstanceTypes(v *InstanceTypes) *Compute { + if o.InstanceTypes = v; o.InstanceTypes == nil { + o.nullFields = append(o.nullFields, "InstanceTypes") + } + return o +} + +func (o *Compute) SetLaunchSpecification(v *LaunchSpecification) *Compute { + if o.LaunchSpecification = v; o.LaunchSpecification == nil { + o.nullFields = append(o.nullFields, "LaunchSpecification") + } + return o +} + +func (o *Compute) SetAvailabilityZones(v []*AvailabilityZone) *Compute { + if o.AvailabilityZones = v; o.AvailabilityZones == nil { + o.nullFields = append(o.nullFields, "AvailabilityZones") + } + return o +} + +func (o *Compute) SetPreferredAvailabilityZones(v []string) *Compute { + if o.PreferredAvailabilityZones = v; o.PreferredAvailabilityZones == nil { + o.nullFields = append(o.nullFields, "PreferredAvailabilityZones") + } + return o +} + +func (o *Compute) SetElasticIPs(v []string) *Compute { + if o.ElasticIPs = v; o.ElasticIPs == nil { + o.nullFields = append(o.nullFields, "ElasticIPs") + } + return o +} + +func (o *Compute) SetEBSVolumePool(v []*EBSVolume) *Compute { + if o.EBSVolumePool = v; o.EBSVolumePool == nil { + o.nullFields = append(o.nullFields, "EBSVolumePool") + } + return o +} + +func (o *Compute) SetSubnetIDs(v []string) *Compute { + if o.SubnetIDs = v; o.SubnetIDs == nil { + o.nullFields = append(o.nullFields, "SubnetIDs") + } + return o +} + +// endregion + +// region EBSVolume + +func (o *EBSVolume) MarshalJSON() ([]byte, error) { + type noMethod EBSVolume + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *EBSVolume) SetDeviceName(v *string) *EBSVolume { + if o.DeviceName = v; o.DeviceName == nil { + o.nullFields = append(o.nullFields, "DeviceName") + } + return o +} + +func (o *EBSVolume) SetVolumeIDs(v []string) *EBSVolume { + if o.VolumeIDs = v; 
o.VolumeIDs == nil { + o.nullFields = append(o.nullFields, "VolumeIDs") + } + return o +} + +// endregion + +// region InstanceTypes + +func (o *InstanceTypes) MarshalJSON() ([]byte, error) { + type noMethod InstanceTypes + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *InstanceTypes) SetOnDemand(v *string) *InstanceTypes { + if o.OnDemand = v; o.OnDemand == nil { + o.nullFields = append(o.nullFields, "OnDemand") + } + return o +} + +func (o *InstanceTypes) SetSpot(v []string) *InstanceTypes { + if o.Spot = v; o.Spot == nil { + o.nullFields = append(o.nullFields, "Spot") + } + return o +} + +func (o *InstanceTypes) SetPreferredSpot(v []string) *InstanceTypes { + if o.PreferredSpot = v; o.PreferredSpot == nil { + o.nullFields = append(o.nullFields, "PreferredSpot") + } + return o +} + +func (o *InstanceTypes) SetWeights(v []*InstanceTypeWeight) *InstanceTypes { + if o.Weights = v; o.Weights == nil { + o.nullFields = append(o.nullFields, "Weights") + } + return o +} + +// endregion + +// region InstanceTypeWeight + +func (o *InstanceTypeWeight) MarshalJSON() ([]byte, error) { + type noMethod InstanceTypeWeight + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *InstanceTypeWeight) SetInstanceType(v *string) *InstanceTypeWeight { + if o.InstanceType = v; o.InstanceType == nil { + o.nullFields = append(o.nullFields, "InstanceType") + } + return o +} + +func (o *InstanceTypeWeight) SetWeight(v *int) *InstanceTypeWeight { + if o.Weight = v; o.Weight == nil { + o.nullFields = append(o.nullFields, "Weight") + } + return o +} + +// endregion + +// region AvailabilityZone + +func (o *AvailabilityZone) MarshalJSON() ([]byte, error) { + type noMethod AvailabilityZone + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AvailabilityZone) SetName(v *string) *AvailabilityZone { + if o.Name = v; o.Name == nil { + 
o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *AvailabilityZone) SetSubnetId(v *string) *AvailabilityZone { + if o.SubnetID = v; o.SubnetID == nil { + o.nullFields = append(o.nullFields, "SubnetID") + } + return o +} + +func (o *AvailabilityZone) SetPlacementGroupName(v *string) *AvailabilityZone { + if o.PlacementGroupName = v; o.PlacementGroupName == nil { + o.nullFields = append(o.nullFields, "PlacementGroupName") + } + return o +} + +// endregion + +// region LaunchSpecification + +func (o *LaunchSpecification) MarshalJSON() ([]byte, error) { + type noMethod LaunchSpecification + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LaunchSpecification) SetLoadBalancerNames(v []string) *LaunchSpecification { + if o.LoadBalancerNames = v; o.LoadBalancerNames == nil { + o.nullFields = append(o.nullFields, "LoadBalancerNames") + } + return o +} + +func (o *LaunchSpecification) SetLoadBalancersConfig(v *LoadBalancersConfig) *LaunchSpecification { + if o.LoadBalancersConfig = v; o.LoadBalancersConfig == nil { + o.nullFields = append(o.nullFields, "LoadBalancersConfig") + } + return o +} + +func (o *LaunchSpecification) SetSecurityGroupIDs(v []string) *LaunchSpecification { + if o.SecurityGroupIDs = v; o.SecurityGroupIDs == nil { + o.nullFields = append(o.nullFields, "SecurityGroupIDs") + } + return o +} + +func (o *LaunchSpecification) SetHealthCheckType(v *string) *LaunchSpecification { + if o.HealthCheckType = v; o.HealthCheckType == nil { + o.nullFields = append(o.nullFields, "HealthCheckType") + } + return o +} + +func (o *LaunchSpecification) SetHealthCheckGracePeriod(v *int) *LaunchSpecification { + if o.HealthCheckGracePeriod = v; o.HealthCheckGracePeriod == nil { + o.nullFields = append(o.nullFields, "HealthCheckGracePeriod") + } + return o +} + +func (o *LaunchSpecification) SetHealthCheckUnhealthyDurationBeforeReplacement(v *int) *LaunchSpecification { + if 
o.HealthCheckUnhealthyDurationBeforeReplacement = v; o.HealthCheckUnhealthyDurationBeforeReplacement == nil { + o.nullFields = append(o.nullFields, "HealthCheckUnhealthyDurationBeforeReplacement") + } + return o +} + +func (o *LaunchSpecification) SetImageId(v *string) *LaunchSpecification { + if o.ImageID = v; o.ImageID == nil { + o.nullFields = append(o.nullFields, "ImageID") + } + return o +} + +func (o *LaunchSpecification) SetKeyPair(v *string) *LaunchSpecification { + if o.KeyPair = v; o.KeyPair == nil { + o.nullFields = append(o.nullFields, "KeyPair") + } + return o +} + +func (o *LaunchSpecification) SetUserData(v *string) *LaunchSpecification { + if o.UserData = v; o.UserData == nil { + o.nullFields = append(o.nullFields, "UserData") + } + return o +} + +func (o *LaunchSpecification) SetShutdownScript(v *string) *LaunchSpecification { + if o.ShutdownScript = v; o.ShutdownScript == nil { + o.nullFields = append(o.nullFields, "ShutdownScript") + } + return o +} + +func (o *LaunchSpecification) SetTenancy(v *string) *LaunchSpecification { + if o.Tenancy = v; o.Tenancy == nil { + o.nullFields = append(o.nullFields, "Tenancy") + } + return o +} + +func (o *LaunchSpecification) SetMonitoring(v *bool) *LaunchSpecification { + if o.Monitoring = v; o.Monitoring == nil { + o.nullFields = append(o.nullFields, "Monitoring") + } + return o +} + +func (o *LaunchSpecification) SetEBSOptimized(v *bool) *LaunchSpecification { + if o.EBSOptimized = v; o.EBSOptimized == nil { + o.nullFields = append(o.nullFields, "EBSOptimized") + } + return o +} + +func (o *LaunchSpecification) SetIAMInstanceProfile(v *IAMInstanceProfile) *LaunchSpecification { + if o.IAMInstanceProfile = v; o.IAMInstanceProfile == nil { + o.nullFields = append(o.nullFields, "IAMInstanceProfile") + } + return o +} + +func (o *LaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *LaunchSpecification { + if o.BlockDeviceMappings = v; o.BlockDeviceMappings == nil { + o.nullFields = 
append(o.nullFields, "BlockDeviceMappings") + } + return o +} + +func (o *LaunchSpecification) SetNetworkInterfaces(v []*NetworkInterface) *LaunchSpecification { + if o.NetworkInterfaces = v; o.NetworkInterfaces == nil { + o.nullFields = append(o.nullFields, "NetworkInterfaces") + } + return o +} + +func (o *LaunchSpecification) SetTags(v []*Tag) *LaunchSpecification { + if o.Tags = v; o.Tags == nil { + o.nullFields = append(o.nullFields, "Tags") + } + return o +} + +// endregion + +// region LoadBalancersConfig + +func (o *LoadBalancersConfig) MarshalJSON() ([]byte, error) { + type noMethod LoadBalancersConfig + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LoadBalancersConfig) SetLoadBalancers(v []*LoadBalancer) *LoadBalancersConfig { + if o.LoadBalancers = v; o.LoadBalancers == nil { + o.nullFields = append(o.nullFields, "LoadBalancers") + } + return o +} + +// endregion + +// region LoadBalancer + +func (o *LoadBalancer) MarshalJSON() ([]byte, error) { + type noMethod LoadBalancer + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LoadBalancer) SetName(v *string) *LoadBalancer { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *LoadBalancer) SetArn(v *string) *LoadBalancer { + if o.Arn = v; o.Arn == nil { + o.nullFields = append(o.nullFields, "Arn") + } + return o +} + +func (o *LoadBalancer) SetType(v *string) *LoadBalancer { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +func (o *LoadBalancer) SetBalancerId(v *string) *LoadBalancer { + if o.BalancerID = v; o.BalancerID == nil { + o.nullFields = append(o.nullFields, "BalancerID") + } + return o +} + +func (o *LoadBalancer) SetTargetSetId(v *string) *LoadBalancer { + if o.TargetSetID = v; o.TargetSetID == nil { + o.nullFields = append(o.nullFields, "TargetSetID") + } + return o +} + +func (o 
*LoadBalancer) SetZoneAwareness(v *bool) *LoadBalancer { + if o.ZoneAwareness = v; o.ZoneAwareness == nil { + o.nullFields = append(o.nullFields, "ZoneAwareness") + } + return o +} + +func (o *LoadBalancer) SetAutoWeight(v *bool) *LoadBalancer { + if o.AutoWeight = v; o.AutoWeight == nil { + o.nullFields = append(o.nullFields, "AutoWeight") + } + return o +} + +// endregion + +// region NetworkInterface + +func (o *NetworkInterface) MarshalJSON() ([]byte, error) { + type noMethod NetworkInterface + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *NetworkInterface) SetId(v *string) *NetworkInterface { + if o.ID = v; o.ID == nil { + o.nullFields = append(o.nullFields, "ID") + } + return o +} + +func (o *NetworkInterface) SetDescription(v *string) *NetworkInterface { + if o.Description = v; o.Description == nil { + o.nullFields = append(o.nullFields, "Description") + } + return o +} + +func (o *NetworkInterface) SetDeviceIndex(v *int) *NetworkInterface { + if o.DeviceIndex = v; o.DeviceIndex == nil { + o.nullFields = append(o.nullFields, "DeviceIndex") + } + return o +} + +func (o *NetworkInterface) SetSecondaryPrivateIPAddressCount(v *int) *NetworkInterface { + if o.SecondaryPrivateIPAddressCount = v; o.SecondaryPrivateIPAddressCount == nil { + o.nullFields = append(o.nullFields, "SecondaryPrivateIPAddressCount") + } + return o +} + +func (o *NetworkInterface) SetAssociatePublicIPAddress(v *bool) *NetworkInterface { + if o.AssociatePublicIPAddress = v; o.AssociatePublicIPAddress == nil { + o.nullFields = append(o.nullFields, "AssociatePublicIPAddress") + } + return o +} + +func (o *NetworkInterface) SetDeleteOnTermination(v *bool) *NetworkInterface { + if o.DeleteOnTermination = v; o.DeleteOnTermination == nil { + o.nullFields = append(o.nullFields, "DeleteOnTermination") + } + return o +} + +func (o *NetworkInterface) SetSecurityGroupsIDs(v []string) *NetworkInterface { + if o.SecurityGroupsIDs = v; 
o.SecurityGroupsIDs == nil { + o.nullFields = append(o.nullFields, "SecurityGroupsIDs") + } + return o +} + +func (o *NetworkInterface) SetPrivateIPAddress(v *string) *NetworkInterface { + if o.PrivateIPAddress = v; o.PrivateIPAddress == nil { + o.nullFields = append(o.nullFields, "PrivateIPAddress") + } + return o +} + +func (o *NetworkInterface) SetSubnetId(v *string) *NetworkInterface { + if o.SubnetID = v; o.SubnetID == nil { + o.nullFields = append(o.nullFields, "SubnetID") + } + return o +} + +// endregion + +// region BlockDeviceMapping + +func (o *BlockDeviceMapping) MarshalJSON() ([]byte, error) { + type noMethod BlockDeviceMapping + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *BlockDeviceMapping) SetDeviceName(v *string) *BlockDeviceMapping { + if o.DeviceName = v; o.DeviceName == nil { + o.nullFields = append(o.nullFields, "DeviceName") + } + return o +} + +func (o *BlockDeviceMapping) SetVirtualName(v *string) *BlockDeviceMapping { + if o.VirtualName = v; o.VirtualName == nil { + o.nullFields = append(o.nullFields, "VirtualName") + } + return o +} + +func (o *BlockDeviceMapping) SetEBS(v *EBS) *BlockDeviceMapping { + if o.EBS = v; o.EBS == nil { + o.nullFields = append(o.nullFields, "EBS") + } + return o +} + +// endregion + +// region EBS + +func (o *EBS) MarshalJSON() ([]byte, error) { + type noMethod EBS + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *EBS) SetDeleteOnTermination(v *bool) *EBS { + if o.DeleteOnTermination = v; o.DeleteOnTermination == nil { + o.nullFields = append(o.nullFields, "DeleteOnTermination") + } + return o +} + +func (o *EBS) SetEncrypted(v *bool) *EBS { + if o.Encrypted = v; o.Encrypted == nil { + o.nullFields = append(o.nullFields, "Encrypted") + } + return o +} + +func (o *EBS) SetKmsKeyId(v *string) *EBS { + if o.KmsKeyId = v; o.KmsKeyId == nil { + o.nullFields = append(o.nullFields, "KmsKeyId") + } + return 
o +} + +func (o *EBS) SetSnapshotId(v *string) *EBS { + if o.SnapshotID = v; o.SnapshotID == nil { + o.nullFields = append(o.nullFields, "SnapshotID") + } + return o +} + +func (o *EBS) SetVolumeType(v *string) *EBS { + if o.VolumeType = v; o.VolumeType == nil { + o.nullFields = append(o.nullFields, "VolumeType") + } + return o +} + +func (o *EBS) SetVolumeSize(v *int) *EBS { + if o.VolumeSize = v; o.VolumeSize == nil { + o.nullFields = append(o.nullFields, "VolumeSize") + } + return o +} + +func (o *EBS) SetIOPS(v *int) *EBS { + if o.IOPS = v; o.IOPS == nil { + o.nullFields = append(o.nullFields, "IOPS") + } + return o +} + +// endregion + +// region IAMInstanceProfile + +func (o *IAMInstanceProfile) MarshalJSON() ([]byte, error) { + type noMethod IAMInstanceProfile + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *IAMInstanceProfile) SetName(v *string) *IAMInstanceProfile { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *IAMInstanceProfile) SetArn(v *string) *IAMInstanceProfile { + if o.Arn = v; o.Arn == nil { + o.nullFields = append(o.nullFields, "Arn") + } + return o +} + +// endregion + +// region RollStrategy + +func (o *RollStrategy) MarshalJSON() ([]byte, error) { + type noMethod RollStrategy + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RollStrategy) SetAction(v *string) *RollStrategy { + if o.Action = v; o.Action == nil { + o.nullFields = append(o.nullFields, "Action") + } + return o +} + +func (o *RollStrategy) SetShouldDrainInstances(v *bool) *RollStrategy { + if o.ShouldDrainInstances = v; o.ShouldDrainInstances == nil { + o.nullFields = append(o.nullFields, "ShouldDrainInstances") + } + return o +} + +// endregion + +// region CodeDeployIntegration + +func (o *CodeDeployIntegration) MarshalJSON() ([]byte, error) { + type noMethod CodeDeployIntegration + raw := noMethod(*o) + 
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *CodeDeployIntegration) SetDeploymentGroups(v []*DeploymentGroup) *CodeDeployIntegration { + if o.DeploymentGroups = v; o.DeploymentGroups == nil { + o.nullFields = append(o.nullFields, "DeploymentGroups") + } + return o +} + +func (o *CodeDeployIntegration) SetCleanUpOnFailure(v *bool) *CodeDeployIntegration { + if o.CleanUpOnFailure = v; o.CleanUpOnFailure == nil { + o.nullFields = append(o.nullFields, "CleanUpOnFailure") + } + return o +} + +func (o *CodeDeployIntegration) SetTerminateInstanceOnFailure(v *bool) *CodeDeployIntegration { + if o.TerminateInstanceOnFailure = v; o.TerminateInstanceOnFailure == nil { + o.nullFields = append(o.nullFields, "TerminateInstanceOnFailure") + } + return o +} + +// endregion + +// region DeploymentGroup + +func (o *DeploymentGroup) MarshalJSON() ([]byte, error) { + type noMethod DeploymentGroup + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *DeploymentGroup) SetApplicationName(v *string) *DeploymentGroup { + if o.ApplicationName = v; o.ApplicationName == nil { + o.nullFields = append(o.nullFields, "ApplicationName") + } + return o +} + +func (o *DeploymentGroup) SetDeploymentGroupName(v *string) *DeploymentGroup { + if o.DeploymentGroupName = v; o.DeploymentGroupName == nil { + o.nullFields = append(o.nullFields, "DeploymentGroupName") + } + return o +} + +// endregion + +// region OpsWorksIntegration + +func (o *OpsWorksIntegration) MarshalJSON() ([]byte, error) { + type noMethod OpsWorksIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *OpsWorksIntegration) SetLayerId(v *string) *OpsWorksIntegration { + if o.LayerID = v; o.LayerID == nil { + o.nullFields = append(o.nullFields, "LayerID") + } + return o +} + +func (o *OpsWorksIntegration) SetStackType(v *string) *OpsWorksIntegration { + if o.StackType = v; o.StackType == nil 
{ + o.nullFields = append(o.nullFields, "StackType") + } + return o +} + +// endregion + +// region Scale Request + +type ScaleUpSpotItem struct { + SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + InstanceType *string `json:"instanceType,omitempty"` +} + +type ScaleUpOnDemandItem struct { + InstanceID *string `json:"instanceId,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + InstanceType *string `json:"instanceType,omitempty"` +} + +type ScaleDownSpotItem struct { + SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty"` +} + +type ScaleDownOnDemandItem struct { + InstanceID *string `json:"instanceId,omitempty"` +} + +type ScaleItem struct { + NewSpotRequests []*ScaleUpSpotItem `json:"newSpotRequests,omitempty"` + NewInstances []*ScaleUpOnDemandItem `json:"newInstances,omitempty"` + VictimSpotRequests []*ScaleDownSpotItem `json:"victimSpotRequests,omitempty"` + VictimInstances []*ScaleDownOnDemandItem `json:"victimInstances,omitempty"` +} + +type ScaleGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + ScaleType *string `json:"type,omitempty"` + Adjustment *int `json:"adjustment,omitempty"` +} + +type ScaleGroupOutput struct { + Items []*ScaleItem `json:"items"` +} + +func scaleUpResponseFromJSON(in []byte) (*ScaleGroupOutput, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + + var retVal ScaleGroupOutput + retVal.Items = make([]*ScaleItem, len(rw.Response.Items)) + for i, rb := range rw.Response.Items { + b, err := scaleUpItemFromJSON(rb) + if err != nil { + return nil, err + } + retVal.Items[i] = b + } + + return &retVal, nil +} + +func scaleUpItemFromJSON(in []byte) (*ScaleItem, error) { + var rw *ScaleItem + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + return rw, nil +} + +func scaleFromHttpResponse(resp *http.Response) 
(*ScaleGroupOutput, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return scaleUpResponseFromJSON(body) +} + +func (s *ServiceOp) Scale(ctx context.Context, input *ScaleGroupInput) (*ScaleGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/scale/{type}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "type": spotinst.StringValue(input.ScaleType), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodPut, path) + + if input.Adjustment != nil { + r.Params.Set("adjustment", strconv.Itoa(*input.Adjustment)) + } + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + output, err := scaleFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return output, err +} + +//endregion diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go new file mode 100644 index 0000000000..fbbec9c010 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go @@ -0,0 +1,44 @@ +package aws + +import ( + "context" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" +) + +// Service provides the API operation methods for making requests to +// endpoints of the Spotinst API. See this package's package overview docs +// for details on the service. 
+type Service interface { + List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error) + Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error) + Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error) + Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error) + Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error) + Status(context.Context, *StatusGroupInput) (*StatusGroupOutput, error) + Detach(context.Context, *DetachGroupInput) (*DetachGroupOutput, error) + Roll(context.Context, *RollGroupInput) (*RollGroupOutput, error) + Scale(context.Context, *ScaleGroupInput) (*ScaleGroupOutput, error) + ImportBeanstalkEnv(context.Context, *ImportBeanstalkInput) (*ImportBeanstalkOutput, error) + StartBeanstalkMaintenance(context.Context, *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) + FinishBeanstalkMaintenance(context.Context, *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) + GetBeanstalkMaintenanceStatus(context.Context, *BeanstalkMaintenanceInput) (*string, error) +} + +type ServiceOp struct { + Client *client.Client +} + +var _ Service = &ServiceOp{} + +func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { + cfg := &spotinst.Config{} + cfg.Merge(sess.Config) + cfg.Merge(cfgs...) 
+ + return &ServiceOp{ + Client: client.New(sess.Config), + } +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go new file mode 100644 index 0000000000..7bd1aafd7d --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go @@ -0,0 +1,31 @@ +package aws + +import "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + +type Tag struct { + Key *string `json:"tagKey,omitempty"` + Value *string `json:"tagValue,omitempty"` + + forceSendFields []string `json:"-"` + nullFields []string `json:"-"` +} + +func (o *Tag) MarshalJSON() ([]byte, error) { + type noMethod Tag + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Tag) SetKey(v *string) *Tag { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *Tag) SetValue(v *string) *Tag { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/BUILD.bazel new file mode 100644 index 0000000000..a6ea066cab --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "azure.go", + "service.go", + "tag.go", + ], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure", + importpath = "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst:go_default_library", + 
"//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates:go_default_library", + ], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/azure.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/azure.go new file mode 100644 index 0000000000..db7f0fb609 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/azure.go @@ -0,0 +1,1486 @@ +package azure + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + "time" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates" +) + +type Group struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Capacity *Capacity `json:"capacity,omitempty"` + Compute *Compute `json:"compute,omitempty"` + Strategy *Strategy `json:"strategy,omitempty"` + Scaling *Scaling `json:"scaling,omitempty"` + Scheduling *Scheduling `json:"scheduling,omitempty"` + Integration *Integration `json:"thirdPartiesIntegration,omitempty"` + + // forceSendFields is a list of field names (e.g. "Keys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ forceSendFields []string + + // nullFields is a list of field names (e.g. "Keys") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + nullFields []string +} + +type Scheduling struct { + Tasks []*ScheduledTask `json:"tasks,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Integration struct { + Rancher *RancherIntegration `json:"rancher,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RancherIntegration struct { + MasterHost *string `json:"masterHost,omitempty"` + AccessKey *string `json:"accessKey,omitempty"` + SecretKey *string `json:"secretKey,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ScheduledTask struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + Frequency *string `json:"frequency,omitempty"` + CronExpression *string `json:"cronExpression,omitempty"` + TaskType *string `json:"taskType,omitempty"` + ScaleTargetCapacity *int `json:"scaleTargetCapacity,omitempty"` + ScaleMinCapacity *int `json:"scaleMinCapacity,omitempty"` + ScaleMaxCapacity *int `json:"scaleMaxCapacity,omitempty"` + BatchSizePercentage *int `json:"batchSizePercentage,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + Adjustment *int `json:"adjustment,omitempty"` + AdjustmentPercentage *int `json:"adjustmentPercentage,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Scaling struct { + Up []*ScalingPolicy `json:"up,omitempty"` + Down []*ScalingPolicy `json:"down,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ScalingPolicy struct { + PolicyName *string `json:"policyName,omitempty"` + MetricName *string `json:"metricName,omitempty"` + 
Statistic *string `json:"statistic,omitempty"` + Unit *string `json:"unit,omitempty"` + Threshold *float64 `json:"threshold,omitempty"` + Adjustment *int `json:"adjustment,omitempty"` + MinTargetCapacity *int `json:"minTargetCapacity,omitempty"` + MaxTargetCapacity *int `json:"maxTargetCapacity,omitempty"` + Namespace *string `json:"namespace,omitempty"` + EvaluationPeriods *int `json:"evaluationPeriods,omitempty"` + Period *int `json:"period,omitempty"` + Cooldown *int `json:"cooldown,omitempty"` + Operator *string `json:"operator,omitempty"` + Dimensions []*Dimension `json:"dimensions,omitempty"` + Action *Action `json:"action,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Action struct { + Type *string `json:"type,omitempty"` + Adjustment *string `json:"adjustment,omitempty"` + MinTargetCapacity *string `json:"minTargetCapacity,omitempty"` + MaxTargetCapacity *string `json:"maxTargetCapacity,omitempty"` + Maximum *string `json:"maximum,omitempty"` + Minimum *string `json:"minimum,omitempty"` + Target *string `json:"target,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Dimension struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Strategy struct { + LowPriorityPercentage *int `json:"lowPriorityPercentage,omitempty"` + DedicatedCount *int `json:"dedicatedCount,omitempty"` + DrainingTimeout *int `json:"drainingTimeout,omitempty"` + Signals []*Signal `json:"signals,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Signal struct { + Name *string `json:"name,omitempty"` + Timeout *int `json:"timeout,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Capacity struct { + Minimum *int `json:"minimum,omitempty"` + Maximum *int `json:"maximum,omitempty"` + Target *int `json:"target,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Compute struct { + 
Region *string `json:"region,omitempty"` + Product *string `json:"product,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + VMSize *VMSize `json:"vmSizes,omitempty"` + LaunchSpecification *LaunchSpecification `json:"launchSpecification,omitempty"` + Health *Health `json:"health,omitempty"` + + forceSendFields []string + nullFields []string +} + +type VMSize struct { + Dedicated []string `json:"dedicatedSizes,omitempty"` + LowPriority []string `json:"lowPrioritySizes,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LaunchSpecification struct { + LoadBalancersConfig *LoadBalancersConfig `json:"loadBalancersConfig,omitempty"` + Image *Image `json:"image,omitempty"` + UserData *UserData `json:"userData,omitempty"` + Storage *Storage `json:"storage,omitempty"` + Network *Network `json:"network,omitempty"` + SSHPublicKey *string `json:"sshPublicKey,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LoadBalancersConfig struct { + LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LoadBalancer struct { + BalancerID *string `json:"balancerId,omitempty"` + TargetSetID *string `json:"targetSetId,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Image struct { + Custom *CustomImage `json:"customImage,omitempty"` + Publisher *string `json:"publisher,omitempty"` + Offer *string `json:"offer,omitempty"` + SKU *string `json:"sku,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CustomImage struct { + ImageURIs []string `json:"imageUris,omitempty"` + + forceSendFields []string + nullFields []string +} + +type UserData struct { + CommandLine *string `json:"commandLine,omitempty"` + ResourceFiles []*ResourceFile `json:"resourceFiles,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ResourceFile struct { + URL *string `json:"resourceFileUrl,omitempty"` + TargetPath 
*string `json:"resourceFileTargetPath,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Storage struct { + AccountName *string `json:"storageAccountName,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Network struct { + VirtualNetworkName *string `json:"virtualNetworkName,omitempty"` + SubnetID *string `json:"subnetId,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Health struct { + HealthCheckType *string `json:"healthCheckType,omitempty"` + AutoHealing *bool `json:"autoHealing,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Node struct { + ID *string `json:"id,omitempty"` + VMSize *string `json:"vmSize,omitempty"` + State *string `json:"state,omitempty"` + LifeCycle *string `json:"lifeCycle,omitempty"` + Region *string `json:"region,omitempty"` + IPAddress *string `json:"ipAddress,omitempty"` + CreatedAt *time.Time `json:"createdAt,omitempty"` +} + +type RollStrategy struct { + Action *string `json:"action,omitempty"` + ShouldDrainInstances *bool `json:"shouldDrainInstances,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ListGroupsInput struct{} + +type ListGroupsOutput struct { + Groups []*Group `json:"groups,omitempty"` +} + +type CreateGroupInput struct { + Group *Group `json:"group,omitempty"` +} + +type CreateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type ReadGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type ReadGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type UpdateGroupInput struct { + Group *Group `json:"group,omitempty"` +} + +type UpdateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type DeleteGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type DeleteGroupOutput struct{} + +type StatusGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type 
StatusGroupOutput struct { + Nodes []*Node `json:"nodes,omitempty"` +} + +type DetachGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + InstanceIDs []string `json:"instancesToDetach,omitempty"` + ShouldDecrementTargetCapacity *bool `json:"shouldDecrementTargetCapacity,omitempty"` + ShouldTerminateInstances *bool `json:"shouldTerminateInstances,omitempty"` + DrainingTimeout *int `json:"drainingTimeout,omitempty"` +} + +type DetachGroupOutput struct{} + +type RollGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + BatchSizePercentage *int `json:"batchSizePercentage,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + HealthCheckType *string `json:"healthCheckType,omitempty"` + Strategy *RollStrategy `json:"strategy,omitempty"` +} + +type RollGroupOutput struct{} + +func groupFromJSON(in []byte) (*Group, error) { + b := new(Group) + if err := json.Unmarshal(in, b); err != nil { + return nil, err + } + return b, nil +} + +func groupsFromJSON(in []byte) ([]*Group, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + out := make([]*Group, len(rw.Response.Items)) + if len(out) == 0 { + return out, nil + } + for i, rb := range rw.Response.Items { + b, err := groupFromJSON(rb) + if err != nil { + return nil, err + } + out[i] = b + } + return out, nil +} + +func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return groupsFromJSON(body) +} + +func nodeFromJSON(in []byte) (*Node, error) { + b := new(Node) + if err := json.Unmarshal(in, b); err != nil { + return nil, err + } + return b, nil +} + +func nodesFromJSON(in []byte) ([]*Node, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + out := make([]*Node, len(rw.Response.Items)) + if len(out) == 0 { + return out, nil + } + for i, rb := range rw.Response.Items { + b, err := 
nodeFromJSON(rb) + if err != nil { + return nil, err + } + out[i] = b + } + return out, nil +} + +func nodesFromHttpResponse(resp *http.Response) ([]*Node, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return nodesFromJSON(body) +} + +func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) { + r := client.NewRequest(http.MethodGet, "/compute/azure/group") + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &ListGroupsOutput{Groups: gs}, nil +} + +func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) { + r := client.NewRequest(http.MethodPost, "/compute/azure/group") + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(CreateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) { + path, err := uritemplates.Expand("/compute/azure/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(ReadGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) { + path, err := 
uritemplates.Expand("/compute/azure/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.Group.ID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.Group.ID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(UpdateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) { + path, err := uritemplates.Expand("/compute/azure/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodDelete, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DeleteGroupOutput{}, nil +} + +func (s *ServiceOp) Status(ctx context.Context, input *StatusGroupInput) (*StatusGroupOutput, error) { + path, err := uritemplates.Expand("/compute/azure/group/{groupId}/status", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + ns, err := nodesFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &StatusGroupOutput{Nodes: ns}, nil +} + +func (s *ServiceOp) Detach(ctx context.Context, input *DetachGroupInput) (*DetachGroupOutput, error) { + path, err := uritemplates.Expand("/compute/azure/group/{groupId}/detachNodes", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err 
!= nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DetachGroupOutput{}, nil +} + +func (s *ServiceOp) Roll(ctx context.Context, input *RollGroupInput) (*RollGroupOutput, error) { + path, err := uritemplates.Expand("/compute/azure/group/{groupId}/roll", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &RollGroupOutput{}, nil +} + +// region Group + +func (o *Group) MarshalJSON() ([]byte, error) { + type noMethod Group + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Group) SetId(v *string) *Group { + if o.ID = v; o.ID == nil { + o.nullFields = append(o.nullFields, "ID") + } + return o +} + +func (o *Group) SetName(v *string) *Group { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Group) SetDescription(v *string) *Group { + if o.Description = v; o.Description == nil { + o.nullFields = append(o.nullFields, "Description") + } + return o +} + +func (o *Group) SetCapacity(v *Capacity) *Group { + if o.Capacity = v; o.Capacity == nil { + o.nullFields = append(o.nullFields, "Capacity") + } + return o +} + +func (o *Group) SetCompute(v *Compute) *Group { + if o.Compute = v; o.Compute == nil { + o.nullFields = append(o.nullFields, "Compute") + } + return o +} + +func (o *Group) SetStrategy(v *Strategy) *Group { + if o.Strategy = v; o.Strategy == nil { + o.nullFields = 
append(o.nullFields, "Strategy") + } + return o +} + +func (o *Group) SetScaling(v *Scaling) *Group { + if o.Scaling = v; o.Scaling == nil { + o.nullFields = append(o.nullFields, "Scaling") + } + return o +} + +func (o *Group) SetScheduling(v *Scheduling) *Group { + if o.Scheduling = v; o.Scheduling == nil { + o.nullFields = append(o.nullFields, "Scheduling") + } + return o +} + +func (o *Group) SetIntegration(v *Integration) *Group { + if o.Integration = v; o.Integration == nil { + o.nullFields = append(o.nullFields, "Integration") + } + return o +} + +// endregion + +// region Scheduling + +func (o *Scheduling) MarshalJSON() ([]byte, error) { + type noMethod Scheduling + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Scheduling) SetTasks(v []*ScheduledTask) *Scheduling { + if o.Tasks = v; o.Tasks == nil { + o.nullFields = append(o.nullFields, "Tasks") + } + return o +} + +// endregion + +// region Integration + +func (o *Integration) MarshalJSON() ([]byte, error) { + type noMethod Integration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Integration) SetRancher(v *RancherIntegration) *Integration { + if o.Rancher = v; o.Rancher == nil { + o.nullFields = append(o.nullFields, "Rancher") + } + return o +} + +// endregion + +// region RancherIntegration + +func (o *RancherIntegration) MarshalJSON() ([]byte, error) { + type noMethod RancherIntegration + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RancherIntegration) SetMasterHost(v *string) *RancherIntegration { + if o.MasterHost = v; o.MasterHost == nil { + o.nullFields = append(o.nullFields, "MasterHost") + } + return o +} + +func (o *RancherIntegration) SetAccessKey(v *string) *RancherIntegration { + if o.AccessKey = v; o.AccessKey == nil { + o.nullFields = append(o.nullFields, "AccessKey") + } + return o +} + +func (o *RancherIntegration) 
SetSecretKey(v *string) *RancherIntegration { + if o.SecretKey = v; o.SecretKey == nil { + o.nullFields = append(o.nullFields, "SecretKey") + } + return o +} + +// endregion + +// region ScheduledTask + +func (o *ScheduledTask) MarshalJSON() ([]byte, error) { + type noMethod ScheduledTask + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ScheduledTask) SetIsEnabled(v *bool) *ScheduledTask { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +func (o *ScheduledTask) SetFrequency(v *string) *ScheduledTask { + if o.Frequency = v; o.Frequency == nil { + o.nullFields = append(o.nullFields, "Frequency") + } + return o +} + +func (o *ScheduledTask) SetCronExpression(v *string) *ScheduledTask { + if o.CronExpression = v; o.CronExpression == nil { + o.nullFields = append(o.nullFields, "CronExpression") + } + return o +} + +func (o *ScheduledTask) SetTaskType(v *string) *ScheduledTask { + if o.TaskType = v; o.TaskType == nil { + o.nullFields = append(o.nullFields, "TaskType") + } + return o +} + +func (o *ScheduledTask) SetScaleTargetCapacity(v *int) *ScheduledTask { + if o.ScaleTargetCapacity = v; o.ScaleTargetCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleTargetCapacity") + } + return o +} + +func (o *ScheduledTask) SetScaleMinCapacity(v *int) *ScheduledTask { + if o.ScaleMinCapacity = v; o.ScaleMinCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleMinCapacity") + } + return o +} + +func (o *ScheduledTask) SetScaleMaxCapacity(v *int) *ScheduledTask { + if o.ScaleMaxCapacity = v; o.ScaleMaxCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleMaxCapacity") + } + return o +} + +func (o *ScheduledTask) SetBatchSizePercentage(v *int) *ScheduledTask { + if o.BatchSizePercentage = v; o.BatchSizePercentage == nil { + o.nullFields = append(o.nullFields, "BatchSizePercentage") + } + return o +} + +func (o *ScheduledTask) 
SetGracePeriod(v *int) *ScheduledTask { + if o.GracePeriod = v; o.GracePeriod == nil { + o.nullFields = append(o.nullFields, "GracePeriod") + } + return o +} + +func (o *ScheduledTask) SetAdjustment(v *int) *ScheduledTask { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +func (o *ScheduledTask) SetAdjustmentPercentage(v *int) *ScheduledTask { + if o.AdjustmentPercentage = v; o.AdjustmentPercentage == nil { + o.nullFields = append(o.nullFields, "AdjustmentPercentage") + } + return o +} + +// endregion + +// region Scaling + +func (o *Scaling) MarshalJSON() ([]byte, error) { + type noMethod Scaling + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Scaling) SetUp(v []*ScalingPolicy) *Scaling { + if o.Up = v; o.Up == nil { + o.nullFields = append(o.nullFields, "Up") + } + return o +} + +func (o *Scaling) SetDown(v []*ScalingPolicy) *Scaling { + if o.Down = v; o.Down == nil { + o.nullFields = append(o.nullFields, "Down") + } + return o +} + +// endregion + +// region ScalingPolicy + +func (o *ScalingPolicy) MarshalJSON() ([]byte, error) { + type noMethod ScalingPolicy + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ScalingPolicy) SetPolicyName(v *string) *ScalingPolicy { + if o.PolicyName = v; o.PolicyName == nil { + o.nullFields = append(o.nullFields, "PolicyName") + } + return o +} + +func (o *ScalingPolicy) SetMetricName(v *string) *ScalingPolicy { + if o.MetricName = v; o.MetricName == nil { + o.nullFields = append(o.nullFields, "MetricName") + } + return o +} + +func (o *ScalingPolicy) SetStatistic(v *string) *ScalingPolicy { + if o.Statistic = v; o.Statistic == nil { + o.nullFields = append(o.nullFields, "Statistic") + } + return o +} + +func (o *ScalingPolicy) SetUnit(v *string) *ScalingPolicy { + if o.Unit = v; o.Unit == nil { + o.nullFields = append(o.nullFields, "Unit") + } + 
return o +} + +func (o *ScalingPolicy) SetThreshold(v *float64) *ScalingPolicy { + if o.Threshold = v; o.Threshold == nil { + o.nullFields = append(o.nullFields, "Threshold") + } + return o +} + +func (o *ScalingPolicy) SetAdjustment(v *int) *ScalingPolicy { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +func (o *ScalingPolicy) SetMinTargetCapacity(v *int) *ScalingPolicy { + if o.MinTargetCapacity = v; o.MinTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MinTargetCapacity") + } + return o +} + +func (o *ScalingPolicy) SetMaxTargetCapacity(v *int) *ScalingPolicy { + if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MaxTargetCapacity") + } + return o +} + +func (o *ScalingPolicy) SetNamespace(v *string) *ScalingPolicy { + if o.Namespace = v; o.Namespace == nil { + o.nullFields = append(o.nullFields, "Namespace") + } + return o +} + +func (o *ScalingPolicy) SetEvaluationPeriods(v *int) *ScalingPolicy { + if o.EvaluationPeriods = v; o.EvaluationPeriods == nil { + o.nullFields = append(o.nullFields, "EvaluationPeriods") + } + return o +} + +func (o *ScalingPolicy) SetPeriod(v *int) *ScalingPolicy { + if o.Period = v; o.Period == nil { + o.nullFields = append(o.nullFields, "Period") + } + return o +} + +func (o *ScalingPolicy) SetCooldown(v *int) *ScalingPolicy { + if o.Cooldown = v; o.Cooldown == nil { + o.nullFields = append(o.nullFields, "Cooldown") + } + return o +} + +func (o *ScalingPolicy) SetOperator(v *string) *ScalingPolicy { + if o.Operator = v; o.Operator == nil { + o.nullFields = append(o.nullFields, "Operator") + } + return o +} + +func (o *ScalingPolicy) SetDimensions(v []*Dimension) *ScalingPolicy { + if o.Dimensions = v; o.Dimensions == nil { + o.nullFields = append(o.nullFields, "Dimensions") + } + return o +} + +func (o *ScalingPolicy) SetAction(v *Action) *ScalingPolicy { + if o.Action = v; o.Action == nil { + 
o.nullFields = append(o.nullFields, "Action") + } + return o +} + +// endregion + +// region Action + +func (o *Action) MarshalJSON() ([]byte, error) { + type noMethod Action + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Action) SetType(v *string) *Action { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +func (o *Action) SetAdjustment(v *string) *Action { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +func (o *Action) SetMinTargetCapacity(v *string) *Action { + if o.MinTargetCapacity = v; o.MinTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MinTargetCapacity") + } + return o +} + +func (o *Action) SetMaxTargetCapacity(v *string) *Action { + if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MaxTargetCapacity") + } + return o +} + +func (o *Action) SetMaximum(v *string) *Action { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *Action) SetMinimum(v *string) *Action { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o *Action) SetTarget(v *string) *Action { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +// endregion + +// region Dimension + +func (o *Dimension) MarshalJSON() ([]byte, error) { + type noMethod Dimension + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Dimension) SetName(v *string) *Dimension { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Dimension) SetValue(v *string) *Dimension { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// region 
Strategy + +func (o *Strategy) MarshalJSON() ([]byte, error) { + type noMethod Strategy + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Strategy) SetLowPriorityPercentage(v *int) *Strategy { + if o.LowPriorityPercentage = v; o.LowPriorityPercentage == nil { + o.nullFields = append(o.nullFields, "LowPriorityPercentage") + } + return o +} + +func (o *Strategy) SetDedicatedCount(v *int) *Strategy { + if o.DedicatedCount = v; o.DedicatedCount == nil { + o.nullFields = append(o.nullFields, "DedicatedCount") + } + return o +} + +func (o *Strategy) SetDrainingTimeout(v *int) *Strategy { + if o.DrainingTimeout = v; o.DrainingTimeout == nil { + o.nullFields = append(o.nullFields, "DrainingTimeout") + } + return o +} + +func (o *Strategy) SetSignals(v []*Signal) *Strategy { + if o.Signals = v; o.Signals == nil { + o.nullFields = append(o.nullFields, "Signals") + } + return o +} + +// endregion + +// region Signal + +func (o *Signal) MarshalJSON() ([]byte, error) { + type noMethod Signal + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Signal) SetName(v *string) *Signal { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Signal) SetTimeout(v *int) *Signal { + if o.Timeout = v; o.Timeout == nil { + o.nullFields = append(o.nullFields, "Timeout") + } + return o +} + +// endregion + +// region Capacity + +func (o *Capacity) MarshalJSON() ([]byte, error) { + type noMethod Capacity + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Capacity) SetMinimum(v *int) *Capacity { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o *Capacity) SetMaximum(v *int) *Capacity { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *Capacity) 
SetTarget(v *int) *Capacity { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +// endregion + +// region Compute + +func (o *Compute) MarshalJSON() ([]byte, error) { + type noMethod Compute + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Compute) SetRegion(v *string) *Compute { + if o.Region = v; o.Region == nil { + o.nullFields = append(o.nullFields, "Region") + } + return o +} + +func (o *Compute) SetProduct(v *string) *Compute { + if o.Product = v; o.Product == nil { + o.nullFields = append(o.nullFields, "Product") + } + return o +} + +func (o *Compute) SetResourceGroupName(v *string) *Compute { + if o.ResourceGroupName = v; o.ResourceGroupName == nil { + o.nullFields = append(o.nullFields, "ResourceGroupName") + } + return o +} + +func (o *Compute) SetVMSize(v *VMSize) *Compute { + if o.VMSize = v; o.VMSize == nil { + o.nullFields = append(o.nullFields, "VMSize") + } + return o +} + +func (o *Compute) SetLaunchSpecification(v *LaunchSpecification) *Compute { + if o.LaunchSpecification = v; o.LaunchSpecification == nil { + o.nullFields = append(o.nullFields, "LaunchSpecification") + } + return o +} + +func (o *Compute) SetHealth(v *Health) *Compute { + if o.Health = v; o.Health == nil { + o.nullFields = append(o.nullFields, "Health") + } + return o +} + +// endregion + +// region VMSize + +func (o *VMSize) MarshalJSON() ([]byte, error) { + type noMethod VMSize + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *VMSize) SetDedicated(v []string) *VMSize { + if o.Dedicated = v; o.Dedicated == nil { + o.nullFields = append(o.nullFields, "Dedicated") + } + return o +} + +func (o *VMSize) SetLowPriority(v []string) *VMSize { + if o.LowPriority = v; o.LowPriority == nil { + o.nullFields = append(o.nullFields, "LowPriority") + } + return o +} + +// endregion + +// region LaunchSpecification + +func (o 
*LaunchSpecification) MarshalJSON() ([]byte, error) { + type noMethod LaunchSpecification + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LaunchSpecification) SetLoadBalancersConfig(v *LoadBalancersConfig) *LaunchSpecification { + if o.LoadBalancersConfig = v; o.LoadBalancersConfig == nil { + o.nullFields = append(o.nullFields, "LoadBalancersConfig") + } + return o +} + +func (o *LaunchSpecification) SetImage(v *Image) *LaunchSpecification { + if o.Image = v; o.Image == nil { + o.nullFields = append(o.nullFields, "Image") + } + return o +} + +func (o *LaunchSpecification) SetUserData(v *UserData) *LaunchSpecification { + if o.UserData = v; o.UserData == nil { + o.nullFields = append(o.nullFields, "UserData") + } + return o +} + +func (o *LaunchSpecification) SetStorage(v *Storage) *LaunchSpecification { + if o.Storage = v; o.Storage == nil { + o.nullFields = append(o.nullFields, "Storage") + } + return o +} + +func (o *LaunchSpecification) SetNetwork(v *Network) *LaunchSpecification { + if o.Network = v; o.Network == nil { + o.nullFields = append(o.nullFields, "Network") + } + return o +} + +func (o *LaunchSpecification) SetSSHPublicKey(v *string) *LaunchSpecification { + if o.SSHPublicKey = v; o.SSHPublicKey == nil { + o.nullFields = append(o.nullFields, "SSHPublicKey") + } + return o +} + +// endregion + +// region LoadBalancersConfig + +func (o *LoadBalancersConfig) MarshalJSON() ([]byte, error) { + type noMethod LoadBalancersConfig + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LoadBalancersConfig) SetLoadBalancers(v []*LoadBalancer) *LoadBalancersConfig { + if o.LoadBalancers = v; o.LoadBalancers == nil { + o.nullFields = append(o.nullFields, "LoadBalancers") + } + return o +} + +// endregion + +// region LoadBalancer + +func (o *LoadBalancer) MarshalJSON() ([]byte, error) { + type noMethod LoadBalancer + raw := noMethod(*o) + return 
jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LoadBalancer) SetBalancerId(v *string) *LoadBalancer { + if o.BalancerID = v; o.BalancerID == nil { + o.nullFields = append(o.nullFields, "BalancerID") + } + return o +} + +func (o *LoadBalancer) SetTargetSetId(v *string) *LoadBalancer { + if o.TargetSetID = v; o.TargetSetID == nil { + o.nullFields = append(o.nullFields, "TargetSetID") + } + return o +} + +// endregion + +// region Image + +func (o *Image) MarshalJSON() ([]byte, error) { + type noMethod Image + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Image) SetCustom(v *CustomImage) *Image { + if o.Custom = v; o.Custom == nil { + o.nullFields = append(o.nullFields, "Custom") + } + return o +} + +func (o *Image) SetPublisher(v *string) *Image { + if o.Publisher = v; o.Publisher == nil { + o.nullFields = append(o.nullFields, "Publisher") + } + return o +} + +func (o *Image) SetOffer(v *string) *Image { + if o.Offer = v; o.Offer == nil { + o.nullFields = append(o.nullFields, "Offer") + } + return o +} + +func (o *Image) SetSKU(v *string) *Image { + if o.SKU = v; o.SKU == nil { + o.nullFields = append(o.nullFields, "SKU") + } + return o +} + +// endregion + +// region CustomImage + +func (o *CustomImage) MarshalJSON() ([]byte, error) { + type noMethod CustomImage + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *CustomImage) SetImageURIs(v []string) *CustomImage { + if o.ImageURIs = v; o.ImageURIs == nil { + o.nullFields = append(o.nullFields, "ImageURIs") + } + return o +} + +// endregion + +// region UserData + +func (o *UserData) MarshalJSON() ([]byte, error) { + type noMethod UserData + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *UserData) SetCommandLine(v *string) *UserData { + if o.CommandLine = v; o.CommandLine == nil { + o.nullFields = append(o.nullFields, 
"CommandLine") + } + return o +} + +func (o *UserData) SetResourceFiles(v []*ResourceFile) *UserData { + if o.ResourceFiles = v; o.ResourceFiles == nil { + o.nullFields = append(o.nullFields, "ResourceFiles") + } + return o +} + +// endregion + +// region ResourceFile + +func (o *ResourceFile) MarshalJSON() ([]byte, error) { + type noMethod ResourceFile + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ResourceFile) SetURL(v *string) *ResourceFile { + if o.URL = v; o.URL == nil { + o.nullFields = append(o.nullFields, "URL") + } + return o +} + +func (o *ResourceFile) SetTargetPath(v *string) *ResourceFile { + if o.TargetPath = v; o.TargetPath == nil { + o.nullFields = append(o.nullFields, "TargetPath") + } + return o +} + +// endregion + +// region Storage + +func (o *Storage) MarshalJSON() ([]byte, error) { + type noMethod Storage + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Storage) SetAccountName(v *string) *Storage { + if o.AccountName = v; o.AccountName == nil { + o.nullFields = append(o.nullFields, "AccountName") + } + return o +} + +// endregion + +// region Network + +func (o *Network) MarshalJSON() ([]byte, error) { + type noMethod Network + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Network) SetVirtualNetworkName(v *string) *Network { + if o.VirtualNetworkName = v; o.VirtualNetworkName == nil { + o.nullFields = append(o.nullFields, "VirtualNetworkName") + } + return o +} + +func (o *Network) SetSubnetId(v *string) *Network { + if o.SubnetID = v; o.SubnetID == nil { + o.nullFields = append(o.nullFields, "SubnetID") + } + return o +} + +// endregion + +// region Health + +func (o *Health) MarshalJSON() ([]byte, error) { + type noMethod Health + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Health) SetHealthCheckType(v *string) 
*Health { + if o.HealthCheckType = v; o.HealthCheckType == nil { + o.nullFields = append(o.nullFields, "HealthCheckType") + } + return o +} + +func (o *Health) SetAutoHealing(v *bool) *Health { + if o.AutoHealing = v; o.AutoHealing == nil { + o.nullFields = append(o.nullFields, "AutoHealing") + } + return o +} + +func (o *Health) SetGracePeriod(v *int) *Health { + if o.GracePeriod = v; o.GracePeriod == nil { + o.nullFields = append(o.nullFields, "GracePeriod") + } + return o +} + +// endregion diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/service.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/service.go new file mode 100644 index 0000000000..1fc4ce47da --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/service.go @@ -0,0 +1,39 @@ +package azure + +import ( + "context" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" +) + +// Service provides the API operation methods for making requests to +// endpoints of the Spotinst API. See this package's package overview docs +// for details on the service. 
+type Service interface { + List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error) + Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error) + Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error) + Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error) + Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error) + Status(context.Context, *StatusGroupInput) (*StatusGroupOutput, error) + Detach(context.Context, *DetachGroupInput) (*DetachGroupOutput, error) + Roll(context.Context, *RollGroupInput) (*RollGroupOutput, error) +} + +type ServiceOp struct { + Client *client.Client +} + +var _ Service = &ServiceOp{} + +func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { + cfg := &spotinst.Config{} + cfg.Merge(sess.Config) + cfg.Merge(cfgs...) + + return &ServiceOp{ + Client: client.New(sess.Config), + } +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/tag.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/tag.go new file mode 100644 index 0000000000..d0360faebb --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/tag.go @@ -0,0 +1,31 @@ +package azure + +import "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + +type Tag struct { + Key *string `json:"tagKey,omitempty"` + Value *string `json:"tagValue,omitempty"` + + forceSendFields []string `json:"-"` + nullFields []string `json:"-"` +} + +func (o *Tag) MarshalJSON() ([]byte, error) { + type noMethod Tag + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Tag) SetKey(v *string) *Tag { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *Tag) SetValue(v *string) *Tag { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} diff --git 
a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/BUILD.bazel new file mode 100644 index 0000000000..b3b15b062e --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "service.go", + "tag.go", + ], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce", + importpath = "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil:go_default_library", + ], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/service.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/service.go new file mode 100644 index 0000000000..47dc4ad437 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/service.go @@ -0,0 +1,28 @@ +package gce + +import ( + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" +) + +// Service provides the API operation methods for making requests to +// endpoints of the Spotinst API. See this package's package overview docs +// for details on the service. 
+type Service interface{} + +type ServiceOp struct { + Client *client.Client +} + +var _ Service = &ServiceOp{} + +func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { + cfg := &spotinst.Config{} + cfg.Merge(sess.Config) + cfg.Merge(cfgs...) + + return &ServiceOp{ + Client: client.New(sess.Config), + } +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/tag.go b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/tag.go new file mode 100644 index 0000000000..a41bd9f412 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gce/tag.go @@ -0,0 +1,31 @@ +package gce + +import "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + +type Tag struct { + Key *string `json:"tagKey,omitempty"` + Value *string `json:"tagValue,omitempty"` + + forceSendFields []string `json:"-"` + nullFields []string `json:"-"` +} + +func (o *Tag) MarshalJSON() ([]byte, error) { + type noMethod Tag + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Tag) SetKey(v *string) *Tag { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *Tag) SetValue(v *string) *Tag { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/BUILD.bazel new file mode 100644 index 0000000000..031da32877 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "types.go", + "version.go", + ], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/spotinst", + importpath = "github.com/spotinst/spotinst-sdk-go/spotinst", + visibility = 
["//visibility:public"], + deps = [ + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials:go_default_library", + "//vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log:go_default_library", + ], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/BUILD.bazel new file mode 100644 index 0000000000..68ce35581e --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "request.go", + "response.go", + ], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client", + importpath = "github.com/spotinst/spotinst-sdk-go/spotinst/client", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/spotinst/spotinst-sdk-go/spotinst:go_default_library"], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go new file mode 100644 index 0000000000..0c448d96cb --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go @@ -0,0 +1,81 @@ +package client + +import ( + "context" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/spotinst/spotinst-sdk-go/spotinst" +) + +// Client provides a client to the API. +type Client struct { + config *spotinst.Config +} + +// New returns a new client. +func New(cfg *spotinst.Config) *Client { + if cfg == nil { + cfg = spotinst.DefaultConfig() + } + return &Client{cfg} +} + +// NewRequest is used to create a new request. +func NewRequest(method, path string) *Request { + return &Request{ + method: method, + url: &url.URL{ + Path: path, + }, + header: make(http.Header), + Params: make(url.Values), + } +} + +// Do runs a request with our client. 
+func (c *Client) Do(ctx context.Context, r *Request) (*http.Response, error) { + req, err := r.toHTTP(ctx, c.config) + if err != nil { + return nil, err + } + c.logRequest(req) + resp, err := c.config.HTTPClient.Do(req) + c.logResponse(resp) + return resp, err +} + +func (c *Client) logf(format string, args ...interface{}) { + if c.config.Logger != nil { + c.config.Logger.Printf(format, args...) + } +} + +const logReqMsg = `SPOTINST: Request "%s %s" details: +---[ REQUEST ]--------------------------------------- +%s +-----------------------------------------------------` + +func (c *Client) logRequest(req *http.Request) { + if c.config.Logger != nil && req != nil { + out, err := httputil.DumpRequestOut(req, true) + if err == nil { + c.logf(logReqMsg, req.Method, req.URL, string(out)) + } + } +} + +const logRespMsg = `SPOTINST: Response "%s %s" details: +---[ RESPONSE ]---------------------------------------- +%s +-------------------------------------------------------` + +func (c *Client) logResponse(resp *http.Response) { + if c.config.Logger != nil && resp != nil { + out, err := httputil.DumpResponse(resp, true) + if err == nil { + c.logf(logRespMsg, resp.Request.Method, resp.Request.URL, string(out)) + } + } +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go new file mode 100644 index 0000000000..efbe32f946 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/spotinst/spotinst-sdk-go/spotinst" +) + +type Request struct { + Obj interface{} + Params url.Values + url *url.URL + method string + body io.Reader + header http.Header +} + +// toHTTP converts the request to an HTTP request. 
+func (r *Request) toHTTP(ctx context.Context, cfg *spotinst.Config) (*http.Request, error) { + // Set the user credentials. + creds, err := cfg.Credentials.Get() + if err != nil { + return nil, err + } + if creds.Token != "" { + r.header.Set("Authorization", "Bearer "+creds.Token) + } + if creds.Account != "" { + r.Params.Set("accountId", creds.Account) + } + + // Encode the query parameters. + r.url.RawQuery = r.Params.Encode() + + // Check if we should encode the body. + if r.body == nil && r.Obj != nil { + if b, err := EncodeBody(r.Obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request. + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + // Set request base URL. + req.URL.Host = cfg.BaseURL.Host + req.URL.Scheme = cfg.BaseURL.Scheme + + // Set request headers. + req.Host = cfg.BaseURL.Host + req.Header = r.header + req.Header.Set("Content-Type", cfg.ContentType) + req.Header.Add("Accept", cfg.ContentType) + req.Header.Add("User-Agent", cfg.UserAgent) + + return req.WithContext(ctx), nil +} + +// EncodeBody is used to encode a request body +func EncodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + if err := json.NewEncoder(buf).Encode(obj); err != nil { + return nil, err + } + return buf, nil +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go new file mode 100644 index 0000000000..0956ea42b9 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go @@ -0,0 +1,110 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" +) + +type Response struct { + Request struct { + ID string `json:"id"` + } `json:"request"` + Response struct { + Errors []responseError `json:"errors"` + Items []json.RawMessage `json:"items"` + } `json:"response"` +} + 
+type responseError struct { + Code string `json:"code"` + Message string `json:"message"` + Field string `json:"field"` +} + +type Error struct { + Response *http.Response `json:"-"` + Code string `json:"code"` + Message string `json:"message"` + Field string `json:"field"` + RequestID string `json:"requestId"` +} + +func (e Error) Error() string { + msg := fmt.Sprintf("%v %v: %d (request: %q) %v: %v", + e.Response.Request.Method, e.Response.Request.URL, + e.Response.StatusCode, e.RequestID, e.Code, e.Message) + + if e.Field != "" { + msg = fmt.Sprintf("%s (field: %v)", msg, e.Field) + } + + return msg +} + +type Errors []Error + +func (es Errors) Error() string { + var stack string + for _, e := range es { + stack += e.Error() + "\n" + } + return stack +} + +// DecodeBody is used to JSON decode a body +func DecodeBody(resp *http.Response, out interface{}) error { + return json.NewDecoder(resp.Body).Decode(out) +} + +// RequireOK is used to verify response status code is a successful one (200 OK) +func RequireOK(resp *http.Response, err error) (*http.Response, error) { + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, extractError(resp) + } + return resp, nil +} + +// extractError is used to extract inner/logical errors from the response +func extractError(resp *http.Response) error { + buf := bytes.NewBuffer(nil) + + // TeeReader returns a Reader that writes to b what it reads from r.Body. 
+ reader := io.TeeReader(resp.Body, buf) + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(buf) + + var out Response + if err := json.NewDecoder(reader).Decode(&out); err != nil { + return err + } + + var errors Errors + if errs := out.Response.Errors; len(errs) > 0 { + for _, err := range errs { + errors = append(errors, Error{ + Response: resp, + RequestID: out.Request.ID, + Code: err.Code, + Message: err.Message, + Field: err.Field, + }) + } + } else { + errors = append(errors, Error{ + Response: resp, + RequestID: out.Request.ID, + Code: strconv.Itoa(resp.StatusCode), + Message: http.StatusText(resp.StatusCode), + }) + } + + return errors +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go new file mode 100644 index 0000000000..db349b29cc --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go @@ -0,0 +1,161 @@ +package spotinst + +import ( + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/spotinst/spotinst-sdk-go/spotinst/credentials" + "github.com/spotinst/spotinst-sdk-go/spotinst/log" +) + +const ( + // defaultBaseURL is the default base URL of the Spotinst API. + // It is used e.g. when initializing a new Client without a specific address. + defaultBaseURL = "https://api.spotinst.io" + + // defaultContentType is the default content type to use when making HTTP + // calls. + defaultContentType = "application/json" + + // defaultUserAgent is the default user agent to use when making HTTP + // calls. + defaultUserAgent = SDKName + "/" + SDKVersion + + // defaultMaxRetries is the number of retries for a single request after + // the client will give up and return an error. It is zero by default, so + // retry is disabled by default. + defaultMaxRetries = 0 + + // defaultGzipEnabled specifies if gzip compression is enabled by default. 
+ defaultGzipEnabled = false +) + +// A Config provides Configuration to a service client instance. +type Config struct { + BaseURL *url.URL + HTTPClient *http.Client + Credentials *credentials.Credentials + Logger log.Logger + UserAgent string + ContentType string +} + +func DefaultBaseURL() *url.URL { + baseURL, _ := url.Parse(defaultBaseURL) + return baseURL +} + +// DefaultTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: false, + MaxIdleConnsPerHost: 1, + } +} + +// DefaultHTTPClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// KeepAlives disabled. +func DefaultHTTPClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to API. If you have a long-lived +// client object, this is the desired behavior and should make the most efficient +// use of the connections to API. +func DefaultConfig() *Config { + return &Config{ + BaseURL: DefaultBaseURL(), + HTTPClient: DefaultHTTPClient(), + UserAgent: defaultUserAgent, + ContentType: defaultContentType, + Credentials: credentials.NewChainCredentials( + new(credentials.EnvProvider), + new(credentials.FileProvider), + ), + } +} + +// WithBaseURL defines the base URL of the Spotinst API. 
+func (c *Config) WithBaseURL(rawurl string) *Config { + baseURL, _ := url.Parse(rawurl) + c.BaseURL = baseURL + return c +} + +// WithHTTPClient defines the HTTP client. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithCredentials defines the credentials. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithUserAgent defines the user agent. +func (c *Config) WithUserAgent(ua string) *Config { + c.UserAgent = fmt.Sprintf("%s,%s", ua, c.UserAgent) + return c +} + +// WithContentType defines the content type. +func (c *Config) WithContentType(ct string) *Config { + c.ContentType = ct + return c +} + +// WithLogger defines the logger for informational messages, e.g. requests +// and their response times. It is nil by default. +func (c *Config) WithLogger(logger log.Logger) *Config { + c.Logger = logger + return c +} + +// Merge merges the passed in configs into the existing config object. 
+func (c *Config) Merge(cfgs ...*Config) { + for _, other := range cfgs { + mergeConfig(c, other) + } +} + +func mergeConfig(dst *Config, other *Config) { + if other == nil { + return + } + if other.BaseURL != nil { + dst.BaseURL = other.BaseURL + } + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + if other.UserAgent != "" { + dst.UserAgent = other.UserAgent + } + if other.ContentType != "" { + dst.ContentType = other.ContentType + } + if other.Logger != nil { + dst.Logger = other.Logger + } +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/BUILD.bazel new file mode 100644 index 0000000000..ef3c0205b0 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "credentials.go", + "provider_chain.go", + "provider_env.go", + "provider_file.go", + "provider_static.go", + ], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials", + importpath = "github.com/spotinst/spotinst-sdk-go/spotinst/credentials", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go new file mode 100644 index 0000000000..65ee1925dd --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go @@ -0,0 +1,86 @@ +package credentials + +import ( + "fmt" + "sync" +) + +// A Value is the Spotinst credentials value for individual credential fields. +type Value struct { + // Spotinst API token. + Token string `json:"token"` + + // Spotinst account ID. 
+ Account string `json:"account"` + + // Provider used to get credentials. + ProviderName string `json:"-"` +} + +// A Provider is the interface for any component which will provide credentials +// Value. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + fmt.Stringer + + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value was not obtainable, or empty. + Retrieve() (Value, error) +} + +// A Credentials provides synchronous safe retrieval of Spotinst credentials. +// Credentials will cache the credentials value. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value. +type Credentials struct { + provider Provider + mu sync.Mutex + forceRefresh bool + creds Value +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value. If the credentials Value is empty +// the Provider's Retrieve() will be called to refresh the credentials. 
+func (c *Credentials) Get() (Value, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.creds.Token == "" || c.forceRefresh { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Refresh refreshes the credentials and forces them to be retrieved on the +// next call to Get(). +func (c *Credentials) Refresh() { + c.mu.Lock() + defer c.mu.Unlock() + + c.forceRefresh = true +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go new file mode 100644 index 0000000000..2bb80fe8f3 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go @@ -0,0 +1,102 @@ +package credentials + +import ( + "errors" + "fmt" +) + +// ErrNoValidProvidersFoundInChain Is returned when there are no valid +// credentials providers in the ChainProvider. +var ErrNoValidProvidersFoundInChain = errors.New("spotinst: no valid credentials providers in chain") + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls until Retrieve is called again. +// +// Example of ChainProvider to be used with an EnvCredentialsProvider and +// FileCredentialsProvider. In this example EnvProvider will first check if +// any credentials are available via the environment variables. 
If there are +// none ChainProvider will check the next Provider in the list, FileProvider +// in this case. If FileCredentialsProvider does not return any credentials +// ChainProvider will return the error ErrNoValidProvidersFoundInChain. +// +// creds := credentials.NewChainCredentials( +// new(credentials.EnvProvider), +// new(credentials.FileProvider), +// ) +type ChainProvider struct { + Providers []Provider + active Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers ...Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: providers, + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. If a provider is found it will be cached. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs errorList + for _, p := range c.Providers { + value, err := p.Retrieve() + if err == nil { + c.active = p + return value, nil + } + errs = append(errs, err) + } + c.active = nil + + err := ErrNoValidProvidersFoundInChain + if len(errs) > 0 { + err = errs + } + + return Value{}, err +} + +func (c *ChainProvider) String() string { + var out string + for i, provider := range c.Providers { + out += provider.String() + if i < len(c.Providers)-1 { + out += " " + } + } + return out +} + +// An error list that satisfies the error interface. +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n'. 
+ if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go new file mode 100644 index 0000000000..3e85791e6c --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go @@ -0,0 +1,62 @@ +package credentials + +import ( + "fmt" + "os" +) + +const ( + // EnvCredentialsProviderName provides a name of Env provider. + EnvCredentialsProviderName = "EnvCredentialsProvider" + + // EnvCredentialsVarToken specifies the name of the environment variable + // points to the Spotinst Token. + EnvCredentialsVarToken = "SPOTINST_TOKEN" + + // EnvCredentialsVarAccount specifies the name of the environment variable + // points to the Spotinst account ID. + EnvCredentialsVarAccount = "SPOTINST_ACCOUNT" +) + +// ErrEnvCredentialsTokenNotFound is returned when the Spotinst Token can't be +// found in the process's environment. +var ErrEnvCredentialsTokenNotFound = fmt.Errorf("spotinst: %s not found in environment", EnvCredentialsVarToken) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. +// +// Environment variables used: +// * Token: SPOTINST_TOKEN +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. 
+func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + token := os.Getenv(EnvCredentialsVarToken) + if token == "" { + return Value{ProviderName: EnvCredentialsProviderName}, + ErrEnvCredentialsTokenNotFound + } + + e.retrieved = true + value := Value{ + Token: token, + Account: os.Getenv(EnvCredentialsVarAccount), + ProviderName: EnvCredentialsProviderName, + } + + return value, nil +} + +func (e *EnvProvider) String() string { + return EnvCredentialsProviderName +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go new file mode 100644 index 0000000000..761a854852 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go @@ -0,0 +1,129 @@ +package credentials + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" +) + +const ( + // FileCredentialsProviderName provides a name of File provider. + FileCredentialsProviderName = "FileCredentialsProvider" + + // FileCredentialsEnvVarFile specifies the name of the environment variable + // points to the location of the credentials file. + FileCredentialsEnvVarFile = "SPOTINST_CREDENTIALS_FILE" +) + +var ( + // ErrFileCredentialsHomeNotFound is emitted when the user directory + // cannot be found. + ErrFileCredentialsHomeNotFound = errors.New("spotinst: user home directory not found") + + // ErrFileCredentialsLoadFailed is emitted when the provider is unable to + // load credentials from the credentials file. + ErrFileCredentialsLoadFailed = errors.New("spotinst: failed to load credentials file") + + // ErrFileCredentialsTokenNotFound is emitted when the loaded credentials + // did not contain a valid token. + ErrFileCredentialsTokenNotFound = errors.New("spotinst: credentials did not contain token") +) + +// A FileProvider retrieves credentials from the current user's home +// directory. 
+type FileProvider struct { + // Path to the credentials file. + // + // If empty will look for FileCredentialsEnvVarFile env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.spotinst/credentials.json" + // Windows: "%USERPROFILE%\.spotinst\credentials.json" + Filename string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileCredentials returns a pointer to a new Credentials object +// wrapping the file provider. +func NewFileCredentials(filename string) *Credentials { + return NewCredentials(&FileProvider{ + Filename: filename, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: FileCredentialsProviderName}, err + } + + creds, err := p.loadCredentials(filename) + if err != nil { + return Value{ProviderName: FileCredentialsProviderName}, err + } + + if len(creds.ProviderName) == 0 { + creds.ProviderName = FileCredentialsProviderName + } + + p.retrieved = true + return creds, nil +} + +func (p *FileProvider) String() string { + return FileCredentialsProviderName +} + +// filename returns the filename to use to read Spotinst credentials. +// +// Will return an error if the user's home directory path cannot be found. 
+func (p *FileProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv(FileCredentialsEnvVarFile); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrFileCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".spotinst", "credentials") + } + + return p.Filename, nil +} + +// loadCredentials loads the credentials from the file pointed to by filename. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func (p *FileProvider) loadCredentials(filename string) (Value, error) { + f, err := os.Open(filename) + if err != nil { + return Value{ProviderName: FileCredentialsProviderName}, + fmt.Errorf("%s: %s", ErrFileCredentialsLoadFailed.Error(), err) + } + defer f.Close() + + var value Value + if err := json.NewDecoder(f).Decode(&value); err != nil { + return Value{ProviderName: FileCredentialsProviderName}, + fmt.Errorf("%s: %s", ErrFileCredentialsLoadFailed.Error(), err) + } + if token := value.Token; len(token) == 0 { + return Value{ProviderName: FileCredentialsProviderName}, + ErrFileCredentialsTokenNotFound + } + + return value, nil +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go new file mode 100644 index 0000000000..bd547f59cd --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go @@ -0,0 +1,41 @@ +package credentials + +import ( + "errors" +) + +// StaticCredentialsProviderName provides a name of Static provider. +const StaticCredentialsProviderName = "StaticProvider" + +// ErrStaticCredentialsEmpty is emitted when static credentials are empty. 
+var ErrStaticCredentialsEmpty = errors.New("spotinst: static credentials are empty") + +// A StaticProvider is a set of credentials which are set programmatically. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(token, account string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + Token: token, + Account: account, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.Token == "" { + return Value{ProviderName: StaticCredentialsProviderName}, + ErrStaticCredentialsEmpty + } + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticCredentialsProviderName + } + return s.Value, nil +} + +func (s *StaticProvider) String() string { + return StaticCredentialsProviderName +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/BUILD.bazel new file mode 100644 index 0000000000..7df5da4c83 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["log.go"], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log", + importpath = "github.com/spotinst/spotinst-sdk-go/spotinst/log", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go new file mode 100644 index 0000000000..18b184d424 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go @@ -0,0 +1,26 @@ +package log + +import ( + "log" + "os" +) + +// DefaultStdLogger represents the default logging object that +// generates lines of 
output to os.Stderr. +var DefaultStdLogger Logger = log.New(os.Stderr, "", log.LstdFlags) + +// Logger specifies the interface for all log operations. +type Logger interface { + Printf(format string, args ...interface{}) +} + +// The LoggerFunc type is an adapter to allow the use of +// ordinary functions as Logger. If f is a function +// with the appropriate signature, LoggerFunc(f) is a +// Logger that calls f. +type LoggerFunc func(format string, args ...interface{}) + +// Printf calls f(format, args). +func (f LoggerFunc) Printf(format string, args ...interface{}) { + f(format, args...) +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/BUILD.bazel new file mode 100644 index 0000000000..555db33783 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["session.go"], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session", + importpath = "github.com/spotinst/spotinst-sdk-go/spotinst/session", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/spotinst/spotinst-sdk-go/spotinst:go_default_library"], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go new file mode 100644 index 0000000000..1911a45531 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go @@ -0,0 +1,22 @@ +package session + +import ( + "github.com/spotinst/spotinst-sdk-go/spotinst" +) + +// A Session provides a central location to create service clients. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. 
+type Session struct { + Config *spotinst.Config +} + +// New creates a new instance of Session. Once the Session is created it +// can be mutated to modify the Config. The Session is safe to be read +// concurrently, but it should not be written to concurrently. +func New(cfgs ...*spotinst.Config) *Session { + s := &Session{Config: spotinst.DefaultConfig()} + s.Config.Merge(cfgs...) + return s +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go new file mode 100644 index 0000000000..2c10b87c0a --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go @@ -0,0 +1,357 @@ +package spotinst + +import "time" + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := 
make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers. +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values. 
+func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers. +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values. +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers. +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values. +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers. +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values. 
+func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers. +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values. +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers. +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values. +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers. +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values. +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers. +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values. 
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/BUILD.bazel new file mode 100644 index 0000000000..c72682086e --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["json.go"], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil", + importpath = "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/json.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/json.go new file mode 100644 index 0000000000..a40b0298de --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/json.go @@ -0,0 +1,184 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonutil + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if any of the following is true: +// * it has a non-empty value +// * its field name is present in forceSendFields and it is not a nil pointer or nil interface +// * its field name is present in nullFields. +// The JSON key for each selected field is taken from the field's json: struct tag. 
+func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { + if len(forceSendFields) == 0 && len(nullFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]struct{}) + for _, f := range forceSendFields { + mustInclude[f] = struct{}{} + } + useNull := make(map[string]struct{}) + for _, f := range nullFields { + useNull[f] = struct{}{} + } + + dataMap, err := schemaToMap(schema, mustInclude, useNull) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude, useNull map[string]struct{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + + if _, ok := useNull[f.Name]; ok { + if !isEmptyValue(v) { + return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name) + } + m[tag.apiName] = nil + continue + } + if !includeField(v, f, mustInclude) { + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. 
+func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. +func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. 
We ignore + // these fields, for the same reasons as given above for pointers. + if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + _, ok := mustInclude[f.Name] + return ok || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/BUILD.bazel new file mode 100644 index 0000000000..4ac28b439f --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["stringutil.go"], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil", + importpath = "github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go new file mode 
100644 index 0000000000..87d89e7f6a --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go @@ -0,0 +1,69 @@ +package stringutil + +import ( + "bytes" + "fmt" + "io" + "reflect" +) + +// Stringify attempts to create a reasonable string representation of types. +// It does things like resolve pointers to their values and omits struct +// fields with nil values. +func Stringify(message interface{}) string { + var buf bytes.Buffer + v := reflect.ValueOf(message) + stringifyValue(&buf, v) + return buf.String() +} + +// stringifyValue was heavily inspired by the goprotobuf library. +func stringifyValue(w io.Writer, val reflect.Value) { + if val.Kind() == reflect.Ptr && val.IsNil() { + w.Write([]byte("")) + return + } + v := reflect.Indirect(val) + switch v.Kind() { + case reflect.String: + fmt.Fprintf(w, `"%s"`, v) + case reflect.Slice: + w.Write([]byte{'['}) + for i := 0; i < v.Len(); i++ { + if i > 0 { + w.Write([]byte{' '}) + } + stringifyValue(w, v.Index(i)) + } + w.Write([]byte{']'}) + return + case reflect.Struct: + if v.Type().Name() != "" { + w.Write([]byte(v.Type().String())) + } + w.Write([]byte{'{'}) + var sep bool + for i := 0; i < v.NumField(); i++ { + fv := v.Field(i) + if fv.Kind() == reflect.Ptr && fv.IsNil() { + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + continue + } + if sep { + w.Write([]byte(", ")) + } else { + sep = true + } + w.Write([]byte(v.Type().Field(i).Name)) + w.Write([]byte{':'}) + stringifyValue(w, fv) + } + w.Write([]byte{'}'}) + default: + if v.CanInterface() { + fmt.Fprint(w, v.Interface()) + } + } +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/BUILD.bazel b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/BUILD.bazel new file mode 100644 index 0000000000..30d61e1c2c --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/BUILD.bazel @@ -0,0 +1,12 @@ 
+load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "uritemplates.go", + "utils.go", + ], + importmap = "k8s.io/kops/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates", + importpath = "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/LICENSE b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/LICENSE new file mode 100644 index 0000000000..de9c88cb65 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go new file mode 100644 index 0000000000..f6b208ccbb --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go @@ -0,0 +1,361 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// +// To use uritemplates, parse a template string and expand it with a value +// map: +// +// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") +// values := make(map[string]interface{}) +// values["user"] = "jtacoma" +// values["repo"] = "uritemplates" +// expanded, _ := template.ExpandString(values) +// fmt.Printf(expanded) +// +package uritemplates + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) (escaped string) { + if allowReserved { + escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return escaped +} + +type Values map[string]interface{} + +// A Template is a parsed representation of a URI template. 
+type Template struct { + raw string + parts []templatePart +} + +// Parse parses a URI template string into a Template object. +func Parse(rawtemplate string) (template *Template, err error) { + template = new(Template) + template.raw = rawtemplate + split := strings.Split(rawtemplate, "{") + template.parts = make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + err = errors.New("unexpected }") + break + } + template.parts[i].raw = s + } else { + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + err = errors.New("malformed template") + break + } + expression := subsplit[0] + template.parts[i*2-1], err = parseExpression(expression) + if err != nil { + break + } + template.parts[i*2].raw = subsplit[1] + } + } + if err != nil { + template = nil + } + return template, err +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" 
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. 
+func (self *Template) Expand(value interface{}) (string, error) { + values, ismap := value.(Values) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected Values, struct, or pointer to struct") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values Values) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, self.allowReserved)) +} + +func (self *templatePart) 
expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git 
a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go new file mode 100644 index 0000000000..bde51c857b --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go @@ -0,0 +1,9 @@ +package uritemplates + +func Expand(path string, values Values) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + return template.Expand(values) +} diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go new file mode 100644 index 0000000000..0df1a30fa1 --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go @@ -0,0 +1,7 @@ +package spotinst + +// SDKVersion is the current version of the SDK. +const SDKVersion = "3.2.24" + +// SDKName is the name of the SDK. +const SDKName = "spotinst-sdk-go" diff --git a/vendor/k8s.io/client-go/util/workqueue/BUILD.bazel b/vendor/k8s.io/client-go/util/workqueue/BUILD.bazel deleted file mode 100644 index f3f08c96e8..0000000000 --- a/vendor/k8s.io/client-go/util/workqueue/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "default_rate_limiters.go", - "delaying_queue.go", - "doc.go", - "metrics.go", - "parallelizer.go", - "queue.go", - "rate_limitting_queue.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/client-go/util/workqueue", - importpath = "k8s.io/client-go/util/workqueue", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/time/rate:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go 
b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go deleted file mode 100644 index a5bed29e00..0000000000 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -import ( - "math" - "sync" - "time" - - "golang.org/x/time/rate" -) - -type RateLimiter interface { - // When gets an item and gets to decide how long that item should wait - When(item interface{}) time.Duration - // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing - // or for success, we'll stop tracking it - Forget(item interface{}) - // NumRequeues returns back how many failures the item has had - NumRequeues(item interface{}) int -} - -// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has -// both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential -func DefaultControllerRateLimiter() RateLimiter { - return NewMaxOfRateLimiter( - NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), - // 10 qps, 100 bucket size. 
This is only for retry speed and its only the overall factor (not per item) - &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, - ) -} - -// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API -type BucketRateLimiter struct { - *rate.Limiter -} - -var _ RateLimiter = &BucketRateLimiter{} - -func (r *BucketRateLimiter) When(item interface{}) time.Duration { - return r.Limiter.Reserve().Delay() -} - -func (r *BucketRateLimiter) NumRequeues(item interface{}) int { - return 0 -} - -func (r *BucketRateLimiter) Forget(item interface{}) { -} - -// ItemExponentialFailureRateLimiter does a simple baseDelay*10^ limit -// dealing with max failures and expiration are up to the caller -type ItemExponentialFailureRateLimiter struct { - failuresLock sync.Mutex - failures map[interface{}]int - - baseDelay time.Duration - maxDelay time.Duration -} - -var _ RateLimiter = &ItemExponentialFailureRateLimiter{} - -func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { - return &ItemExponentialFailureRateLimiter{ - failures: map[interface{}]int{}, - baseDelay: baseDelay, - maxDelay: maxDelay, - } -} - -func DefaultItemBasedRateLimiter() RateLimiter { - return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) -} - -func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - exp := r.failures[item] - r.failures[item] = r.failures[item] + 1 - - // The backoff is capped such that 'calculated' value never overflows. 
- backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp)) - if backoff > math.MaxInt64 { - return r.maxDelay - } - - calculated := time.Duration(backoff) - if calculated > r.maxDelay { - return r.maxDelay - } - - return calculated -} - -func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - return r.failures[item] -} - -func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - delete(r.failures, item) -} - -// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that -type ItemFastSlowRateLimiter struct { - failuresLock sync.Mutex - failures map[interface{}]int - - maxFastAttempts int - fastDelay time.Duration - slowDelay time.Duration -} - -var _ RateLimiter = &ItemFastSlowRateLimiter{} - -func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { - return &ItemFastSlowRateLimiter{ - failures: map[interface{}]int{}, - fastDelay: fastDelay, - slowDelay: slowDelay, - maxFastAttempts: maxFastAttempts, - } -} - -func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - r.failures[item] = r.failures[item] + 1 - - if r.failures[item] <= r.maxFastAttempts { - return r.fastDelay - } - - return r.slowDelay -} - -func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - return r.failures[item] -} - -func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - delete(r.failures, item) -} - -// MaxOfRateLimiter calls every RateLimiter and returns the worst case response -// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items -// were separately delayed 
a longer time. -type MaxOfRateLimiter struct { - limiters []RateLimiter -} - -func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { - ret := time.Duration(0) - for _, limiter := range r.limiters { - curr := limiter.When(item) - if curr > ret { - ret = curr - } - } - - return ret -} - -func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { - return &MaxOfRateLimiter{limiters: limiters} -} - -func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { - ret := 0 - for _, limiter := range r.limiters { - curr := limiter.NumRequeues(item) - if curr > ret { - ret = curr - } - } - - return ret -} - -func (r *MaxOfRateLimiter) Forget(item interface{}) { - for _, limiter := range r.limiters { - limiter.Forget(item) - } -} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go deleted file mode 100644 index a37177425d..0000000000 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -import ( - "container/heap" - "time" - - "k8s.io/apimachinery/pkg/util/clock" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to -// requeue items after failures without ending up in a hot-loop. 
-type DelayingInterface interface { - Interface - // AddAfter adds an item to the workqueue after the indicated duration has passed - AddAfter(item interface{}, duration time.Duration) -} - -// NewDelayingQueue constructs a new workqueue with delayed queuing ability -func NewDelayingQueue() DelayingInterface { - return newDelayingQueue(clock.RealClock{}, "") -} - -func NewNamedDelayingQueue(name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, name) -} - -func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { - ret := &delayingType{ - Interface: NewNamed(name), - clock: clock, - heartbeat: clock.NewTicker(maxWait), - stopCh: make(chan struct{}), - waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), - } - - go ret.waitingLoop() - - return ret -} - -// delayingType wraps an Interface and provides delayed re-enquing -type delayingType struct { - Interface - - // clock tracks time for delayed firing - clock clock.Clock - - // stopCh lets us signal a shutdown to the waiting loop - stopCh chan struct{} - - // heartbeat ensures we wait no more than maxWait before firing - heartbeat clock.Ticker - - // waitingForAddCh is a buffered channel that feeds waitingForAdd - waitingForAddCh chan *waitFor - - // metrics counts the number of retries - metrics retryMetrics -} - -// waitFor holds the data to add and the time it should be added -type waitFor struct { - data t - readyAt time.Time - // index in the priority queue (heap) - index int -} - -// waitForPriorityQueue implements a priority queue for waitFor items. -// -// waitForPriorityQueue implements heap.Interface. The item occurring next in -// time (i.e., the item with the smallest readyAt) is at the root (index 0). -// Peek returns this minimum item at index 0. Pop returns the minimum item after -// it has been removed from the queue and placed at index Len()-1 by -// container/heap. 
Push adds an item at index Len(), and container/heap -// percolates it into the correct location. -type waitForPriorityQueue []*waitFor - -func (pq waitForPriorityQueue) Len() int { - return len(pq) -} -func (pq waitForPriorityQueue) Less(i, j int) bool { - return pq[i].readyAt.Before(pq[j].readyAt) -} -func (pq waitForPriorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -// Push adds an item to the queue. Push should not be called directly; instead, -// use `heap.Push`. -func (pq *waitForPriorityQueue) Push(x interface{}) { - n := len(*pq) - item := x.(*waitFor) - item.index = n - *pq = append(*pq, item) -} - -// Pop removes an item from the queue. Pop should not be called directly; -// instead, use `heap.Pop`. -func (pq *waitForPriorityQueue) Pop() interface{} { - n := len(*pq) - item := (*pq)[n-1] - item.index = -1 - *pq = (*pq)[0:(n - 1)] - return item -} - -// Peek returns the item at the beginning of the queue, without removing the -// item or otherwise mutating the queue. It is safe to call directly. -func (pq waitForPriorityQueue) Peek() interface{} { - return pq[0] -} - -// ShutDown gives a way to shut off this queue -func (q *delayingType) ShutDown() { - q.Interface.ShutDown() - close(q.stopCh) - q.heartbeat.Stop() -} - -// AddAfter adds the given item to the work queue after the given delay -func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { - // don't add if we're already shutting down - if q.ShuttingDown() { - return - } - - q.metrics.retry() - - // immediately add things with no delay - if duration <= 0 { - q.Add(item) - return - } - - select { - case <-q.stopCh: - // unblock if ShutDown() is called - case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: - } -} - -// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening. 
-// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an -// expired item sitting for more than 10 seconds. -const maxWait = 10 * time.Second - -// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. -func (q *delayingType) waitingLoop() { - defer utilruntime.HandleCrash() - - // Make a placeholder channel to use when there are no items in our list - never := make(<-chan time.Time) - - waitingForQueue := &waitForPriorityQueue{} - heap.Init(waitingForQueue) - - waitingEntryByData := map[t]*waitFor{} - - for { - if q.Interface.ShuttingDown() { - return - } - - now := q.clock.Now() - - // Add ready entries - for waitingForQueue.Len() > 0 { - entry := waitingForQueue.Peek().(*waitFor) - if entry.readyAt.After(now) { - break - } - - entry = heap.Pop(waitingForQueue).(*waitFor) - q.Add(entry.data) - delete(waitingEntryByData, entry.data) - } - - // Set up a wait for the first item's readyAt (if one exists) - nextReadyAt := never - if waitingForQueue.Len() > 0 { - entry := waitingForQueue.Peek().(*waitFor) - nextReadyAt = q.clock.After(entry.readyAt.Sub(now)) - } - - select { - case <-q.stopCh: - return - - case <-q.heartbeat.C(): - // continue the loop, which will add ready items - - case <-nextReadyAt: - // continue the loop, which will add ready items - - case waitEntry := <-q.waitingForAddCh: - if waitEntry.readyAt.After(q.clock.Now()) { - insert(waitingForQueue, waitingEntryByData, waitEntry) - } else { - q.Add(waitEntry.data) - } - - drained := false - for !drained { - select { - case waitEntry := <-q.waitingForAddCh: - if waitEntry.readyAt.After(q.clock.Now()) { - insert(waitingForQueue, waitingEntryByData, waitEntry) - } else { - q.Add(waitEntry.data) - } - default: - drained = true - } - } - } - } -} - -// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue -func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, 
entry *waitFor) { - // if the entry already exists, update the time only if it would cause the item to be queued sooner - existing, exists := knownEntries[entry.data] - if exists { - if existing.readyAt.After(entry.readyAt) { - existing.readyAt = entry.readyAt - heap.Fix(q, existing.index) - } - - return - } - - heap.Push(q, entry) - knownEntries[entry.data] = entry -} diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go deleted file mode 100644 index 2a00c74ac5..0000000000 --- a/vendor/k8s.io/client-go/util/workqueue/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package workqueue provides a simple queue that supports the following -// features: -// * Fair: items processed in the order in which they are added. -// * Stingy: a single item will not be processed multiple times concurrently, -// and if an item is added multiple times before it can be processed, it -// will only be processed once. -// * Multiple consumers and producers. In particular, it is allowed for an -// item to be reenqueued while it is being processed. -// * Shutdown notifications. 
-package workqueue diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go deleted file mode 100644 index a481bdfb26..0000000000 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -import ( - "sync" - "time" -) - -// This file provides abstractions for setting the provider (e.g., prometheus) -// of metrics. - -type queueMetrics interface { - add(item t) - get(item t) - done(item t) -} - -// GaugeMetric represents a single numerical value that can arbitrarily go up -// and down. -type GaugeMetric interface { - Inc() - Dec() -} - -// CounterMetric represents a single numerical value that only ever -// goes up. -type CounterMetric interface { - Inc() -} - -// SummaryMetric captures individual observations. 
-type SummaryMetric interface { - Observe(float64) -} - -type noopMetric struct{} - -func (noopMetric) Inc() {} -func (noopMetric) Dec() {} -func (noopMetric) Observe(float64) {} - -type defaultQueueMetrics struct { - // current depth of a workqueue - depth GaugeMetric - // total number of adds handled by a workqueue - adds CounterMetric - // how long an item stays in a workqueue - latency SummaryMetric - // how long processing an item from a workqueue takes - workDuration SummaryMetric - addTimes map[t]time.Time - processingStartTimes map[t]time.Time -} - -func (m *defaultQueueMetrics) add(item t) { - if m == nil { - return - } - - m.adds.Inc() - m.depth.Inc() - if _, exists := m.addTimes[item]; !exists { - m.addTimes[item] = time.Now() - } -} - -func (m *defaultQueueMetrics) get(item t) { - if m == nil { - return - } - - m.depth.Dec() - m.processingStartTimes[item] = time.Now() - if startTime, exists := m.addTimes[item]; exists { - m.latency.Observe(sinceInMicroseconds(startTime)) - delete(m.addTimes, item) - } -} - -func (m *defaultQueueMetrics) done(item t) { - if m == nil { - return - } - - if startTime, exists := m.processingStartTimes[item]; exists { - m.workDuration.Observe(sinceInMicroseconds(startTime)) - delete(m.processingStartTimes, item) - } -} - -// Gets the time since the specified start in microseconds. -func sinceInMicroseconds(start time.Time) float64 { - return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) -} - -type retryMetrics interface { - retry() -} - -type defaultRetryMetrics struct { - retries CounterMetric -} - -func (m *defaultRetryMetrics) retry() { - if m == nil { - return - } - - m.retries.Inc() -} - -// MetricsProvider generates various metrics used by the queue. 
-type MetricsProvider interface { - NewDepthMetric(name string) GaugeMetric - NewAddsMetric(name string) CounterMetric - NewLatencyMetric(name string) SummaryMetric - NewWorkDurationMetric(name string) SummaryMetric - NewRetriesMetric(name string) CounterMetric -} - -type noopMetricsProvider struct{} - -func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { - return noopMetric{} -} - -var metricsFactory = struct { - metricsProvider MetricsProvider - setProviders sync.Once -}{ - metricsProvider: noopMetricsProvider{}, -} - -func newQueueMetrics(name string) queueMetrics { - var ret *defaultQueueMetrics - if len(name) == 0 { - return ret - } - return &defaultQueueMetrics{ - depth: metricsFactory.metricsProvider.NewDepthMetric(name), - adds: metricsFactory.metricsProvider.NewAddsMetric(name), - latency: metricsFactory.metricsProvider.NewLatencyMetric(name), - workDuration: metricsFactory.metricsProvider.NewWorkDurationMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, - } -} - -func newRetryMetrics(name string) retryMetrics { - var ret *defaultRetryMetrics - if len(name) == 0 { - return ret - } - return &defaultRetryMetrics{ - retries: metricsFactory.metricsProvider.NewRetriesMetric(name), - } -} - -// SetProvider sets the metrics provider of the metricsFactory. 
-func SetProvider(metricsProvider MetricsProvider) { - metricsFactory.setProviders.Do(func() { - metricsFactory.metricsProvider = metricsProvider - }) -} diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go deleted file mode 100644 index be668c4233..0000000000 --- a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -import ( - "sync" - - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -type DoWorkPieceFunc func(piece int) - -// Parallelize is a very simple framework that allow for parallelizing -// N independent pieces of work. 
-func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { - toProcess := make(chan int, pieces) - for i := 0; i < pieces; i++ { - toProcess <- i - } - close(toProcess) - - if pieces < workers { - workers = pieces - } - - wg := sync.WaitGroup{} - wg.Add(workers) - for i := 0; i < workers; i++ { - go func() { - defer utilruntime.HandleCrash() - defer wg.Done() - for piece := range toProcess { - doWorkPiece(piece) - } - }() - } - wg.Wait() -} diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go deleted file mode 100644 index dc9a7cc7b7..0000000000 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -import ( - "sync" -) - -type Interface interface { - Add(item interface{}) - Len() int - Get() (item interface{}, shutdown bool) - Done(item interface{}) - ShutDown() - ShuttingDown() bool -} - -// New constructs a new work queue (see the package comment). -func New() *Type { - return NewNamed("") -} - -func NewNamed(name string) *Type { - return &Type{ - dirty: set{}, - processing: set{}, - cond: sync.NewCond(&sync.Mutex{}), - metrics: newQueueMetrics(name), - } -} - -// Type is a work queue (see the package comment). -type Type struct { - // queue defines the order in which we will work on items. 
Every - // element of queue should be in the dirty set and not in the - // processing set. - queue []t - - // dirty defines all of the items that need to be processed. - dirty set - - // Things that are currently being processed are in the processing set. - // These things may be simultaneously in the dirty set. When we finish - // processing something and remove it from this set, we'll check if - // it's in the dirty set, and if so, add it to the queue. - processing set - - cond *sync.Cond - - shuttingDown bool - - metrics queueMetrics -} - -type empty struct{} -type t interface{} -type set map[t]empty - -func (s set) has(item t) bool { - _, exists := s[item] - return exists -} - -func (s set) insert(item t) { - s[item] = empty{} -} - -func (s set) delete(item t) { - delete(s, item) -} - -// Add marks item as needing processing. -func (q *Type) Add(item interface{}) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - if q.shuttingDown { - return - } - if q.dirty.has(item) { - return - } - - q.metrics.add(item) - - q.dirty.insert(item) - if q.processing.has(item) { - return - } - - q.queue = append(q.queue, item) - q.cond.Signal() -} - -// Len returns the current queue length, for informational purposes only. You -// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular -// value, that can't be synchronized properly. -func (q *Type) Len() int { - q.cond.L.Lock() - defer q.cond.L.Unlock() - return len(q.queue) -} - -// Get blocks until it can return an item to be processed. If shutdown = true, -// the caller should end their goroutine. You must call Done with item when you -// have finished processing it. -func (q *Type) Get() (item interface{}, shutdown bool) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - for len(q.queue) == 0 && !q.shuttingDown { - q.cond.Wait() - } - if len(q.queue) == 0 { - // We must be shutting down. 
- return nil, true - } - - item, q.queue = q.queue[0], q.queue[1:] - - q.metrics.get(item) - - q.processing.insert(item) - q.dirty.delete(item) - - return item, false -} - -// Done marks item as done processing, and if it has been marked as dirty again -// while it was being processed, it will be re-added to the queue for -// re-processing. -func (q *Type) Done(item interface{}) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - - q.metrics.done(item) - - q.processing.delete(item) - if q.dirty.has(item) { - q.queue = append(q.queue, item) - q.cond.Signal() - } -} - -// ShutDown will cause q to ignore all new items added to it. As soon as the -// worker goroutines have drained the existing items in the queue, they will be -// instructed to exit. -func (q *Type) ShutDown() { - q.cond.L.Lock() - defer q.cond.L.Unlock() - q.shuttingDown = true - q.cond.Broadcast() -} - -func (q *Type) ShuttingDown() bool { - q.cond.L.Lock() - defer q.cond.L.Unlock() - - return q.shuttingDown -} diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go deleted file mode 100644 index 417ac001b8..0000000000 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -// RateLimitingInterface is an interface that rate limits items being added to the queue. 
-type RateLimitingInterface interface { - DelayingInterface - - // AddRateLimited adds an item to the workqueue after the rate limiter says its ok - AddRateLimited(item interface{}) - - // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing - // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you - // still have to call `Done` on the queue. - Forget(item interface{}) - - // NumRequeues returns back how many times the item was requeued - NumRequeues(item interface{}) int -} - -// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability -// Remember to call Forget! If you don't, you may end up tracking failures forever. -func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewDelayingQueue(), - rateLimiter: rateLimiter, - } -} - -func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewNamedDelayingQueue(name), - rateLimiter: rateLimiter, - } -} - -// rateLimitingType wraps an Interface and provides rateLimited re-enquing -type rateLimitingType struct { - DelayingInterface - - rateLimiter RateLimiter -} - -// AddRateLimited AddAfter's the item based on the time when the rate limiter says its ok -func (q *rateLimitingType) AddRateLimited(item interface{}) { - q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) -} - -func (q *rateLimitingType) NumRequeues(item interface{}) int { - return q.rateLimiter.NumRequeues(item) -} - -func (q *rateLimitingType) Forget(item interface{}) { - q.rateLimiter.Forget(item) -} From 9f94c06e67d636a3fe53fc092bc3e7964ae13ef5 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Thu, 11 Oct 2018 19:40:09 +0300 Subject: [PATCH 03/17] fix: rename spotinst's feature flag --- cmd/kops/create_cluster.go | 4 ++-- pkg/featureflag/featureflag.go | 4 ++-- 
pkg/resources/aws/aws.go | 2 +- upup/pkg/fi/cloudup/apply_cluster.go | 2 +- upup/pkg/fi/cloudup/awsup/aws_cloud.go | 2 +- upup/pkg/fi/cloudup/bootstrapchannelbuilder.go | 2 +- upup/pkg/fi/cloudup/template_functions.go | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/kops/create_cluster.go b/cmd/kops/create_cluster.go index 165ffa7ceb..b886314b61 100644 --- a/cmd/kops/create_cluster.go +++ b/cmd/kops/create_cluster.go @@ -358,7 +358,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command { cmd.Flags().StringVar(&options.VSphereDatastore, "vsphere-datastore", options.VSphereDatastore, "vsphere-datastore is required for vSphere. Set a valid datastore in which to store dynamic provision volumes.") } - if featureflag.SpotinstIntegration.Enabled() { + if featureflag.Spotinst.Enabled() { // Spotinst flags cmd.Flags().StringVar(&options.SpotinstProduct, "spotinst-product", options.SpotinstProduct, "Set the product code.") cmd.Flags().StringVar(&options.SpotinstOrientation, "spotinst-orientation", options.SpotinstOrientation, "Set the group orientation.") @@ -852,7 +852,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e cluster.Spec.CloudConfig.VSphereDatastore = fi.String(c.VSphereDatastore) } - if featureflag.SpotinstIntegration.Enabled() { + if featureflag.Spotinst.Enabled() { if cluster.Spec.CloudConfig == nil { cluster.Spec.CloudConfig = &api.CloudConfiguration{} } diff --git a/pkg/featureflag/featureflag.go b/pkg/featureflag/featureflag.go index 66d935b95b..a8e053b219 100644 --- a/pkg/featureflag/featureflag.go +++ b/pkg/featureflag/featureflag.go @@ -77,8 +77,8 @@ var GoogleCloudBucketAcl = New("GoogleCloudBucketAcl", Bool(false)) // EnableNodeAuthorization enables the node authorization features var EnableNodeAuthorization = New("EnableNodeAuthorization", Bool(false)) -// SpotinstIntegration toggles the use of Spotinst integration. 
-var SpotinstIntegration = New("SpotinstIntegration", Bool(false)) +// Spotinst toggles the use of Spotinst integration. +var Spotinst = New("Spotinst", Bool(false)) var flags = make(map[string]*FeatureFlag) var flagsMutex sync.Mutex diff --git a/pkg/resources/aws/aws.go b/pkg/resources/aws/aws.go index 16dbb3b7b3..479cde54e2 100644 --- a/pkg/resources/aws/aws.go +++ b/pkg/resources/aws/aws.go @@ -84,7 +84,7 @@ func ListResourcesAWS(cloud awsup.AWSCloud, clusterName string) (map[string]*res ListIAMRoles, } - if featureflag.SpotinstIntegration.Enabled() { + if featureflag.Spotinst.Enabled() { // Spotinst Elastigroups listFunctions = append(listFunctions, ListSpotinstElastigroups) } else { diff --git a/upup/pkg/fi/cloudup/apply_cluster.go b/upup/pkg/fi/cloudup/apply_cluster.go index 1d22b0bfbf..c37372312e 100644 --- a/upup/pkg/fi/cloudup/apply_cluster.go +++ b/upup/pkg/fi/cloudup/apply_cluster.go @@ -678,7 +678,7 @@ func (c *ApplyClusterCmd) Run() error { KopsModelContext: modelContext, } - if featureflag.SpotinstIntegration.Enabled() { + if featureflag.Spotinst.Enabled() { l.Builders = append(l.Builders, &spotinstmodel.ElastigroupModelBuilder{ AWSModelContext: awsModelContext, BootstrapScript: bootstrapScriptBuilder, diff --git a/upup/pkg/fi/cloudup/awsup/aws_cloud.go b/upup/pkg/fi/cloudup/awsup/aws_cloud.go index bb8e53b044..1f2e15f530 100644 --- a/upup/pkg/fi/cloudup/awsup/aws_cloud.go +++ b/upup/pkg/fi/cloudup/awsup/aws_cloud.go @@ -269,7 +269,7 @@ func NewAWSCloud(region string, tags map[string]string) (AWSCloud, error) { c.route53.Handlers.Send.PushFront(requestLogger) c.addHandlers(region, &c.route53.Handlers) - if featureflag.SpotinstIntegration.Enabled() { + if featureflag.Spotinst.Enabled() { c.spotinst, err = spotinst.NewService(kops.CloudProviderAWS) if err != nil { return c, err diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index 43194737dd..5795f7649b 100644 --- 
a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -445,7 +445,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri } } - if featureflag.SpotinstIntegration.Enabled() { + if featureflag.Spotinst.Enabled() { key := "spotinst-kubernetes-cluster-controller.addons.k8s.io" version := "1.0.16" diff --git a/upup/pkg/fi/cloudup/template_functions.go b/upup/pkg/fi/cloudup/template_functions.go index 97d9ebd276..67fb475bc4 100644 --- a/upup/pkg/fi/cloudup/template_functions.go +++ b/upup/pkg/fi/cloudup/template_functions.go @@ -104,7 +104,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS return os.Getenv("DIGITALOCEAN_ACCESS_TOKEN") } - if featureflag.SpotinstIntegration.Enabled() { + if featureflag.Spotinst.Enabled() { if creds, err := spotinst.LoadCredentials(); err == nil { dest["SpotinstToken"] = func() string { return creds.Token } dest["SpotinstAccount"] = func() string { return creds.Account } From 0b9ab268623b8f27e7533738ff1547a0e5fde3e2 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Thu, 11 Oct 2018 19:45:06 +0300 Subject: [PATCH 04/17] fix: don't use curly brackets for additional scoping --- pkg/model/spotinstmodel/elastigroup.go | 400 ++++++++++++------------- 1 file changed, 185 insertions(+), 215 deletions(-) diff --git a/pkg/model/spotinstmodel/elastigroup.go b/pkg/model/spotinstmodel/elastigroup.go index a5151cae28..c81da4331d 100644 --- a/pkg/model/spotinstmodel/elastigroup.go +++ b/pkg/model/spotinstmodel/elastigroup.go @@ -84,279 +84,249 @@ func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { } // Cloud config. - { - if cfg := b.Cluster.Spec.CloudConfig; cfg != nil { - // Product. - if cfg.SpotinstProduct != nil { - group.Product = cfg.SpotinstProduct - } + if cfg := b.Cluster.Spec.CloudConfig; cfg != nil { + // Product. + if cfg.SpotinstProduct != nil { + group.Product = cfg.SpotinstProduct + } - // Orientation. 
- if cfg.SpotinstOrientation != nil { - group.Orientation = cfg.SpotinstOrientation - } + // Orientation. + if cfg.SpotinstOrientation != nil { + group.Orientation = cfg.SpotinstOrientation } } // Strategy. - { - for k, v := range ig.ObjectMeta.Labels { - switch k { - case InstanceGroupLabelOrientation: - group.Orientation = fi.String(v) - break + for k, v := range ig.ObjectMeta.Labels { + switch k { + case InstanceGroupLabelOrientation: + group.Orientation = fi.String(v) + break - case InstanceGroupLabelUtilizeReservedInstances: - if v == "true" { - group.UtilizeReservedInstances = fi.Bool(true) - } else if v == "false" { - group.UtilizeReservedInstances = fi.Bool(false) - } - break - - case InstanceGroupLabelFallbackToOnDemand: - if v == "true" { - group.FallbackToOnDemand = fi.Bool(true) - } else if v == "false" { - group.FallbackToOnDemand = fi.Bool(false) - } - break + case InstanceGroupLabelUtilizeReservedInstances: + if v == "true" { + group.UtilizeReservedInstances = fi.Bool(true) + } else if v == "false" { + group.UtilizeReservedInstances = fi.Bool(false) } + break + + case InstanceGroupLabelFallbackToOnDemand: + if v == "true" { + group.FallbackToOnDemand = fi.Bool(true) + } else if v == "false" { + group.FallbackToOnDemand = fi.Bool(false) + } + break } } // Instance profile. - { - iprof, err := b.LinkToIAMInstanceProfile(ig) + iprof, err := b.LinkToIAMInstanceProfile(ig) + if err != nil { + return err + } + group.IAMInstanceProfile = iprof + + // Root volume. + volumeSize := fi.Int32Value(ig.Spec.RootVolumeSize) + if volumeSize == 0 { + var err error + volumeSize, err = defaults.DefaultInstanceGroupVolumeSize(ig.Spec.Role) if err != nil { return err } - group.IAMInstanceProfile = iprof } - // Root volume. 
- { - volumeSize := fi.Int32Value(ig.Spec.RootVolumeSize) - if volumeSize == 0 { - var err error - volumeSize, err = defaults.DefaultInstanceGroupVolumeSize(ig.Spec.Role) - if err != nil { - return err - } - } - - volumeType := fi.StringValue(ig.Spec.RootVolumeType) - if volumeType == "" { - volumeType = awsmodel.DefaultVolumeType - } - - group.RootVolumeSize = fi.Int64(int64(volumeSize)) - group.RootVolumeType = fi.String(volumeType) - group.RootVolumeOptimization = ig.Spec.RootVolumeOptimization + volumeType := fi.StringValue(ig.Spec.RootVolumeType) + if volumeType == "" { + volumeType = awsmodel.DefaultVolumeType } + group.RootVolumeSize = fi.Int64(int64(volumeSize)) + group.RootVolumeType = fi.String(volumeType) + group.RootVolumeOptimization = ig.Spec.RootVolumeOptimization + // Tenancy. - { - if ig.Spec.Tenancy != "" { - group.Tenancy = fi.String(ig.Spec.Tenancy) - } + if ig.Spec.Tenancy != "" { + group.Tenancy = fi.String(ig.Spec.Tenancy) } // Risk. - { - var risk float64 - switch ig.Spec.Role { - case kops.InstanceGroupRoleMaster: - risk = 0 - case kops.InstanceGroupRoleNode: - risk = 100 - case kops.InstanceGroupRoleBastion: - risk = 0 - default: - return fmt.Errorf("spotinst: kops.Role not found %s", ig.Spec.Role) - } - group.Risk = &risk + var risk float64 + switch ig.Spec.Role { + case kops.InstanceGroupRoleMaster: + risk = 0 + case kops.InstanceGroupRoleNode: + risk = 100 + case kops.InstanceGroupRoleBastion: + risk = 0 + default: + return fmt.Errorf("spotinst: kops.Role not found %s", ig.Spec.Role) } + group.Risk = &risk // Security groups. 
- { - for _, id := range ig.Spec.AdditionalSecurityGroups { - sgTask := &awstasks.SecurityGroup{ - Name: fi.String(id), - ID: fi.String(id), - Shared: fi.Bool(true), - } - if err := c.EnsureTask(sgTask); err != nil { - return err - } - group.SecurityGroups = append(group.SecurityGroups, sgTask) + for _, id := range ig.Spec.AdditionalSecurityGroups { + sgTask := &awstasks.SecurityGroup{ + Name: fi.String(id), + ID: fi.String(id), + Shared: fi.Bool(true), } + if err := c.EnsureTask(sgTask); err != nil { + return err + } + group.SecurityGroups = append(group.SecurityGroups, sgTask) } // SSH Key. - { - sshKey, err := b.LinkToSSHKey() - if err != nil { - return err - } - group.SSHKey = sshKey + sshKey, err := b.LinkToSSHKey() + if err != nil { + return err } + group.SSHKey = sshKey // Load balancer. - { - var lb *awstasks.LoadBalancer - switch ig.Spec.Role { - case kops.InstanceGroupRoleMaster: - if b.UseLoadBalancerForAPI() { - lb = b.LinkToELB("api") - } - case kops.InstanceGroupRoleBastion: - lb = b.LinkToELB(model.BastionELBSecurityGroupPrefix) - } - if lb != nil { - group.LoadBalancer = lb + var lb *awstasks.LoadBalancer + switch ig.Spec.Role { + case kops.InstanceGroupRoleMaster: + if b.UseLoadBalancerForAPI() { + lb = b.LinkToELB("api") } + case kops.InstanceGroupRoleBastion: + lb = b.LinkToELB(model.BastionELBSecurityGroupPrefix) + } + if lb != nil { + group.LoadBalancer = lb } // User data. - { - userData, err := b.BootstrapScript.ResourceNodeUp(ig, b.Cluster) - if err != nil { - return err - } - group.UserData = userData + userData, err := b.BootstrapScript.ResourceNodeUp(ig, b.Cluster) + if err != nil { + return err } + group.UserData = userData // Public IP. 
- { - subnetMap := make(map[string]*kops.ClusterSubnetSpec) - for i := range b.Cluster.Spec.Subnets { - subnet := &b.Cluster.Spec.Subnets[i] - subnetMap[subnet.Name] = subnet - } - - var subnetType kops.SubnetType - for _, subnetName := range ig.Spec.Subnets { - subnet := subnetMap[subnetName] - if subnet == nil { - return fmt.Errorf("spotinst: InstanceGroup %q uses subnet %q that does not exist", ig.ObjectMeta.Name, subnetName) - } - if subnetType != "" && subnetType != subnet.Type { - return fmt.Errorf("spotinst: InstanceGroup %q cannot be in subnets of different Type", ig.ObjectMeta.Name) - } - subnetType = subnet.Type - } - - associatePublicIP := true - switch subnetType { - case kops.SubnetTypePublic, kops.SubnetTypeUtility: - associatePublicIP = true - if ig.Spec.AssociatePublicIP != nil { - associatePublicIP = *ig.Spec.AssociatePublicIP - } - case kops.SubnetTypePrivate: - associatePublicIP = false - if ig.Spec.AssociatePublicIP != nil { - if *ig.Spec.AssociatePublicIP { - glog.Warningf("Ignoring AssociatePublicIP=true for private InstanceGroup %q", ig.ObjectMeta.Name) - } - } - default: - return fmt.Errorf("spotinst: unknown subnet type %q", subnetType) - } - group.AssociatePublicIP = &associatePublicIP + subnetMap := make(map[string]*kops.ClusterSubnetSpec) + for i := range b.Cluster.Spec.Subnets { + subnet := &b.Cluster.Spec.Subnets[i] + subnetMap[subnet.Name] = subnet } + var subnetType kops.SubnetType + for _, subnetName := range ig.Spec.Subnets { + subnet := subnetMap[subnetName] + if subnet == nil { + return fmt.Errorf("spotinst: InstanceGroup %q uses subnet %q that does not exist", ig.ObjectMeta.Name, subnetName) + } + if subnetType != "" && subnetType != subnet.Type { + return fmt.Errorf("spotinst: InstanceGroup %q cannot be in subnets of different Type", ig.ObjectMeta.Name) + } + subnetType = subnet.Type + } + + associatePublicIP := true + switch subnetType { + case kops.SubnetTypePublic, kops.SubnetTypeUtility: + associatePublicIP = true + if 
ig.Spec.AssociatePublicIP != nil { + associatePublicIP = *ig.Spec.AssociatePublicIP + } + case kops.SubnetTypePrivate: + associatePublicIP = false + if ig.Spec.AssociatePublicIP != nil { + if *ig.Spec.AssociatePublicIP { + glog.Warningf("Ignoring AssociatePublicIP=true for private InstanceGroup %q", ig.ObjectMeta.Name) + } + } + default: + return fmt.Errorf("spotinst: unknown subnet type %q", subnetType) + } + group.AssociatePublicIP = &associatePublicIP + // Subnets. - { - subnets, err := b.GatherSubnets(ig) - if err != nil { - return err - } - if len(subnets) == 0 { - return fmt.Errorf("spotinst: could not determine any subnets for InstanceGroup %q; subnets was %s", ig.ObjectMeta.Name, ig.Spec.Subnets) - } - for _, subnet := range subnets { - group.Subnets = append(group.Subnets, b.LinkToSubnet(subnet)) - } + subnets, err := b.GatherSubnets(ig) + if err != nil { + return err + } + if len(subnets) == 0 { + return fmt.Errorf("spotinst: could not determine any subnets for InstanceGroup %q; subnets was %s", ig.ObjectMeta.Name, ig.Spec.Subnets) + } + for _, subnet := range subnets { + group.Subnets = append(group.Subnets, b.LinkToSubnet(subnet)) } // Capacity. 
- { - minSize := int32(1) - if ig.Spec.MinSize != nil { - minSize = fi.Int32Value(ig.Spec.MinSize) - } else if ig.Spec.Role == kops.InstanceGroupRoleNode { - minSize = 2 - } - - maxSize := int32(1) - if ig.Spec.MaxSize != nil { - maxSize = *ig.Spec.MaxSize - } else if ig.Spec.Role == kops.InstanceGroupRoleNode { - maxSize = 10 - } - - group.MinSize = fi.Int64(int64(minSize)) - group.MaxSize = fi.Int64(int64(maxSize)) + minSize := int32(1) + if ig.Spec.MinSize != nil { + minSize = fi.Int32Value(ig.Spec.MinSize) + } else if ig.Spec.Role == kops.InstanceGroupRoleNode { + minSize = 2 } + maxSize := int32(1) + if ig.Spec.MaxSize != nil { + maxSize = *ig.Spec.MaxSize + } else if ig.Spec.Role == kops.InstanceGroupRoleNode { + maxSize = 10 + } + + group.MinSize = fi.Int64(int64(minSize)) + group.MaxSize = fi.Int64(int64(maxSize)) + // Tags. - { - tags, err := b.CloudTagsForInstanceGroup(ig) - if err != nil { - return fmt.Errorf("spotinst: error building cloud tags: %v", err) - } - tags[awsup.TagClusterName] = b.ClusterName() - tags["Name"] = b.AutoscalingGroupName(ig) - group.Tags = tags + tags, err := b.CloudTagsForInstanceGroup(ig) + if err != nil { + return fmt.Errorf("spotinst: error building cloud tags: %v", err) } + tags[awsup.TagClusterName] = b.ClusterName() + tags["Name"] = b.AutoscalingGroupName(ig) + group.Tags = tags // Auto Scaler. - { - if ig.Spec.Role != kops.InstanceGroupRoleBastion { - group.ClusterIdentifier = fi.String(b.ClusterName()) + if ig.Spec.Role != kops.InstanceGroupRoleBastion { + group.ClusterIdentifier = fi.String(b.ClusterName()) - // Toggle auto scaler's features. 
- var autoScalerDisabled bool - var autoScalerNodeLabels bool - { - for k, v := range ig.ObjectMeta.Labels { - switch k { - case InstanceGroupLabelAutoScalerDisabled: - if v == "true" { - autoScalerDisabled = true - } else if v == "false" { - autoScalerDisabled = false - } - break - - case InstanceGroupLabelAutoScalerNodeLabels: - if v == "true" { - autoScalerNodeLabels = true - } else if v == "false" { - autoScalerNodeLabels = false - } - break + // Toggle auto scaler's features. + var autoScalerDisabled bool + var autoScalerNodeLabels bool + { + for k, v := range ig.ObjectMeta.Labels { + switch k { + case InstanceGroupLabelAutoScalerDisabled: + if v == "true" { + autoScalerDisabled = true + } else if v == "false" { + autoScalerDisabled = false } + break + + case InstanceGroupLabelAutoScalerNodeLabels: + if v == "true" { + autoScalerNodeLabels = true + } else if v == "false" { + autoScalerNodeLabels = false + } + break } } + } - // Toggle the auto scaler. - group.AutoScalerEnabled = fi.Bool(!autoScalerDisabled) + // Toggle the auto scaler. + group.AutoScalerEnabled = fi.Bool(!autoScalerDisabled) - // Set the node labels. - if ig.Spec.Role == kops.InstanceGroupRoleNode { - nodeLabels := make(map[string]string) - for k, v := range ig.Spec.NodeLabels { - if strings.HasPrefix(k, kops.NodeLabelInstanceGroup) && !autoScalerNodeLabels { - continue - } - nodeLabels[k] = v - } - if len(nodeLabels) > 0 { - group.AutoScalerNodeLabels = nodeLabels + // Set the node labels. 
+ if ig.Spec.Role == kops.InstanceGroupRoleNode { + nodeLabels := make(map[string]string) + for k, v := range ig.Spec.NodeLabels { + if strings.HasPrefix(k, kops.NodeLabelInstanceGroup) && !autoScalerNodeLabels { + continue } + nodeLabels[k] = v + } + if len(nodeLabels) > 0 { + group.AutoScalerNodeLabels = nodeLabels } } } From 46b1c70b96ea1044708951c4b497ced586888c3a Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 00:40:52 +0300 Subject: [PATCH 05/17] fix: ignore additional instance types --- pkg/apis/kops/validation/aws.go | 15 ++++++++------- pkg/model/awsmodel/autoscalinggroup.go | 3 ++- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/pkg/apis/kops/validation/aws.go b/pkg/apis/kops/validation/aws.go index bb63048a4c..385ef7f86d 100644 --- a/pkg/apis/kops/validation/aws.go +++ b/pkg/apis/kops/validation/aws.go @@ -94,15 +94,16 @@ func awsValidateAMIforNVMe(fieldPath *field.Path, ig *kops.InstanceGroup) field. allErrs := field.ErrorList{} for _, prefix := range NVMe_INSTANCE_PREFIXES { - if strings.Contains(strings.ToUpper(ig.Spec.MachineType), strings.ToUpper(prefix)) { - glog.V(2).Infof("machineType %s requires an image based on stretch to operate. Trying to check compatibility", ig.Spec.MachineType) - if strings.Contains(ig.Spec.Image, "jessie") { - errString := fmt.Sprintf("%s cannot use machineType %s with image based on Debian jessie.", ig.Name, ig.Spec.MachineType) - allErrs = append(allErrs, field.Forbidden(fieldPath, errString)) - continue + for _, machineType := range strings.Split(ig.Spec.MachineType, ",") { + if strings.Contains(strings.ToUpper(machineType), strings.ToUpper(prefix)) { + glog.V(2).Infof("machineType %s requires an image based on stretch to operate. 
Trying to check compatibility", machineType) + if strings.Contains(ig.Spec.Image, "jessie") { + errString := fmt.Sprintf("%s cannot use machineType %s with image based on Debian jessie.", ig.Name, machineType) + allErrs = append(allErrs, field.Forbidden(fieldPath, errString)) + continue + } } } - } return allErrs } diff --git a/pkg/model/awsmodel/autoscalinggroup.go b/pkg/model/awsmodel/autoscalinggroup.go index 90ea27d2b2..aa50828cd1 100644 --- a/pkg/model/awsmodel/autoscalinggroup.go +++ b/pkg/model/awsmodel/autoscalinggroup.go @@ -18,6 +18,7 @@ package awsmodel import ( "fmt" + "strings" "github.com/golang/glog" @@ -98,7 +99,7 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { }, IAMInstanceProfile: link, ImageID: s(ig.Spec.Image), - InstanceType: s(ig.Spec.MachineType), + InstanceType: s(strings.Split(ig.Spec.MachineType, ",")[0]), InstanceMonitoring: ig.Spec.DetailedInstanceMonitoring, RootVolumeSize: i64(int64(volumeSize)), From 7ae8515a05d0a816ce05ad9092e2b2ed66fd5eac Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 00:42:52 +0300 Subject: [PATCH 06/17] fix: execute gofmt using go1.10 --- upup/pkg/fi/cloudup/apply_cluster.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/upup/pkg/fi/cloudup/apply_cluster.go b/upup/pkg/fi/cloudup/apply_cluster.go index c37372312e..c0ffffc868 100644 --- a/upup/pkg/fi/cloudup/apply_cluster.go +++ b/upup/pkg/fi/cloudup/apply_cluster.go @@ -382,16 +382,16 @@ func (c *ApplyClusterCmd) Run() error { "iamRolePolicy": &awstasks.IAMRolePolicy{}, // VPC / Networking - "dhcpOptions": &awstasks.DHCPOptions{}, - "internetGateway": &awstasks.InternetGateway{}, - "route": &awstasks.Route{}, - "routeTable": &awstasks.RouteTable{}, - "routeTableAssociation": &awstasks.RouteTableAssociation{}, - "securityGroup": &awstasks.SecurityGroup{}, - "securityGroupRule": &awstasks.SecurityGroupRule{}, - "subnet": &awstasks.Subnet{}, - "vpc": &awstasks.VPC{}, - 
"ngw": &awstasks.NatGateway{}, + "dhcpOptions": &awstasks.DHCPOptions{}, + "internetGateway": &awstasks.InternetGateway{}, + "route": &awstasks.Route{}, + "routeTable": &awstasks.RouteTable{}, + "routeTableAssociation": &awstasks.RouteTableAssociation{}, + "securityGroup": &awstasks.SecurityGroup{}, + "securityGroupRule": &awstasks.SecurityGroupRule{}, + "subnet": &awstasks.Subnet{}, + "vpc": &awstasks.VPC{}, + "ngw": &awstasks.NatGateway{}, "vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{}, // ELB From 6509e32b1bfd62d2d0379f1ad5979c9dfa75cd36 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 00:50:00 +0300 Subject: [PATCH 07/17] fix: boilerplate headers and packages list --- hack/.packages | 3 +++ pkg/resources/spotinst/aws.go | 16 ++++++++++++++++ pkg/resources/spotinst/interfaces.go | 16 ++++++++++++++++ 3 files changed, 35 insertions(+) diff --git a/hack/.packages b/hack/.packages index 5a057c98cd..7f5367ee6f 100644 --- a/hack/.packages +++ b/hack/.packages @@ -108,6 +108,7 @@ k8s.io/kops/pkg/model/gcemodel k8s.io/kops/pkg/model/iam k8s.io/kops/pkg/model/openstackmodel k8s.io/kops/pkg/model/resources +k8s.io/kops/pkg/model/spotinstmodel k8s.io/kops/pkg/model/vspheremodel k8s.io/kops/pkg/openapi k8s.io/kops/pkg/pki @@ -120,6 +121,7 @@ k8s.io/kops/pkg/resources/digitalocean/dns k8s.io/kops/pkg/resources/gce k8s.io/kops/pkg/resources/openstack k8s.io/kops/pkg/resources/ops +k8s.io/kops/pkg/resources/spotinst k8s.io/kops/pkg/sshcredentials k8s.io/kops/pkg/systemd k8s.io/kops/pkg/templates @@ -163,6 +165,7 @@ k8s.io/kops/upup/pkg/fi/cloudup/gce k8s.io/kops/upup/pkg/fi/cloudup/gcetasks k8s.io/kops/upup/pkg/fi/cloudup/openstack k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks +k8s.io/kops/upup/pkg/fi/cloudup/spotinsttasks k8s.io/kops/upup/pkg/fi/cloudup/terraform k8s.io/kops/upup/pkg/fi/cloudup/vsphere k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks diff --git a/pkg/resources/spotinst/aws.go b/pkg/resources/spotinst/aws.go index 
cc75d65779..7824397d36 100644 --- a/pkg/resources/spotinst/aws.go +++ b/pkg/resources/spotinst/aws.go @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package spotinst import ( diff --git a/pkg/resources/spotinst/interfaces.go b/pkg/resources/spotinst/interfaces.go index 549f46bed8..d8ec3d0179 100644 --- a/pkg/resources/spotinst/interfaces.go +++ b/pkg/resources/spotinst/interfaces.go @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package spotinst import ( From 4ecc03ad99441c77a7628c6fdeab1ab6b5590334 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 01:07:07 +0300 Subject: [PATCH 08/17] fix: add product descriptions to the help output --- cmd/kops/create_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kops/create_cluster.go b/cmd/kops/create_cluster.go index b886314b61..8a7d4f38f6 100644 --- a/cmd/kops/create_cluster.go +++ b/cmd/kops/create_cluster.go @@ -360,8 +360,8 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command { if featureflag.Spotinst.Enabled() { // Spotinst flags - cmd.Flags().StringVar(&options.SpotinstProduct, "spotinst-product", options.SpotinstProduct, "Set the product code.") - cmd.Flags().StringVar(&options.SpotinstOrientation, "spotinst-orientation", options.SpotinstOrientation, "Set the group orientation.") + cmd.Flags().StringVar(&options.SpotinstProduct, "spotinst-product", options.SpotinstProduct, "Set the product description (valid values: Linux/UNIX, Linux/UNIX (Amazon VPC), Windows and Windows (Amazon VPC))") + cmd.Flags().StringVar(&options.SpotinstOrientation, "spotinst-orientation", options.SpotinstOrientation, "Set the prediction strategy (valid values: balanced, cost, equal-distribution and availability)") } return cmd From eec98c63c3468e977eedc811ee5728ae98fffc07 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 02:05:20 +0300 Subject: [PATCH 09/17] fix: generate conversion functions (apimachinery) --- pkg/apis/kops/v1alpha1/componentconfig.go | 3 +++ .../kops/v1alpha1/zz_generated.conversion.go | 4 ++++ .../kops/v1alpha1/zz_generated.deepcopy.go | 18 ++++++++++++++++++ pkg/apis/kops/v1alpha2/componentconfig.go | 3 +++ .../kops/v1alpha2/zz_generated.conversion.go | 4 ++++ .../kops/v1alpha2/zz_generated.deepcopy.go | 18 ++++++++++++++++++ pkg/apis/kops/zz_generated.deepcopy.go | 18 ++++++++++++++++++ 7 files changed, 68 insertions(+) diff --git 
a/pkg/apis/kops/v1alpha1/componentconfig.go b/pkg/apis/kops/v1alpha1/componentconfig.go index 6b5175021a..6574fff5d9 100644 --- a/pkg/apis/kops/v1alpha1/componentconfig.go +++ b/pkg/apis/kops/v1alpha1/componentconfig.go @@ -493,6 +493,9 @@ type CloudConfiguration struct { VSphereResourcePool *string `json:"vSphereResourcePool,omitempty"` VSphereDatastore *string `json:"vSphereDatastore,omitempty"` VSphereCoreDNSServer *string `json:"vSphereCoreDNSServer,omitempty"` + // Spotinst cloud-config specs + SpotinstProduct *string `json:"spotinstProduct,omitempty"` + SpotinstOrientation *string `json:"spotinstOrientation,omitempty"` } // HasAdmissionController checks if a specific admission controller is enabled diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go index 342c17a3cd..a83cef6a12 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go @@ -656,6 +656,8 @@ func autoConvert_v1alpha1_CloudConfiguration_To_kops_CloudConfiguration(in *Clou out.VSphereResourcePool = in.VSphereResourcePool out.VSphereDatastore = in.VSphereDatastore out.VSphereCoreDNSServer = in.VSphereCoreDNSServer + out.SpotinstProduct = in.SpotinstProduct + out.SpotinstOrientation = in.SpotinstOrientation return nil } @@ -677,6 +679,8 @@ func autoConvert_kops_CloudConfiguration_To_v1alpha1_CloudConfiguration(in *kops out.VSphereResourcePool = in.VSphereResourcePool out.VSphereDatastore = in.VSphereDatastore out.VSphereCoreDNSServer = in.VSphereCoreDNSServer + out.SpotinstProduct = in.SpotinstProduct + out.SpotinstOrientation = in.SpotinstOrientation return nil } diff --git a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go index e7d1424afe..dfb21bb462 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go @@ -525,6 +525,24 @@ func (in *CloudConfiguration) 
DeepCopyInto(out *CloudConfiguration) { **out = **in } } + if in.SpotinstProduct != nil { + in, out := &in.SpotinstProduct, &out.SpotinstProduct + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.SpotinstOrientation != nil { + in, out := &in.SpotinstOrientation, &out.SpotinstOrientation + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } diff --git a/pkg/apis/kops/v1alpha2/componentconfig.go b/pkg/apis/kops/v1alpha2/componentconfig.go index becf78bdc3..7e995c266f 100644 --- a/pkg/apis/kops/v1alpha2/componentconfig.go +++ b/pkg/apis/kops/v1alpha2/componentconfig.go @@ -493,6 +493,9 @@ type CloudConfiguration struct { VSphereResourcePool *string `json:"vSphereResourcePool,omitempty"` VSphereDatastore *string `json:"vSphereDatastore,omitempty"` VSphereCoreDNSServer *string `json:"vSphereCoreDNSServer,omitempty"` + // Spotinst cloud-config specs + SpotinstProduct *string `json:"spotinstProduct,omitempty"` + SpotinstOrientation *string `json:"spotinstOrientation,omitempty"` } // HasAdmissionController checks if a specific admission controller is enabled diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go index 7bcd1d1504..f41b239103 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go @@ -692,6 +692,8 @@ func autoConvert_v1alpha2_CloudConfiguration_To_kops_CloudConfiguration(in *Clou out.VSphereResourcePool = in.VSphereResourcePool out.VSphereDatastore = in.VSphereDatastore out.VSphereCoreDNSServer = in.VSphereCoreDNSServer + out.SpotinstProduct = in.SpotinstProduct + out.SpotinstOrientation = in.SpotinstOrientation return nil } @@ -713,6 +715,8 @@ func autoConvert_kops_CloudConfiguration_To_v1alpha2_CloudConfiguration(in *kops out.VSphereResourcePool = in.VSphereResourcePool out.VSphereDatastore = in.VSphereDatastore out.VSphereCoreDNSServer = 
in.VSphereCoreDNSServer + out.SpotinstProduct = in.SpotinstProduct + out.SpotinstOrientation = in.SpotinstOrientation return nil } diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index 66d4e8140a..2e060de61d 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -498,6 +498,24 @@ func (in *CloudConfiguration) DeepCopyInto(out *CloudConfiguration) { **out = **in } } + if in.SpotinstProduct != nil { + in, out := &in.SpotinstProduct, &out.SpotinstProduct + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.SpotinstOrientation != nil { + in, out := &in.SpotinstOrientation, &out.SpotinstOrientation + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index 0e2bb2257c..40c6d682c2 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -603,6 +603,24 @@ func (in *CloudConfiguration) DeepCopyInto(out *CloudConfiguration) { **out = **in } } + if in.SpotinstProduct != nil { + in, out := &in.SpotinstProduct, &out.SpotinstProduct + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.SpotinstOrientation != nil { + in, out := &in.SpotinstOrientation, &out.SpotinstOrientation + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } From 037b6a1c2cdde783339f9691ba1976b0a4ba3674 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 22:42:26 +0300 Subject: [PATCH 10/17] deps: vendor spotinst-sdk-go --- Gopkg.lock | 4 +- .../spotinst/spotinst-sdk-go/LICENSE | 201 ++++++++++++++++++ 2 files changed, 203 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/spotinst/spotinst-sdk-go/LICENSE diff --git a/Gopkg.lock b/Gopkg.lock index 
fd4d671ef7..0c9a50f8cc 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1076,7 +1076,7 @@ [[projects]] branch = "master" - digest = "1:b410da742bf89fa5f255ef1d95bdf42d2a263a0c8f35272b8d5b4faff49017cf" + digest = "1:1c5e9c80b7f295cb22373c59300c27c27c122a9c07410a6505b137afacf3583c" name = "github.com/spotinst/spotinst-sdk-go" packages = [ "service/elastigroup", @@ -1093,7 +1093,7 @@ "spotinst/util/uritemplates", ] pruneopts = "UT" - revision = "da4a02277b0a4c7c4a007c9265082237ef1170e5" + revision = "fed4677dbf8fe026a81e09e66fc38d863d091f9b" [[projects]] digest = "1:67ba0f5b63fa937e1e78273904a1fa0f7c2358c4dac967ac16e678f8e50e8aa5" diff --git a/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE b/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From 2faf3be960632b2d78e557489dfed5f7676017a8 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 22:46:14 +0300 Subject: [PATCH 11/17] fix: remove unnecessary fmt.Errorf call --- upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go index 4b1f1c1851..345d0c6735 100644 --- a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go +++ b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go @@ -191,7 +191,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) { if lc.UserData != nil { userData, err := base64.StdEncoding.DecodeString(*lc.UserData) if err != nil { - return nil, fmt.Errorf("spotinst: error decoding user data: %v", err) + return nil, err } actual.UserData = fi.WrapResource(fi.NewStringResource(string(userData))) } From 07376e5105b9fb2d53753029beae7cd347bbcef8 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 22:50:40 +0300 Subject: [PATCH 12/17] fix: max size defaults to 2 --- pkg/model/spotinstmodel/elastigroup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/model/spotinstmodel/elastigroup.go b/pkg/model/spotinstmodel/elastigroup.go index c81da4331d..7337dd15c8 100644 --- a/pkg/model/spotinstmodel/elastigroup.go +++ b/pkg/model/spotinstmodel/elastigroup.go @@ 
-269,7 +269,7 @@ func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { if ig.Spec.MaxSize != nil { maxSize = *ig.Spec.MaxSize } else if ig.Spec.Role == kops.InstanceGroupRoleNode { - maxSize = 10 + maxSize = 2 } group.MinSize = fi.Int64(int64(minSize)) From cfa14b687da4ad7998996eeed462ebc656f83f44 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 23:08:09 +0300 Subject: [PATCH 13/17] fix: parse boolean values --- pkg/model/spotinstmodel/elastigroup.go | 41 ++++++++++++++++---------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/pkg/model/spotinstmodel/elastigroup.go b/pkg/model/spotinstmodel/elastigroup.go index 7337dd15c8..cb075072c8 100644 --- a/pkg/model/spotinstmodel/elastigroup.go +++ b/pkg/model/spotinstmodel/elastigroup.go @@ -18,6 +18,7 @@ package spotinstmodel import ( "fmt" + "strconv" "strings" "github.com/golang/glog" @@ -104,19 +105,19 @@ func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { break case InstanceGroupLabelUtilizeReservedInstances: - if v == "true" { - group.UtilizeReservedInstances = fi.Bool(true) - } else if v == "false" { - group.UtilizeReservedInstances = fi.Bool(false) + b, err := parseBool(v) + if err != nil { + return err } + group.UtilizeReservedInstances = b break case InstanceGroupLabelFallbackToOnDemand: - if v == "true" { - group.FallbackToOnDemand = fi.Bool(true) - } else if v == "false" { - group.FallbackToOnDemand = fi.Bool(false) + b, err := parseBool(v) + if err != nil { + return err } + group.FallbackToOnDemand = b break } } @@ -295,19 +296,19 @@ func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { for k, v := range ig.ObjectMeta.Labels { switch k { case InstanceGroupLabelAutoScalerDisabled: - if v == "true" { - autoScalerDisabled = true - } else if v == "false" { - autoScalerDisabled = false + b, err := parseBool(v) + if err != nil { + return err } + autoScalerDisabled = fi.BoolValue(b) break case 
InstanceGroupLabelAutoScalerNodeLabels: - if v == "true" { - autoScalerNodeLabels = true - } else if v == "false" { - autoScalerNodeLabels = false + b, err := parseBool(v) + if err != nil { + return err } + autoScalerNodeLabels = fi.BoolValue(b) break } } @@ -336,3 +337,11 @@ func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { return nil } + +func parseBool(str string) (*bool, error) { + b, err := strconv.ParseBool(str) + if err != nil { + return nil, fmt.Errorf("spotinst: unexpected boolean value: %q", str) + } + return fi.Bool(b), nil +} From 67fd31696b0a53abd5b61361ea719077e2653f83 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Fri, 12 Oct 2018 23:18:42 +0300 Subject: [PATCH 14/17] fix: remove unnecessary nil check --- pkg/model/spotinstmodel/elastigroup.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/pkg/model/spotinstmodel/elastigroup.go b/pkg/model/spotinstmodel/elastigroup.go index cb075072c8..7afb9155bf 100644 --- a/pkg/model/spotinstmodel/elastigroup.go +++ b/pkg/model/spotinstmodel/elastigroup.go @@ -86,15 +86,8 @@ func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { // Cloud config. if cfg := b.Cluster.Spec.CloudConfig; cfg != nil { - // Product. - if cfg.SpotinstProduct != nil { - group.Product = cfg.SpotinstProduct - } - - // Orientation. - if cfg.SpotinstOrientation != nil { - group.Orientation = cfg.SpotinstOrientation - } + group.Product = cfg.SpotinstProduct + group.Orientation = cfg.SpotinstOrientation } // Strategy. 
From e1f85e0827834d650c4d1c2b70f6637230f4a0d2 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Sun, 14 Oct 2018 11:36:09 +0300 Subject: [PATCH 15/17] fix: update doc comments --- pkg/resources/spotinst/aws.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/resources/spotinst/aws.go b/pkg/resources/spotinst/aws.go index 7824397d36..9f4fc93214 100644 --- a/pkg/resources/spotinst/aws.go +++ b/pkg/resources/spotinst/aws.go @@ -139,7 +139,7 @@ func (e *awsElastigroup) MinSize() int { return fi.IntValue(e.obj.Capacity.Minim // MaxSize returns the maximum size of the Elastigroup. func (e *awsElastigroup) MaxSize() int { return fi.IntValue(e.obj.Capacity.Maximum) } -// Obj returns the underlying object which is a cloud-specific implementation. +// Obj returns the raw object which is a cloud-specific implementation. func (e *awsElastigroup) Obj() interface{} { return e.obj } type awsInstance struct { @@ -149,5 +149,5 @@ type awsInstance struct { // Id returns the ID of the instance. func (i *awsInstance) Id() string { return fi.StringValue(i.obj.ID) } -// Obj returns the underlying object which is a cloud-specific implementation. +// Obj returns the raw object which is a cloud-specific implementation. func (i *awsInstance) Obj() interface{} { return i.obj } From a8db93b7d45dbc7614552ec19084a69390f31821 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Sun, 14 Oct 2018 11:54:58 +0300 Subject: [PATCH 16/17] fix: rename cluster_identifier field --- pkg/model/spotinstmodel/elastigroup.go | 2 +- upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/model/spotinstmodel/elastigroup.go b/pkg/model/spotinstmodel/elastigroup.go index 7afb9155bf..0dd9fc8a23 100644 --- a/pkg/model/spotinstmodel/elastigroup.go +++ b/pkg/model/spotinstmodel/elastigroup.go @@ -280,7 +280,7 @@ func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { // Auto Scaler. 
if ig.Spec.Role != kops.InstanceGroupRoleBastion { - group.ClusterIdentifier = fi.String(b.ClusterName()) + group.AutoScalerClusterID = fi.String(b.ClusterName()) // Toggle auto scaler's features. var autoScalerDisabled bool diff --git a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go index 345d0c6735..b7cf3759b2 100644 --- a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go +++ b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go @@ -69,8 +69,8 @@ type Elastigroup struct { RootVolumeOptimization *bool Tenancy *string AutoScalerEnabled *bool + AutoScalerClusterID *string AutoScalerNodeLabels map[string]string - ClusterIdentifier *string } var _ fi.CompareWithID = &Elastigroup{} @@ -447,9 +447,9 @@ func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e // Integration. { - if e.ClusterIdentifier != nil { + if e.AutoScalerClusterID != nil { k8s := new(aws.KubernetesIntegration) - k8s.SetClusterIdentifier(e.ClusterIdentifier) + k8s.SetClusterIdentifier(e.AutoScalerClusterID) k8s.SetIntegrationMode(fi.String("pod")) if e.AutoScalerEnabled != nil { @@ -1152,10 +1152,10 @@ func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change // Integration. 
{ - if e.ClusterIdentifier != nil { + if e.AutoScalerClusterID != nil { tf.Integration = &terraformElastigroupIntegration{ IntegrationMode: fi.String("pod"), - ClusterIdentifier: e.ClusterIdentifier, + ClusterIdentifier: e.AutoScalerClusterID, } if e.AutoScalerEnabled != nil { tf.Integration.AutoScaleIsEnabled = e.AutoScalerEnabled From 07a509b917364bc94c5f84a81dc64e67c6f77f90 Mon Sep 17 00:00:00 2001 From: Liran Polak Date: Sun, 14 Oct 2018 12:51:38 +0300 Subject: [PATCH 17/17] fix: rename autoscaler node labels metadata label --- pkg/model/spotinstmodel/elastigroup.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/model/spotinstmodel/elastigroup.go b/pkg/model/spotinstmodel/elastigroup.go index 0dd9fc8a23..f12b2285cb 100644 --- a/pkg/model/spotinstmodel/elastigroup.go +++ b/pkg/model/spotinstmodel/elastigroup.go @@ -51,10 +51,10 @@ const ( // instance group to specify whether the auto-scaler should be enabled. InstanceGroupLabelAutoScalerDisabled = "spotinst.io/autoscaler-disabled" - // InstanceGroupLabelAutoScalerNodeLabels is the metadata label used on the + // InstanceGroupLabelAutoScalerDefaultNodeLabels is the metadata label used on the // instance group to specify whether default node labels should be set for // the auto-scaler. - InstanceGroupLabelAutoScalerNodeLabels = "spotinst.io/autoscaler-node-labels" + InstanceGroupLabelAutoScalerDefaultNodeLabels = "spotinst.io/autoscaler-default-node-labels" ) // ElastigroupModelBuilder configures Elastigroup objects @@ -296,7 +296,7 @@ func (b *ElastigroupModelBuilder) Build(c *fi.ModelBuilderContext) error { autoScalerDisabled = fi.BoolValue(b) break - case InstanceGroupLabelAutoScalerNodeLabels: + case InstanceGroupLabelAutoScalerDefaultNodeLabels: b, err := parseBool(v) if err != nil { return err