diff --git a/docs/getting_started/spot-ocean.md b/docs/getting_started/spot-ocean.md
index e1ddcb9314..e0f6fa2954 100644
--- a/docs/getting_started/spot-ocean.md
+++ b/docs/getting_started/spot-ocean.md
@@ -151,6 +151,7 @@ metadata:
 |---|---|---|
 | `spotinst.io/spot-percentage` | Specify the percentage of Spot instances that should spin up from the target capacity. | `100` |
 | `spotinst.io/utilize-reserved-instances` | Specify whether reserved instances should be utilized. | `true` |
+| `spotinst.io/utilize-commitments` | Specify whether commitments should be utilized. | none |
 | `spotinst.io/fallback-to-ondemand` | Specify whether fallback to on-demand instances should be enabled. | `true` |
 | `spotinst.io/draining-timeout` | Specify a period of time, in seconds, after a node is marked for termination during which on running pods remains active. | none |
 | `spotinst.io/grace-period` | Specify a period of time, in seconds, that Ocean should wait before applying instance health checks. | none |
diff --git a/pkg/model/awsmodel/spotinst.go b/pkg/model/awsmodel/spotinst.go
index 739821b746..6158a73c98 100644
--- a/pkg/model/awsmodel/spotinst.go
+++ b/pkg/model/awsmodel/spotinst.go
@@ -53,6 +53,10 @@ const (
 	// utilized.
 	SpotInstanceGroupLabelUtilizeReservedInstances = "spotinst.io/utilize-reserved-instances"
 
+	// SpotInstanceGroupLabelUtilizeCommitments is the metadata label used
+	// on the instance group to specify whether commitments should be utilized.
+	SpotInstanceGroupLabelUtilizeCommitments = "spotinst.io/utilize-commitments"
+
 	// SpotInstanceGroupLabelFallbackToOnDemand is the metadata label used on the
 	// instance group to specify whether fallback to on-demand instances should
 	// be enabled.
@@ -218,6 +222,12 @@ func (b *SpotInstanceGroupModelBuilder) buildElastigroup(c *fi.ModelBuilderConte
 				return err
 			}
 
+		case SpotInstanceGroupLabelUtilizeCommitments:
+			group.UtilizeCommitments, err = parseBool(v)
+			if err != nil {
+				return err
+			}
+
 		case SpotInstanceGroupLabelFallbackToOnDemand:
 			group.FallbackToOnDemand, err = parseBool(v)
 			if err != nil {
@@ -380,6 +390,12 @@ func (b *SpotInstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, ig
 				return err
 			}
 
+		case SpotInstanceGroupLabelUtilizeCommitments:
+			ocean.UtilizeCommitments, err = parseBool(v)
+			if err != nil {
+				return err
+			}
+
 		case SpotInstanceGroupLabelFallbackToOnDemand:
 			ocean.FallbackToOnDemand, err = parseBool(v)
 			if err != nil {
diff --git a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go
index 77da9e54e8..a8a3c8700c 100644
--- a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go
+++ b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go
@@ -50,6 +50,7 @@ type Elastigroup struct {
 	MaxSize                  *int64
 	SpotPercentage           *float64
 	UtilizeReservedInstances *bool
+	UtilizeCommitments       *bool
 	FallbackToOnDemand       *bool
 	DrainingTimeout          *int64
 	HealthCheckType          *string
@@ -211,6 +212,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
 		actual.Orientation = group.Strategy.AvailabilityVsCost
 		actual.FallbackToOnDemand = group.Strategy.FallbackToOnDemand
 		actual.UtilizeReservedInstances = group.Strategy.UtilizeReservedInstances
+		actual.UtilizeCommitments = group.Strategy.UtilizeCommitments
 
 		if group.Strategy.DrainingTimeout != nil {
 			actual.DrainingTimeout = fi.Int64(int64(fi.IntValue(group.Strategy.DrainingTimeout)))
@@ -535,6 +537,7 @@ func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
 		group.Strategy.SetAvailabilityVsCost(fi.String(string(normalizeOrientation(e.Orientation))))
 		group.Strategy.SetFallbackToOnDemand(e.FallbackToOnDemand)
 		group.Strategy.SetUtilizeReservedInstances(e.UtilizeReservedInstances)
+		group.Strategy.SetUtilizeCommitments(e.UtilizeCommitments)
 
 		if e.DrainingTimeout != nil {
 			group.Strategy.SetDrainingTimeout(fi.Int(int(*e.DrainingTimeout)))
@@ -855,6 +858,17 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
 			changed = true
 		}
 
+		// Utilize commitments.
+		if changes.UtilizeCommitments != nil {
+			if group.Strategy == nil {
+				group.Strategy = new(aws.Strategy)
+			}
+
+			group.Strategy.SetUtilizeCommitments(e.UtilizeCommitments)
+			changes.UtilizeCommitments = nil
+			changed = true
+		}
+
 		// Draining timeout.
 		if changes.DrainingTimeout != nil {
 			if group.Strategy == nil {
@@ -1362,6 +1376,7 @@ type terraformElastigroup struct {
 	Orientation              *string `json:"orientation,omitempty" cty:"orientation"`
 	FallbackToOnDemand       *bool   `json:"fallback_to_ondemand,omitempty" cty:"fallback_to_ondemand"`
 	UtilizeReservedInstances *bool   `json:"utilize_reserved_instances,omitempty" cty:"utilize_reserved_instances"`
+	UtilizeCommitments       *bool   `json:"utilize_commitments,omitempty" cty:"utilize_commitments"`
 	DrainingTimeout          *int64  `json:"draining_timeout,omitempty" cty:"draining_timeout"`
 
 	OnDemand *string `json:"instance_types_ondemand,omitempty" cty:"instance_types_ondemand"`
@@ -1464,6 +1479,7 @@ func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change
 		Orientation:              fi.String(string(normalizeOrientation(e.Orientation))),
 		FallbackToOnDemand:       e.FallbackToOnDemand,
 		UtilizeReservedInstances: e.UtilizeReservedInstances,
+		UtilizeCommitments:       e.UtilizeCommitments,
 		DrainingTimeout:          e.DrainingTimeout,
 
 		OnDemand: e.OnDemandInstanceType,
diff --git a/upup/pkg/fi/cloudup/spotinsttasks/ocean.go b/upup/pkg/fi/cloudup/spotinsttasks/ocean.go
index e089b18a1e..b1c93aec5f 100644
--- a/upup/pkg/fi/cloudup/spotinsttasks/ocean.go
+++ b/upup/pkg/fi/cloudup/spotinsttasks/ocean.go
@@ -45,6 +45,7 @@ type Ocean struct {
 	MinSize                  *int64
 	MaxSize                  *int64
 	UtilizeReservedInstances *bool
+	UtilizeCommitments       *bool
 	FallbackToOnDemand       *bool
 	DrainingTimeout          *int64
 	GracePeriod              *int64
@@ -149,6 +150,7 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) {
 	if strategy := ocean.Strategy; strategy != nil {
 		actual.FallbackToOnDemand = strategy.FallbackToOnDemand
 		actual.UtilizeReservedInstances = strategy.UtilizeReservedInstances
+		actual.UtilizeCommitments = strategy.UtilizeCommitments
 
 		if strategy.DrainingTimeout != nil {
 			actual.DrainingTimeout = fi.Int64(int64(fi.IntValue(strategy.DrainingTimeout)))
@@ -377,6 +379,7 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error {
 	{
 		ocean.Strategy.SetFallbackToOnDemand(e.FallbackToOnDemand)
 		ocean.Strategy.SetUtilizeReservedInstances(e.UtilizeReservedInstances)
+		ocean.Strategy.SetUtilizeCommitments(e.UtilizeCommitments)
 
 		if e.DrainingTimeout != nil {
 			ocean.Strategy.SetDrainingTimeout(fi.Int(int(*e.DrainingTimeout)))
@@ -617,6 +620,17 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error {
 			changed = true
 		}
 
+		// Utilize commitments.
+		if changes.UtilizeCommitments != nil {
+			if ocean.Strategy == nil {
+				ocean.Strategy = new(aws.Strategy)
+			}
+
+			ocean.Strategy.SetUtilizeCommitments(e.UtilizeCommitments)
+			changes.UtilizeCommitments = nil
+			changed = true
+		}
+
 		// Draining timeout.
 		if changes.DrainingTimeout != nil {
 			if ocean.Strategy == nil {
@@ -993,6 +1007,7 @@ type terraformOcean struct {
 
 	FallbackToOnDemand       *bool   `json:"fallback_to_ondemand,omitempty" cty:"fallback_to_ondemand"`
 	UtilizeReservedInstances *bool   `json:"utilize_reserved_instances,omitempty" cty:"utilize_reserved_instances"`
+	UtilizeCommitments       *bool   `json:"utilize_commitments,omitempty" cty:"utilize_commitments"`
 	DrainingTimeout          *int64  `json:"draining_timeout,omitempty" cty:"draining_timeout"`
 	GracePeriod              *int64  `json:"grace_period,omitempty" cty:"grace_period"`
 
@@ -1021,6 +1036,7 @@ func (_ *Ocean) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Oce
 
 		FallbackToOnDemand:       e.FallbackToOnDemand,
 		UtilizeReservedInstances: e.UtilizeReservedInstances,
+		UtilizeCommitments:       e.UtilizeCommitments,
 		DrainingTimeout:          e.DrainingTimeout,
 		GracePeriod:              e.GracePeriod,
 	}
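Usage sketch (not part of the diff above): the new `spotinst.io/utilize-commitments` label is read from the InstanceGroup's metadata labels and parsed with `parseBool`, so it accepts `"true"` or `"false"` like the neighbouring strategy labels. A minimal InstanceGroup manifest might look like the following; the cluster name, instance group name, machine type, and sizes are placeholder values.

```yaml
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
  labels:
    # Placeholder cluster name; replace with your own.
    kops.k8s.io/cluster: example.k8s.local
    # New in this change: whether commitments should be utilized.
    spotinst.io/utilize-commitments: "true"
    # Existing labels documented in the table patched above.
    spotinst.io/utilize-reserved-instances: "true"
    spotinst.io/fallback-to-ondemand: "true"
  name: nodes
spec:
  machineType: m5.large
  maxSize: 10
  minSize: 2
  role: Node
```

Once set, the value flows through `group.Strategy.SetUtilizeCommitments` / `ocean.Strategy.SetUtilizeCommitments` and, for the Terraform target, is rendered as the `utilize_commitments` field.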