feat(spot/ocean): set spot percentage on launchspec level
This commit is contained in: parent 50999d24bd, commit 2f874be0a1

go.mod (4 changed lines)
@@ -70,7 +70,7 @@ require (
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
github.com/fullsailor/pkcs7 v0.0.0-20180422025557-ae226422660e
github.com/go-bindata/go-bindata/v3 v3.1.3
github.com/go-ini/ini v1.51.0
github.com/go-ini/ini v1.62.0
github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6
github.com/gogo/protobuf v1.3.1
github.com/google/go-cmp v0.5.2
@@ -90,7 +90,7 @@ require (
github.com/spf13/cobra v1.1.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.0
github.com/spotinst/spotinst-sdk-go v1.58.0
github.com/spotinst/spotinst-sdk-go v1.75.0
github.com/stretchr/testify v1.6.1
github.com/urfave/cli v1.22.2
github.com/weaveworks/mesh v0.0.0-20170419100114-1f158d31de55
go.sum (8 changed lines)

@@ -343,8 +343,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.9.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.51.0 h1:VPJKXGzbKlyExUE8f41aV57yxkYx5R49yR6n7flp0M0=
github.com/go-ini/ini v1.51.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.62.0 h1:7VJT/ZXjzqSrvtraFp4ONq80hTcRQth1c9ZnQ3uNQvU=
github.com/go-ini/ini v1.62.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
@@ -952,8 +952,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spotinst/spotinst-sdk-go v1.58.0 h1:h7617CMlfHL3W+CMlhs883kVy3MPdGML/Z4+mW+ddgc=
github.com/spotinst/spotinst-sdk-go v1.58.0/go.mod h1:nWi2DyjUi1WUZclpsqZFXvImsU0T39ppqqHwC4/T5mw=
github.com/spotinst/spotinst-sdk-go v1.75.0 h1:4eg0J1STZPnLxPiIYYYq7DYrApIkpzBpJAgzjFIgQfs=
github.com/spotinst/spotinst-sdk-go v1.75.0/go.mod h1:sSRVZTSdUAPxeELD/urZkxcfU/DcxO1/UIdOxagqFBc=
github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
@@ -381,12 +381,6 @@ func (b *InstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, igs ..
// Strategy and instance types.
for k, v := range ig.ObjectMeta.Labels {
switch k {
case InstanceGroupLabelSpotPercentage:
ocean.SpotPercentage, err = parseFloat(v)
if err != nil {
return err
}

case InstanceGroupLabelUtilizeReservedInstances:
ocean.UtilizeReservedInstances, err = parseBool(v)
if err != nil {
@@ -425,11 +419,6 @@ func (b *InstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, igs ..
}
}

// Spot percentage.
if ocean.SpotPercentage == nil {
ocean.SpotPercentage = defaultSpotPercentage(ig)
}

// Capacity.
ocean.MinSize = fi.Int64(0)
ocean.MaxSize = fi.Int64(0)
@@ -598,6 +587,17 @@ func (b *InstanceGroupModelBuilder) buildLaunchSpec(c *fi.ModelBuilderContext,
}
}

// Strategy.
for k, v := range ig.ObjectMeta.Labels {
switch k {
case InstanceGroupLabelSpotPercentage:
launchSpec.SpotPercentage, err = parseInt(v)
if err != nil {
return err
}
}
}

klog.V(4).Infof("Adding task: LaunchSpec/%s", fi.StringValue(launchSpec.Name))
c.AddTask(launchSpec)
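In short, the spot percentage is no longer read into (or defaulted on) the Ocean, and is instead parsed from the instance group's metadata labels into the LaunchSpec task. A minimal, self-contained sketch of that label-to-field flow; the label key string used here is an assumption (kops defines the real key as the InstanceGroupLabelSpotPercentage constant), and the stand-in struct and helper are illustrative only:

```go
package main

import (
	"fmt"
	"strconv"
)

// launchSpec is a local stand-in for the kops LaunchSpec task shown above.
type launchSpec struct {
	SpotPercentage *int64
}

func main() {
	// Hypothetical instance group labels; the exact label key is defined by
	// the InstanceGroupLabelSpotPercentage constant in kops.
	labels := map[string]string{"spotinst.io/spot-percentage": "80"}

	spec := new(launchSpec)
	for k, v := range labels {
		if k == "spotinst.io/spot-percentage" {
			// kops' parseInt helper wraps a similar string-to-int64 conversion.
			n, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				panic(err)
			}
			spec.SpotPercentage = &n
		}
	}
	fmt.Println(*spec.SpotPercentage) // 80
}
```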
@@ -40,6 +40,7 @@ type LaunchSpec struct {
Lifecycle *fi.Lifecycle

ID *string
SpotPercentage *int64
UserData fi.Resource
SecurityGroups []*awstasks.SecurityGroup
Subnets []*awstasks.Subnet
@@ -137,7 +138,7 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) {
actual.Name = spec.Name
actual.Ocean = &Ocean{
ID: ocean.ID,
Name: o.Ocean.Name,
Name: ocean.Name,
}

// Image.
@@ -270,6 +271,13 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) {
}
}

// Strategy.
{
if strategy := spec.Strategy; strategy != nil {
actual.SpotPercentage = fi.Int64(int64(fi.IntValue(strategy.SpotPercentage)))
}
}

// Avoid spurious changes.
actual.Lifecycle = o.Lifecycle

@@ -312,7 +320,10 @@ func (_ *LaunchSpec) create(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err

klog.V(2).Infof("Creating Launch Spec for Ocean %q", *ocean.ID)

spec := new(aws.LaunchSpec)
spec := &aws.LaunchSpec{
Strategy: new(aws.LaunchSpecStrategy),
}

spec.SetName(e.Name)
spec.SetOceanId(ocean.ID)

@@ -442,7 +453,14 @@ func (_ *LaunchSpec) create(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err
}
}

// Wrap the raw object as an LaunchSpec.
// Strategy.
{
if e.SpotPercentage != nil {
spec.Strategy.SetSpotPercentage(fi.Int(int(*e.SpotPercentage)))
}
}

// Wrap the raw object as a LaunchSpec.
sp, err := spotinst.NewLaunchSpec(cloud.ProviderID(), spec)
if err != nil {
return err
@@ -633,6 +651,20 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err
}
}

// Strategy.
{
// Spot percentage.
if changes.SpotPercentage != nil {
if spec.Strategy == nil {
spec.Strategy = new(aws.LaunchSpecStrategy)
}

spec.Strategy.SetSpotPercentage(fi.Int(int(fi.Int64Value(e.SpotPercentage))))
changes.SpotPercentage = nil
changed = true
}
}

empty := &LaunchSpec{}
if !reflect.DeepEqual(empty, changes) {
klog.Warningf("Not all changes applied to Launch Spec %q: %v", *spec.ID, changes)
@@ -644,15 +676,40 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err
}

klog.V(2).Infof("Updating Launch Spec %q (config: %s)", *spec.ID, stringutil.Stringify(spec))
ctx := context.Background()

// Wrap the raw object as an LaunchSpec.
ocean, err := e.Ocean.find(cloud.Spotinst().Ocean(), *e.Ocean.Name)
if err != nil {
return err
}

// Reset the Spot percentage on the Cluster level.
if spec.Strategy != nil && spec.Strategy.SpotPercentage != nil &&
ocean.Strategy != nil && ocean.Strategy.SpotPercentage != nil {
c := &aws.Cluster{Strategy: new(aws.Strategy)}
c.SetId(ocean.ID)
c.Strategy.SetSpotPercentage(nil)

// Wrap the raw object as a Cluster.
o, err := spotinst.NewOcean(cloud.ProviderID(), c)
if err != nil {
return err
}

// Update the existing Cluster.
if err = cloud.Spotinst().Ocean().Update(ctx, o); err != nil {
return err
}
}

// Wrap the raw object as a Launch Spec.
sp, err := spotinst.NewLaunchSpec(cloud.ProviderID(), spec)
if err != nil {
return err
}

// Update an existing LaunchSpec.
if err := cloud.Spotinst().LaunchSpec().Update(context.Background(), sp); err != nil {
// Update the existing Launch Spec.
if err = cloud.Spotinst().LaunchSpec().Update(ctx, sp); err != nil {
return fmt.Errorf("spotinst: failed to update launch spec: %v", err)
}

@@ -678,6 +735,11 @@ type terraformLaunchSpec struct {
Labels []*terraformKV `json:"labels,omitempty" cty:"labels"`
Tags []*terraformKV `json:"tags,omitempty" cty:"tags"`
Headrooms []*terraformAutoScalerHeadroom `json:"autoscale_headrooms,omitempty" cty:"autoscale_headrooms"`
Strategy *terraformLaunchSpecStrategy `json:"strategy,omitempty" cty:"strategy"`
}

type terraformLaunchSpecStrategy struct {
SpotPercentage *int64 `json:"spot_percentage,omitempty" cty:"spot_percentage"`
}

func (_ *LaunchSpec) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *LaunchSpec) error {
@@ -811,6 +873,15 @@ func (_ *LaunchSpec) RenderTerraform(t *terraform.TerraformTarget, a, e, changes
}
}

// Strategy.
{
if e.SpotPercentage != nil {
tf.Strategy = &terraformLaunchSpecStrategy{
SpotPercentage: e.SpotPercentage,
}
}
}

return t.RenderResource("spotinst_ocean_aws_launch_spec", *e.Name, tf)
}
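For reference, a minimal sketch of what the new strategy field serializes to when the launch spec is rendered for Terraform. The struct below is a local stand-in for terraformLaunchSpecStrategy (field name and JSON tag copied from the hunk above); the value is illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for terraformLaunchSpecStrategy, mirroring the tag added above.
type launchSpecStrategy struct {
	SpotPercentage *int64 `json:"spot_percentage,omitempty"`
}

func main() {
	pct := int64(40) // illustrative value
	b, _ := json.Marshal(&launchSpecStrategy{SpotPercentage: &pct})
	fmt.Println(string(b)) // {"spot_percentage":40}
}
```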
@@ -44,7 +44,6 @@ type Ocean struct {
ID *string
MinSize *int64
MaxSize *int64
SpotPercentage *float64
UtilizeReservedInstances *bool
FallbackToOnDemand *bool
DrainingTimeout *int64
@@ -148,7 +147,6 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) {
// Strategy.
{
if strategy := ocean.Strategy; strategy != nil {
actual.SpotPercentage = strategy.SpotPercentage
actual.FallbackToOnDemand = strategy.FallbackToOnDemand
actual.UtilizeReservedInstances = strategy.UtilizeReservedInstances

@@ -387,7 +385,6 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error {

// Strategy.
{
ocean.Strategy.SetSpotPercentage(e.SpotPercentage)
ocean.Strategy.SetFallbackToOnDemand(e.FallbackToOnDemand)
ocean.Strategy.SetUtilizeReservedInstances(e.UtilizeReservedInstances)

@@ -619,17 +616,6 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error {

// Strategy.
{
// Spot percentage.
if changes.SpotPercentage != nil {
if ocean.Strategy == nil {
ocean.Strategy = new(aws.Strategy)
}

ocean.Strategy.SetSpotPercentage(e.SpotPercentage)
changes.SpotPercentage = nil
changed = true
}

// Fallback to on-demand.
if changes.FallbackToOnDemand != nil {
if ocean.Strategy == nil {
@@ -1041,11 +1027,10 @@ type terraformOcean struct {
MaxSize *int64 `json:"max_size,omitempty" cty:"max_size"`
DesiredCapacity *int64 `json:"desired_capacity,omitempty" cty:"desired_capacity"`

SpotPercentage *float64 `json:"spot_percentage,omitempty" cty:"spot_percentage"`
FallbackToOnDemand *bool `json:"fallback_to_ondemand,omitempty" cty:"fallback_to_ondemand"`
UtilizeReservedInstances *bool `json:"utilize_reserved_instances,omitempty" cty:"utilize_reserved_instances"`
DrainingTimeout *int64 `json:"draining_timeout,omitempty" cty:"draining_timeout"`
GracePeriod *int64 `json:"grace_period,omitempty" cty:"grace_period"`
FallbackToOnDemand *bool `json:"fallback_to_ondemand,omitempty" cty:"fallback_to_ondemand"`
UtilizeReservedInstances *bool `json:"utilize_reserved_instances,omitempty" cty:"utilize_reserved_instances"`
DrainingTimeout *int64 `json:"draining_timeout,omitempty" cty:"draining_timeout"`
GracePeriod *int64 `json:"grace_period,omitempty" cty:"grace_period"`

Monitoring *bool `json:"monitoring,omitempty" cty:"monitoring"`
EBSOptimized *bool `json:"ebs_optimized,omitempty" cty:"ebs_optimized"`
@@ -1073,7 +1058,6 @@ func (_ *Ocean) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Oce
MinSize: e.MinSize,
MaxSize: e.MaxSize,

SpotPercentage: e.SpotPercentage,
FallbackToOnDemand: e.FallbackToOnDemand,
UtilizeReservedInstances: e.UtilizeReservedInstances,
DrainingTimeout: e.DrainingTimeout,
@@ -1266,11 +1250,6 @@ func (o *Ocean) buildTags() []*aws.Tag {
}

func (o *Ocean) applyDefaults() {
if o.SpotPercentage == nil {
f := float64(100.0)
o.SpotPercentage = &f
}

if o.FallbackToOnDemand == nil {
o.FallbackToOnDemand = fi.Bool(true)
}
@@ -1,20 +0,0 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x

install: skip
script:
- go get golang.org/x/tools/cmd/cover
- go get github.com/smartystreets/goconvey
- mkdir -p $HOME/gopath/src/gopkg.in
- ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1
- cd $HOME/gopath/src/gopkg.in/ini.v1
- go test -v -cover -race
@@ -6,7 +6,7 @@ test:
go test -v -cover -race

bench:
go test -v -cover -race -test.bench=. -test.benchmem
go test -v -cover -test.bench=. -test.benchmem

vet:
go vet
@@ -1,6 +1,9 @@
# INI

[](https://travis-ci.org/go-ini/ini) [](https://sourcegraph.com/github.com/go-ini/ini)
[](https://github.com/go-ini/ini/actions?query=workflow%3AGo)
[](https://codecov.io/gh/go-ini/ini)
[](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
[](https://sourcegraph.com/github.com/go-ini/ini)



@@ -8,7 +11,7 @@ Package ini provides INI file read and write functionality in Go.

## Features

- Load from multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites.
- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
- Read with recursion values.
- Read with parent-child sections.
- Read with auto-increment key names.
@@ -33,6 +36,7 @@ Please add `-u` flag to update in the future.

- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
- Mainland China mirror: https://ini.unknwon.cn

## License

@@ -0,0 +1,9 @@
coverage:
  range: "60...95"
  status:
    project:
      default:
        threshold: 1%

comment:
  layout: 'diff, files'
@@ -68,6 +68,8 @@ func parseDataSource(source interface{}) (dataSource, error) {
return &sourceData{s}, nil
case io.ReadCloser:
return &sourceReadCloser{s}, nil
case io.Reader:
return &sourceReadCloser{ioutil.NopCloser(s)}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
}
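The hunk above makes a plain io.Reader acceptable as a data source (it is wrapped in ioutil.NopCloser internally). A small usage sketch of the updated go-ini call; the key and value are illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/ini.v1"
)

func main() {
	// io.Reader is now accepted as a data source.
	f, err := ini.Load(strings.NewReader("name = kops\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Section("").Key("name").String()) // kops
}
```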
@@ -25,7 +25,7 @@ import (
"sync"
)

// File represents a combination of a or more INI file(s) in memory.
// File represents a combination of one or more INI files in memory.
type File struct {
options LoadOptions
dataSources []dataSource
@@ -36,8 +36,12 @@ type File struct {

// To keep data in order.
sectionList []string
// To keep track of the index of a section with same name.
// This meta list is only used with non-unique section names are allowed.
sectionIndexes []int

// Actual data is stored here.
sections map[string]*Section
sections map[string][]*Section

NameMapper
ValueMapper
@@ -48,27 +52,40 @@ func newFile(dataSources []dataSource, opts LoadOptions) *File {
if len(opts.KeyValueDelimiters) == 0 {
opts.KeyValueDelimiters = "=:"
}
if len(opts.KeyValueDelimiterOnWrite) == 0 {
opts.KeyValueDelimiterOnWrite = "="
}
if len(opts.ChildSectionDelimiter) == 0 {
opts.ChildSectionDelimiter = "."
}

return &File{
BlockMode: true,
dataSources: dataSources,
sections: make(map[string]*Section),
sectionList: make([]string, 0, 10),
sections: make(map[string][]*Section),
options: opts,
}
}

// Empty returns an empty file object.
func Empty() *File {
// Ignore error here, we sure our data is good.
f, _ := Load([]byte(""))
func Empty(opts ...LoadOptions) *File {
var opt LoadOptions
if len(opts) > 0 {
opt = opts[0]
}

// Ignore error here, we are sure our data is good.
f, _ := LoadSources(opt, []byte(""))
return f
}

// NewSection creates a new section.
func (f *File) NewSection(name string) (*Section, error) {
if len(name) == 0 {
return nil, errors.New("error creating new section: empty section name")
} else if f.options.Insensitive && name != DefaultSection {
return nil, errors.New("empty section name")
}

if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
name = strings.ToLower(name)
}

@@ -77,13 +94,20 @@ func (f *File) NewSection(name string) (*Section, error) {
defer f.lock.Unlock()
}

if inSlice(name, f.sectionList) {
return f.sections[name], nil
if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
return f.sections[name][0], nil
}

f.sectionList = append(f.sectionList, name)
f.sections[name] = newSection(f, name)
return f.sections[name], nil

// NOTE: Append to indexes must happen before appending to sections,
// otherwise index will have off-by-one problem.
f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))

sec := newSection(f, name)
f.sections[name] = append(f.sections[name], sec)

return sec, nil
}

// NewRawSection creates a new section with an unparseable body.
@@ -110,10 +134,20 @@ func (f *File) NewSections(names ...string) (err error) {

// GetSection returns section by given name.
func (f *File) GetSection(name string) (*Section, error) {
secs, err := f.SectionsByName(name)
if err != nil {
return nil, err
}

return secs[0], err
}

// SectionsByName returns all sections with given name.
func (f *File) SectionsByName(name string) ([]*Section, error) {
if len(name) == 0 {
name = DefaultSection
}
if f.options.Insensitive {
if f.options.Insensitive || f.options.InsensitiveSections {
name = strings.ToLower(name)
}

@@ -122,11 +156,12 @@ func (f *File) GetSection(name string) (*Section, error) {
defer f.lock.RUnlock()
}

sec := f.sections[name]
if sec == nil {
return nil, fmt.Errorf("section '%s' does not exist", name)
secs := f.sections[name]
if len(secs) == 0 {
return nil, fmt.Errorf("section %q does not exist", name)
}
return sec, nil

return secs, nil
}

// Section assumes named section exists and returns a zero-value when not.
@@ -141,6 +176,19 @@ func (f *File) Section(name string) *Section {
return sec
}

// SectionWithIndex assumes named section exists and returns a new section when not.
func (f *File) SectionWithIndex(name string, index int) *Section {
secs, err := f.SectionsByName(name)
if err != nil || len(secs) <= index {
// NOTE: It's OK here because the only possible error is empty section name,
// but if it's empty, this piece of code won't be executed.
newSec, _ := f.NewSection(name)
return newSec
}

return secs[index]
}

// Sections returns a list of Section stored in the current instance.
func (f *File) Sections() []*Section {
if f.BlockMode {
@@ -150,7 +198,7 @@ func (f *File) Sections() []*Section {

sections := make([]*Section, len(f.sectionList))
for i, name := range f.sectionList {
sections[i] = f.sections[name]
sections[i] = f.sections[name][f.sectionIndexes[i]]
}
return sections
}
@@ -167,24 +215,70 @@ func (f *File) SectionStrings() []string {
return list
}

// DeleteSection deletes a section.
// DeleteSection deletes a section or all sections with given name.
func (f *File) DeleteSection(name string) {
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
secs, err := f.SectionsByName(name)
if err != nil {
return
}

for i := 0; i < len(secs); i++ {
// For non-unique sections, it is always needed to remove the first one so
// in the next iteration, the subsequent section continue having index 0.
// Ignoring the error as index 0 never returns an error.
_ = f.DeleteSectionWithIndex(name, 0)
}
}

// DeleteSectionWithIndex deletes a section with given name and index.
func (f *File) DeleteSectionWithIndex(name string, index int) error {
if !f.options.AllowNonUniqueSections && index != 0 {
return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
}

if len(name) == 0 {
name = DefaultSection
}

for i, s := range f.sectionList {
if s == name {
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
delete(f.sections, name)
return
}
if f.options.Insensitive || f.options.InsensitiveSections {
name = strings.ToLower(name)
}

if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}

// Count occurrences of the sections
occurrences := 0

sectionListCopy := make([]string, len(f.sectionList))
copy(sectionListCopy, f.sectionList)

for i, s := range sectionListCopy {
if s != name {
continue
}

if occurrences == index {
if len(f.sections[name]) <= 1 {
delete(f.sections, name) // The last one in the map
} else {
f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
}

// Fix section lists
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)

} else if occurrences > index {
// Fix the indices of all following sections with this name.
f.sectionIndexes[i-1]--
}

occurrences++
}

return nil
}

func (f *File) reload(s dataSource) error {
@@ -203,11 +297,14 @@ func (f *File) Reload() (err error) {
if err = f.reload(s); err != nil {
// In loose mode, we create an empty default section for nonexistent files.
if os.IsNotExist(err) && f.options.Loose {
f.parse(bytes.NewBuffer(nil))
_ = f.parse(bytes.NewBuffer(nil))
continue
}
return err
}
if f.options.ShortCircuit {
return nil
}
}
return nil
}
@@ -230,16 +327,16 @@ func (f *File) Append(source interface{}, others ...interface{}) error {
}

func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
equalSign := DefaultFormatLeft + "=" + DefaultFormatRight
equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight

if PrettyFormat || PrettyEqual {
equalSign = " = "
equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
}

// Use buffer to make sure target is safe until finish encoding.
buf := bytes.NewBuffer(nil)
for i, sname := range f.sectionList {
sec := f.Section(sname)
sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
if len(sec.Comment) > 0 {
// Support multiline comments
lines := strings.Split(sec.Comment, LineBreak)
@@ -256,7 +353,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
}
}

if i > 0 || DefaultHeader {
if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return nil, err
}
@@ -282,7 +379,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
}

// Count and generate alignment length and buffer spaces using the
// longest key. Keys may be modifed if they contain certain characters so
// longest key. Keys may be modified if they contain certain characters so
// we need to take that into account in our calculation.
alignLength := 0
if PrettyFormat {
@@ -360,6 +457,8 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
val = `"""` + val + `"""`
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
} else if len(strings.TrimSpace(val)) != len(val) {
val = `"` + val + `"`
}
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
return nil, err
@@ -403,7 +502,7 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
// SaveToIndent writes content to file system with given value indention.
func (f *File) SaveToIndent(filename, indent string) error {
// Note: Because we are truncating with os.Create,
// so it's safer to save to a temporary file location and rename afte done.
// so it's safer to save to a temporary file location and rename after done.
buf, err := f.writeToBuffer(indent)
if err != nil {
return err
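The file.go changes above add support for keeping multiple sections with the same name (AllowNonUniqueSections, SectionsByName, SectionWithIndex, DeleteSectionWithIndex). A small, self-contained sketch of how the new API is used; the section and key names are illustrative:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	raw := []byte("[peer]\nname = alpha\n\n[peer]\nname = beta\n")

	f, err := ini.LoadSources(ini.LoadOptions{AllowNonUniqueSections: true}, raw)
	if err != nil {
		panic(err)
	}

	// All sections named "peer" are kept instead of being merged.
	secs, err := f.SectionsByName("peer")
	if err != nil {
		panic(err)
	}
	for i, sec := range secs {
		fmt.Println(i, sec.Key("name").String()) // 0 alpha, then 1 beta
	}
}
```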
@@ -18,8 +18,10 @@
package ini

import (
"os"
"regexp"
"runtime"
"strings"
)

const (
@@ -29,14 +31,8 @@ const (

// Maximum allowed depth when recursively substituing variable names.
depthValues = 99
version = "1.51.0"
)

// Version returns current package version literal.
func Version() string {
return version
}

var (
// LineBreak is the delimiter to determine or compose a new line.
// This variable will be changed to "\r\n" automatically on Windows at package init time.
@@ -61,8 +57,10 @@ var (
DefaultFormatRight = ""
)

var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")

func init() {
if runtime.GOOS == "windows" {
if runtime.GOOS == "windows" && !inTest {
LineBreak = "\r\n"
}
}
@@ -73,12 +71,18 @@ type LoadOptions struct {
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
// InsensitiveSections indicates whether the parser forces all section to lowercase.
InsensitiveSections bool
// InsensitiveKeys indicates whether the parser forces all key names to lowercase.
InsensitiveKeys bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
IgnoreInlineComment bool
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
SkipUnrecognizableLines bool
// ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source.
ShortCircuit bool
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
// This type of keys are mostly used in my.cnf.
AllowBooleanKeys bool
@@ -109,12 +113,18 @@ type LoadOptions struct {
UnparseableSections []string
// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
KeyValueDelimiters string
// KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=".
KeyValueDelimiterOnWrite string
// ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
ChildSectionDelimiter string
// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
PreserveSurroundedQuote bool
// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
DebugFunc DebugFunc
// ReaderBufferSize is the buffer size of the reader in bytes.
ReaderBufferSize int
// AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
AllowNonUniqueSections bool
}

// DebugFunc is the type of function called to log parse events.
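A short sketch exercising a few of the LoadOptions added above (InsensitiveSections, KeyValueDelimiterOnWrite, ChildSectionDelimiter); the input text and option values are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/ini.v1"
)

func main() {
	opts := ini.LoadOptions{
		InsensitiveSections:      true, // section names are lower-cased; key names keep their case
		KeyValueDelimiterOnWrite: ":",  // write "key : value" instead of "key = value"
		ChildSectionDelimiter:    "::", // "parent::child" instead of "parent.child"
	}

	f, err := ini.LoadSources(opts, []byte("[Server]\nAddr = 0.0.0.0\n"))
	if err != nil {
		panic(err)
	}

	fmt.Println(f.Section("server").Key("Addr").String()) // 0.0.0.0
	_, _ = f.WriteTo(os.Stdout)                           // keys are rendered with " : "
}
```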
@@ -686,99 +686,127 @@ func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
// parseBools transforms strings to bools.
func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
vals := make([]bool, 0, len(strs))
for _, str := range strs {
parser := func(str string) (interface{}, error) {
val, err := parseBool(str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(bool))
}
}
return vals, nil
return vals, err
}

// parseFloat64s transforms strings to float64s.
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
vals := make([]float64, 0, len(strs))
for _, str := range strs {
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseFloat(str, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(float64))
}
}
return vals, nil
return vals, err
}

// parseInts transforms strings to ints.
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
vals := make([]int, 0, len(strs))
for _, str := range strs {
valInt64, err := strconv.ParseInt(str, 0, 64)
val := int(valInt64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseInt(str, 0, 64)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, int(val.(int64)))
}
}
return vals, nil
return vals, err
}

// parseInt64s transforms strings to int64s.
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
vals := make([]int64, 0, len(strs))
for _, str := range strs {
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseInt(str, 0, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
return val, err
}

rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(int64))
}
}
return vals, nil
return vals, err
}

// parseUints transforms strings to uints.
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
vals := make([]uint, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 0, 0)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, uint(val))
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseUint(str, 0, 64)
return val, err
}

rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, uint(val.(uint64)))
}
}
return vals, nil
return vals, err
}

// parseUint64s transforms strings to uint64s.
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
vals := make([]uint64, 0, len(strs))
for _, str := range strs {
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseUint(str, 0, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(uint64))
}
}
return vals, nil
return vals, err
}


type Parser func(str string) (interface{}, error)


// parseTimesFormat transforms strings to times in given format.
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
vals := make([]time.Time, 0, len(strs))
for _, str := range strs {
parser := func(str string) (interface{}, error) {
val, err := time.Parse(format, str)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(time.Time))
}
}
return vals, err
}


// doParse transforms strings to different types
func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
vals := make([]interface{}, 0, len(strs))
for _, str := range strs {
val, err := parser(str)
if err != nil && returnOnInvalid {
return nil, err
}
@@ -84,7 +84,10 @@ func (p *parser) BOM() error {
case mask[0] == 254 && mask[1] == 255:
fallthrough
case mask[0] == 255 && mask[1] == 254:
p.buf.Read(mask)
_, err = p.buf.Read(mask)
if err != nil {
return err
}
case mask[0] == 239 && mask[1] == 187:
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
@@ -93,7 +96,10 @@ func (p *parser) BOM() error {
return nil
}
if mask[2] == 191 {
p.buf.Read(mask)
_, err = p.buf.Read(mask)
if err != nil {
return err
}
}
}
return nil
@@ -135,7 +141,7 @@ func readKeyName(delimiters string, in []byte) (string, int, error) {
}

// Get out key name
endIdx := -1
var endIdx int
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
@@ -181,7 +187,7 @@ func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
}
val += next
if p.isEOF {
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
}
}
return val, nil
@@ -371,7 +377,7 @@ func (f *File) parse(reader io.Reader) (err error) {

// Ignore error because default section name is never empty string.
name := DefaultSection
if f.options.Insensitive {
if f.options.Insensitive || f.options.InsensitiveSections {
name = strings.ToLower(DefaultSection)
}
section, _ := f.NewSection(name)
@@ -413,7 +419,10 @@ func (f *File) parse(reader io.Reader) (err error) {
if f.options.AllowNestedValues &&
isLastValueEmpty && len(line) > 0 {
if line[0] == ' ' || line[0] == '\t' {
lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
if err != nil {
return err
}
continue
}
}
@@ -453,14 +462,14 @@ func (f *File) parse(reader io.Reader) (err error) {

section.Comment = strings.TrimSpace(p.comment.String())

// Reset aotu-counter and comments
// Reset auto-counter and comments
p.comment.Reset()
p.count = 1

inUnparseableSection = false
for i := range f.options.UnparseableSections {
if f.options.UnparseableSections[i] == name ||
(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
inUnparseableSection = true
continue
}
@@ -66,7 +66,7 @@ func (s *Section) SetBody(body string) {
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
return nil, errors.New("error creating new key: empty key name")
} else if s.f.options.Insensitive {
} else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
name = strings.ToLower(name)
}

@@ -109,7 +109,7 @@ func (s *Section) GetKey(name string) (*Key, error) {
if s.f.BlockMode {
s.f.lock.RLock()
}
if s.f.options.Insensitive {
if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
name = strings.ToLower(name)
}
key := s.keys[name]
@@ -121,7 +121,7 @@ func (s *Section) GetKey(name string) (*Key, error) {
// Check if it is a child-section.
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
@@ -131,7 +131,7 @@ func (s *Section) GetKey(name string) (*Key, error) {
}
break
}
return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
}
return key, nil
}
@@ -188,7 +188,7 @@ func (s *Section) ParentKeys() []*Key {
var parentKeys []*Key
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
@@ -245,11 +245,11 @@ func (s *Section) DeleteKey(name string) {
// For example, "[parent.child1]" and "[parent.child12]" are child sections
// of section "[parent]".
func (s *Section) ChildSections() []*Section {
prefix := s.name + "."
prefix := s.name + s.f.options.ChildSectionDelimiter
children := make([]*Section, 0, 3)
for _, name := range s.f.sectionList {
if strings.HasPrefix(name, prefix) {
children = append(children, s.f.sections[name])
children = append(children, s.f.sections[name]...)
}
}
return children
@@ -183,6 +183,10 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
if vt.Name() == "Duration" {
durationVal, err := key.Duration()
if err != nil {
if intVal, err := key.Int64(); err == nil {
field.SetInt(intVal)
return nil
}
return wrapStrictError(err, isStrict)
}
if isPtr {
@@ -254,24 +258,26 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
case reflect.Slice:
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
default:
return fmt.Errorf("unsupported type '%s'", t)
return fmt.Errorf("unsupported type %q", t)
}
return nil
}

func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
opts := strings.SplitN(tag, ",", 3)
func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
opts := strings.SplitN(tag, ",", 5)
rawName = opts[0]
if len(opts) > 1 {
omitEmpty = opts[1] == "omitempty"
for _, opt := range opts[1:] {
omitEmpty = omitEmpty || (opt == "omitempty")
allowShadow = allowShadow || (opt == "allowshadow")
allowNonUnique = allowNonUnique || (opt == "nonunique")
extends = extends || (opt == "extends")
}
if len(opts) > 2 {
allowShadow = opts[2] == "allowshadow"
}
return rawName, omitEmpty, allowShadow
return rawName, omitEmpty, allowShadow, allowNonUnique, extends
}

func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
// mapToField maps the given value to the matching field of the given section.
// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added.
func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
@@ -286,7 +292,7 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
continue
}

rawName, _, allowShadow := parseTagOptions(tag)
rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
@@ -294,62 +300,116 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error {

isStruct := tpField.Type.Kind() == reflect.Struct
isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
if isAnonymous {
isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
if isAnonymousPtr {
field.Set(reflect.New(tpField.Type.Elem()))
}

if isAnonymous || isStruct || isStructPtr {
if sec, err := s.f.GetSection(fieldName); err == nil {
// Only set the field to non-nil struct value if we have
// a section for it. Otherwise, we end up with a non-nil
// struct ptr even though there is no data.
if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
if isStructPtr && field.IsNil() {
field.Set(reflect.New(tpField.Type.Elem()))
}
fieldSection := s
if rawName != "" {
sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
fieldSection = secs[sectionIndex]
}
}
if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
return fmt.Errorf("map to field %q: %v", fieldName, err)
}
} else if isAnonymousPtr || isStruct || isStructPtr {
if secs, err := s.f.SectionsByName(fieldName); err == nil {
if len(secs) <= sectionIndex {
return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
}
// Only set the field to non-nil struct value if we have a section for it.
// Otherwise, we end up with a non-nil struct ptr even though there is no data.
if isStructPtr && field.IsNil() {
field.Set(reflect.New(tpField.Type.Elem()))
}
if err = sec.mapTo(field, isStrict); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
return fmt.Errorf("map to field %q: %v", fieldName, err)
}
continue
}
}

// Map non-unique sections
if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
newField, err := s.mapToSlice(fieldName, field, isStrict)
if err != nil {
return fmt.Errorf("map to slice %q: %v", fieldName, err)
}

field.Set(newField)
continue
}

if key, err := s.GetKey(fieldName); err == nil {
delim := parseDelim(tpField.Tag.Get("delim"))
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
return fmt.Errorf("set field %q: %v", fieldName, err)
}
}
}
return nil
}

// MapTo maps section to given struct.
func (s *Section) MapTo(v interface{}) error {
// mapToSlice maps all sections with the same name and returns the new value.
// The type of the Value must be a slice.
func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
secs, err := s.f.SectionsByName(secName)
if err != nil {
return reflect.Value{}, err
}

typ := val.Type().Elem()
for i, sec := range secs {
elem := reflect.New(typ)
if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
}

val = reflect.Append(val, elem.Elem())
}
return val, nil
}

// mapTo maps a section to object v.
func (s *Section) mapTo(v interface{}, isStrict bool) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot map to non-pointer struct")
return errors.New("not a pointer to a struct")
}

return s.mapTo(val, false)
if typ.Kind() == reflect.Slice {
newField, err := s.mapToSlice(s.name, val, isStrict)
if err != nil {
return err
}

val.Set(newField)
return nil
}

return s.mapToField(val, isStrict, 0, s.name)
}

// MapTo maps section to given struct.
func (s *Section) MapTo(v interface{}) error {
return s.mapTo(v, false)
}

// StrictMapTo maps section to given struct in strict mode,
// which returns all possible error including value parsing error.
func (s *Section) StrictMapTo(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot map to non-pointer struct")
}

return s.mapTo(val, true)
return s.mapTo(v, true)
}

// MapTo maps file to given struct.
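A compact sketch of the new `nonunique` tag option and slice mapping introduced above; the struct, section, and key names are illustrative:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

type Peer struct {
	Name string `ini:"name"`
}

type Config struct {
	// "nonunique" maps every [peer] section into one slice element.
	Peers []Peer `ini:"peer,nonunique"`
}

func main() {
	raw := []byte("[peer]\nname = alpha\n\n[peer]\nname = beta\n")

	f, err := ini.LoadSources(ini.LoadOptions{AllowNonUniqueSections: true}, raw)
	if err != nil {
		panic(err)
	}

	var cfg Config
	if err := f.MapTo(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Peers) // [{Name:alpha} {Name:beta}]
}
```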
@@ -427,10 +487,10 @@ func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, all
if i == 0 {
keyWithShadows = newKey(key.s, key.name, val)
} else {
keyWithShadows.AddShadow(val)
_ = keyWithShadows.AddShadow(val)
}
}
key = keyWithShadows
*key = *keyWithShadows
return nil
}

@@ -480,7 +540,7 @@ func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim
return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
}
default:
return fmt.Errorf("unsupported type '%s'", t)
return fmt.Errorf("unsupported type %q", t)
}
return nil
}
@@ -508,6 +568,11 @@ func isEmptyValue(v reflect.Value) bool {
return false
}

// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
type StructReflector interface {
ReflectINIStruct(*File) error
}

func (s *Section) reflectFrom(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
@@ -515,6 +580,10 @@ func (s *Section) reflectFrom(val reflect.Value) error {
typ := val.Type()

for i := 0; i < typ.NumField(); i++ {
if !val.Field(i).CanInterface() {
continue
}

field := val.Field(i)
tpField := typ.Field(i)

@@ -523,17 +592,28 @@ func (s *Section) reflectFrom(val reflect.Value) error {
continue
}

rawName, omitEmpty, allowShadow := parseTagOptions(tag)
rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
if omitEmpty && isEmptyValue(field) {
continue
}

if r, ok := field.Interface().(StructReflector); ok {
return r.ReflectINIStruct(s.f)
}

fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}

if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
if err := s.reflectFrom(field); err != nil {
return fmt.Errorf("reflect from field %q: %v", fieldName, err)
}
continue
}

if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
// Note: The only error here is section doesn't exist.
sec, err := s.f.GetSection(fieldName)
@@ -548,12 +628,41 @@ func (s *Section) reflectFrom(val reflect.Value) error {
}

if err = sec.reflectFrom(field); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
return fmt.Errorf("reflect from field %q: %v", fieldName, err)
}
continue
}

// Note: Same reason as secion.
if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
slice := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
sliceOf := field.Type().Elem().Kind()

for i := 0; i < field.Len(); i++ {
if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
}

sec, err := s.f.NewSection(fieldName)
if err != nil {
return err
}

// Add comment from comment tag
if len(sec.Comment) == 0 {
sec.Comment = tpField.Tag.Get("comment")
}

if err := sec.reflectFrom(slice.Index(i)); err != nil {
return fmt.Errorf("reflect from field %q: %v", fieldName, err)
}
}
continue
}

// Note: Same reason as section.
key, err := s.GetKey(fieldName)
if err != nil {
key, _ = s.NewKey(fieldName, "")
@@ -564,23 +673,58 @@ func (s *Section) reflectFrom(val reflect.Value) error {
key.Comment = tpField.Tag.Get("comment")
}

if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim")), allowShadow); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
delim := parseDelim(tpField.Tag.Get("delim"))
if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
return fmt.Errorf("reflect field %q: %v", fieldName, err)
}

}
return nil
}

// ReflectFrom reflects secion from given struct.
// ReflectFrom reflects section from given struct. It overwrites existing ones.
func (s *Section) ReflectFrom(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)

if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
(typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
// Clear sections to make sure none exists before adding the new ones
s.f.DeleteSection(s.name)

if typ.Kind() == reflect.Ptr {
sec, err := s.f.NewSection(s.name)
if err != nil {
return err
}
return sec.reflectFrom(val.Elem())
}

slice := val.Slice(0, val.Len())
sliceOf := val.Type().Elem().Kind()
if sliceOf != reflect.Ptr {
return fmt.Errorf("not a slice of pointers")
}

for i := 0; i < slice.Len(); i++ {
sec, err := s.f.NewSection(s.name)
if err != nil {
return err
}

err = sec.reflectFrom(slice.Index(i))
if err != nil {
return fmt.Errorf("reflect from %dth field: %v", i, err)
}
}

return nil
}

if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot reflect from non-pointer struct")
return errors.New("not a pointer to a struct")
}

return s.reflectFrom(val)
vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go (generated, vendored; 305 changed lines)
@@ -132,7 +132,7 @@ type AutoScale struct {
}

type AutoScaleECS struct {
AutoScale // embedding
AutoScale
Attributes []*AutoScaleAttributes `json:"attributes,omitempty"`
ShouldScaleDownNonServiceTasks *bool `json:"shouldScaleDownNonServiceTasks,omitempty"`

@@ -141,15 +141,15 @@ type AutoScaleECS struct {
}

type AutoScaleKubernetes struct {
AutoScale // embedding
Labels []*AutoScaleLabel `json:"labels,omitempty"`
AutoScale
Labels []*AutoScaleLabel `json:"labels,omitempty"`

forceSendFields []string
nullFields []string
}

type AutoScaleNomad struct {
AutoScale // embedding
AutoScale
Constraints []*AutoScaleConstraint `json:"constraints,omitempty"`

forceSendFields []string
@@ -157,7 +157,7 @@ type AutoScaleNomad struct {
}

type AutoScaleDockerSwarm struct {
AutoScale // embedding
AutoScale

forceSendFields []string
nullFields []string
@@ -286,6 +286,14 @@ type RancherIntegration struct {
type EC2ContainerServiceIntegration struct {
ClusterName *string `json:"clusterName,omitempty"`
AutoScale *AutoScaleECS `json:"autoScale,omitempty"`
Batch *Batch `json:"batch,omitempty"`

forceSendFields []string
nullFields []string
}

type Batch struct {
JobQueueNames []string `json:"jobQueueNames,omitempty"`

forceSendFields []string
nullFields []string
@@ -490,6 +498,7 @@ type Strategy struct {
Persistence *Persistence `json:"persistence,omitempty"`
RevertToSpot *RevertToSpot `json:"revertToSpot,omitempty"`
ScalingStrategy *ScalingStrategy `json:"scalingStrategy,omitempty"`
UtilizeCommitments *bool `json:"utilizeCommitments,omitempty"`

forceSendFields []string
nullFields []string
@@ -608,6 +617,7 @@ type LaunchSpecification struct {
BlockDeviceMappings []*BlockDeviceMapping `json:"blockDeviceMappings,omitempty"`
NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
Tags []*Tag `json:"tags,omitempty"`
MetadataOptions *MetadataOptions `json:"metadataOptions,omitempty"`

forceSendFields []string
nullFields []string
@@ -666,6 +676,7 @@ type EBS struct {
VolumeType *string `json:"volumeType,omitempty"`
VolumeSize *int `json:"volumeSize,omitempty"`
IOPS *int `json:"iops,omitempty"`
Throughput *int `json:"throughput,omitempty"`

forceSendFields []string
nullFields []string
@@ -897,8 +908,8 @@ type RollGroupStatus struct {
}

type Progress struct {
Unit *string `json:"unit,omitempty"`
Value *int `json:"value,omitempty"`
Unit *string `json:"unit,omitempty"`
Value *float64 `json:"value,omitempty"`
}

type StopDeploymentInput struct {
@@ -909,6 +920,14 @@ type StopDeploymentInput struct {

type StopDeploymentOutput struct{}

type MetadataOptions struct {
HTTPTokens *string `json:"httpTokens,omitempty"`
HTTPPutResponseHopLimit *int `json:"httpPutResponseHopLimit,omitempty"`

forceSendFields []string
nullFields []string
}

func deploymentStatusFromJSON(in []byte) (*RollGroupStatus, error) {
b := new(RollGroupStatus)
if err := json.Unmarshal(in, b); err != nil {
@@ -1980,12 +1999,6 @@ func (o *EC2ContainerServiceIntegration) SetClusterName(v *string) *EC2Container
return o
}

func (o AutoScaleECS) MarshalJSON() ([]byte, error) {
type noMethod AutoScaleECS
raw := noMethod(o)
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
}

func (o *EC2ContainerServiceIntegration) SetAutoScale(v *AutoScaleECS) *EC2ContainerServiceIntegration {
if o.AutoScale = v; o.AutoScale == nil {
o.nullFields = append(o.nullFields, "AutoScale")
@@ -1993,6 +2006,23 @@ func (o *EC2ContainerServiceIntegration) SetAutoScale(v *AutoScaleECS) *EC2Conta
return o
}

func (o *EC2ContainerServiceIntegration) SetBatch(v *Batch) *EC2ContainerServiceIntegration {
|
||||
if o.Batch = v; o.Batch == nil {
|
||||
o.nullFields = append(o.nullFields, "Batch")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region AutoScaleECS
|
||||
|
||||
func (o AutoScaleECS) MarshalJSON() ([]byte, error) {
|
||||
type noMethod AutoScaleECS
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *AutoScaleECS) SetAttributes(v []*AutoScaleAttributes) *AutoScaleECS {
|
||||
if o.Attributes = v; o.Attributes == nil {
|
||||
o.nullFields = append(o.nullFields, "Attributes")
|
||||
|
@ -2009,7 +2039,24 @@ func (o *AutoScaleECS) SetShouldScaleDownNonServiceTasks(v *bool) *AutoScaleECS
|
|||
|
||||
// endregion
|
||||
|
||||
// region Docker Swarm
|
||||
// region Batch
|
||||
|
||||
func (o Batch) MarshalJSON() ([]byte, error) {
|
||||
type noMethod Batch
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *Batch) SetJobQueueNames(v []string) *Batch {
|
||||
if o.JobQueueNames = v; o.JobQueueNames == nil {
|
||||
o.nullFields = append(o.nullFields, "JobQueueNames")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
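
Illustrative usage of the new Batch block on the ECS integration, as a hedged sketch: it assumes the builder-style setters added above plus the SDK's spotinst.String pointer helper, and the cluster and queue names are placeholders.

package main

import (
	"fmt"

	aws "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws"
	"github.com/spotinst/spotinst-sdk-go/spotinst"
)

func main() {
	// Wire an AWS Batch job queue into the ECS integration block.
	// Cluster and queue names below are placeholders, not real resources.
	integration := new(aws.EC2ContainerServiceIntegration).
		SetClusterName(spotinst.String("my-ecs-cluster")).
		SetBatch(new(aws.Batch).
			SetJobQueueNames([]string{"my-job-queue"}))

	fmt.Printf("%+v\n", integration)
}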
|
||||
|
||||
// region DockerSwarmIntegration
|
||||
|
||||
func (o DockerSwarmIntegration) MarshalJSON() ([]byte, error) {
|
||||
type noMethod DockerSwarmIntegration
|
||||
|
@ -3010,6 +3057,13 @@ func (o *Strategy) SetScalingStrategy(v *ScalingStrategy) *Strategy {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *Strategy) SetUtilizeCommitments(v *bool) *Strategy {
|
||||
if o.UtilizeCommitments = v; o.UtilizeCommitments == nil {
|
||||
o.nullFields = append(o.nullFields, "UtilizeCommitments")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region ScalingStrategy
|
||||
|
@ -3486,6 +3540,13 @@ func (o *LaunchSpecification) SetTags(v []*Tag) *LaunchSpecification {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *LaunchSpecification) SetMetadataOptions(v *MetadataOptions) *LaunchSpecification {
|
||||
if o.MetadataOptions = v; o.MetadataOptions == nil {
|
||||
o.nullFields = append(o.nullFields, "MetadataOptions")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region LoadBalancersConfig
|
||||
|
@ -3732,6 +3793,13 @@ func (o *EBS) SetIOPS(v *int) *EBS {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *EBS) SetThroughput(v *int) *EBS {
|
||||
if o.Throughput = v; o.Throughput == nil {
|
||||
o.nullFields = append(o.nullFields, "Throughput")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region IAMInstanceProfile
|
||||
|
@ -4037,4 +4105,211 @@ func (s *ServiceOp) Scale(ctx context.Context, input *ScaleGroupInput) (*ScaleGr
|
|||
return output, err
|
||||
}
|
||||
|
||||
//endregion
|
||||
// endregion
|
||||
|
||||
// region SuspendProcesses
|
||||
|
||||
type SuspendProcesses struct {
|
||||
Suspensions []*Suspension `json:"suspensions,omitempty"`
|
||||
Processes []string `json:"processes,omitempty"`
|
||||
}
|
||||
|
||||
type Suspension struct {
|
||||
Name *string `json:"name,omitempty"`
|
||||
TTLInMinutes *int `json:"ttlInMinutes,omitempty"`
|
||||
|
||||
// Read-only fields.
|
||||
ExpiresAt *time.Time `json:"expiresAt,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
}
|
||||
|
||||
type CreateSuspensionsInput struct {
|
||||
GroupID *string `json:"groupId,omitempty"`
|
||||
Suspensions []*Suspension `json:"suspensions,omitempty"`
|
||||
}
|
||||
|
||||
type CreateSuspensionsOutput struct {
|
||||
SuspendProcesses *SuspendProcesses `json:"suspendProcesses,omitempty"`
|
||||
}
|
||||
|
||||
type ListSuspensionsInput struct {
|
||||
GroupID *string `json:"groupId,omitempty"`
|
||||
}
|
||||
|
||||
type ListSuspensionsOutput struct {
|
||||
SuspendProcesses *SuspendProcesses `json:"suspendProcesses,omitempty"`
|
||||
}
|
||||
|
||||
type DeleteSuspensionsInput struct {
|
||||
GroupID *string `json:"groupId,omitempty"`
|
||||
Processes []string `json:"processes,omitempty"`
|
||||
}
|
||||
|
||||
type DeleteSuspensionsOutput struct{}
|
||||
|
||||
func suspendProcessesFromHttpResponse(resp *http.Response) ([]*SuspendProcesses, error) {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return suspendProcessesFromJSON(body)
|
||||
}
|
||||
|
||||
func suspendProcessesObjFromJSON(in []byte) (*SuspendProcesses, error) {
|
||||
v := new(SuspendProcesses)
|
||||
if err := json.Unmarshal(in, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func suspendProcessesFromJSON(in []byte) ([]*SuspendProcesses, error) {
|
||||
var rw client.Response
|
||||
if err := json.Unmarshal(in, &rw); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := make([]*SuspendProcesses, len(rw.Response.Items))
|
||||
if len(out) == 0 {
|
||||
return out, nil
|
||||
}
|
||||
for i, rb := range rw.Response.Items {
|
||||
v, err := suspendProcessesObjFromJSON(rb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out[i] = v
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (s *ServiceOp) CreateSuspensions(ctx context.Context, input *CreateSuspensionsInput) (*CreateSuspensionsOutput, error) {
|
||||
path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{
|
||||
"groupId": spotinst.StringValue(input.GroupID),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We do not need the ID anymore so let's drop it.
|
||||
input.GroupID = nil
|
||||
|
||||
r := client.NewRequest(http.MethodPost, path)
|
||||
r.Obj = input
|
||||
|
||||
resp, err := client.RequireOK(s.Client.Do(ctx, r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
suspendProcesses, err := suspendProcessesFromHttpResponse(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output := new(CreateSuspensionsOutput)
|
||||
if len(suspendProcesses) > 0 {
|
||||
output.SuspendProcesses = suspendProcesses[0]
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (s *ServiceOp) ListSuspensions(ctx context.Context, input *ListSuspensionsInput) (*ListSuspensionsOutput, error) {
|
||||
path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{
|
||||
"groupId": spotinst.StringValue(input.GroupID),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := client.NewRequest(http.MethodGet, path)
|
||||
resp, err := client.RequireOK(s.Client.Do(ctx, r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
suspendProcesses, err := suspendProcessesFromHttpResponse(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output := new(ListSuspensionsOutput)
|
||||
if len(suspendProcesses) > 0 {
|
||||
output.SuspendProcesses = suspendProcesses[0]
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (s *ServiceOp) DeleteSuspensions(ctx context.Context, input *DeleteSuspensionsInput) (*DeleteSuspensionsOutput, error) {
|
||||
path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{
|
||||
"groupId": spotinst.StringValue(input.GroupID),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We do not need the ID anymore so let's drop it.
|
||||
input.GroupID = nil
|
||||
|
||||
r := client.NewRequest(http.MethodDelete, path)
|
||||
r.Obj = input
|
||||
|
||||
resp, err := client.RequireOK(s.Client.Do(ctx, r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return &DeleteSuspensionsOutput{}, nil
|
||||
}
|
||||
|
||||
func (o Suspension) MarshalJSON() ([]byte, error) {
|
||||
type noMethod Suspension
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *Suspension) SetName(v *string) *Suspension {
|
||||
if o.Name = v; o.Name == nil {
|
||||
o.nullFields = append(o.nullFields, "Name")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Suspension) SetTTLInMinutes(v *int) *Suspension {
|
||||
if o.TTLInMinutes = v; o.TTLInMinutes == nil {
|
||||
o.nullFields = append(o.nullFields, "TTLInMinutes")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
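
A hedged end-to-end sketch of the new suspension API: the session.New / CloudProviderAWS wiring follows the SDK's usual client pattern and is not part of this diff, and the group ID and process name are placeholders.

package main

import (
	"context"
	"log"

	"github.com/spotinst/spotinst-sdk-go/service/elastigroup"
	aws "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws"
	"github.com/spotinst/spotinst-sdk-go/spotinst"
	"github.com/spotinst/spotinst-sdk-go/spotinst/session"
)

func main() {
	// Standard SDK wiring; credentials and account come from the environment.
	svc := elastigroup.New(session.New()).CloudProviderAWS()

	// Suspend the AUTO_HEALING process for one hour on a group.
	out, err := svc.CreateSuspensions(context.Background(), &aws.CreateSuspensionsInput{
		GroupID: spotinst.String("sig-12345678"), // placeholder group ID
		Suspensions: []*aws.Suspension{
			new(aws.Suspension).
				SetName(spotinst.String("AUTO_HEALING")).
				SetTTLInMinutes(spotinst.Int(60)),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("active suspensions: %+v", out.SuspendProcesses)
}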
|
||||
|
||||
// region MetadataOptions
|
||||
|
||||
func (o MetadataOptions) MarshalJSON() ([]byte, error) {
|
||||
type noMethod MetadataOptions
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *MetadataOptions) SetHTTPTokens(v *string) *MetadataOptions {
|
||||
if o.HTTPTokens = v; o.HTTPTokens == nil {
|
||||
o.nullFields = append(o.nullFields, "HTTPTokens")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *MetadataOptions) SetHTTPPutResponseHopLimit(v *int) *MetadataOptions {
|
||||
if o.HTTPPutResponseHopLimit = v; o.HTTPPutResponseHopLimit == nil {
|
||||
o.nullFields = append(o.nullFields, "HTTPPutResponseHopLimit")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
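
A minimal sketch of the new MetadataOptions block, assuming the setters above and the spotinst pointer helpers: it enforces IMDSv2 on an Elastigroup launch specification. The values are illustrative.

package main

import (
	"fmt"

	aws "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws"
	"github.com/spotinst/spotinst-sdk-go/spotinst"
)

func main() {
	// Require IMDSv2 tokens and keep the PUT response hop limit at 1
	// for instances launched from this specification.
	lsp := new(aws.LaunchSpecification).SetMetadataOptions(
		new(aws.MetadataOptions).
			SetHTTPTokens(spotinst.String("required")).
			SetHTTPPutResponseHopLimit(spotinst.Int(1)))

	fmt.Printf("%+v\n", lsp.MetadataOptions)
}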
|
||||
|
|
|
@ -31,6 +31,9 @@ type Service interface {
|
|||
StartBeanstalkMaintenance(context.Context, *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error)
|
||||
FinishBeanstalkMaintenance(context.Context, *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error)
|
||||
GetBeanstalkMaintenanceStatus(context.Context, *BeanstalkMaintenanceInput) (*string, error)
|
||||
CreateSuspensions(context.Context, *CreateSuspensionsInput) (*CreateSuspensionsOutput, error)
|
||||
ListSuspensions(context.Context, *ListSuspensionsInput) (*ListSuspensionsOutput, error)
|
||||
DeleteSuspensions(context.Context, *DeleteSuspensionsInput) (*DeleteSuspensionsOutput, error)
|
||||
}
|
||||
|
||||
type ServiceOp struct {
|
||||
|
|
95
vendor/github.com/spotinst/spotinst-sdk-go/service/ocean/providers/aws/launch_spec.go
generated
vendored
|
@ -14,24 +14,25 @@ import (
|
|||
)
|
||||
|
||||
type LaunchSpec struct {
|
||||
ID *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
OceanID *string `json:"oceanId,omitempty"`
|
||||
ImageID *string `json:"imageId,omitempty"`
|
||||
UserData *string `json:"userData,omitempty"`
|
||||
RootVolumeSize *int `json:"rootVolumeSize,omitempty"`
|
||||
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
|
||||
SubnetIDs []string `json:"subnetIds,omitempty"`
|
||||
InstanceTypes []string `json:"instanceTypes,omitempty"`
|
||||
ResourceLimits *ResourceLimits `json:"resourceLimits,omitempty"`
|
||||
IAMInstanceProfile *IAMInstanceProfile `json:"iamInstanceProfile,omitempty"`
|
||||
AutoScale *AutoScale `json:"autoScale,omitempty"`
|
||||
ElasticIPPool *ElasticIPPool `json:"elasticIpPool,omitempty"`
|
||||
BlockDeviceMappings []*BlockDeviceMapping `json:"blockDeviceMappings,omitempty"`
|
||||
Labels []*Label `json:"labels,omitempty"`
|
||||
Taints []*Taint `json:"taints,omitempty"`
|
||||
Tags []*Tag `json:"tags,omitempty"`
|
||||
|
||||
ID *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
OceanID *string `json:"oceanId,omitempty"`
|
||||
ImageID *string `json:"imageId,omitempty"`
|
||||
UserData *string `json:"userData,omitempty"`
|
||||
RootVolumeSize *int `json:"rootVolumeSize,omitempty"`
|
||||
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
|
||||
SubnetIDs []string `json:"subnetIds,omitempty"`
|
||||
InstanceTypes []string `json:"instanceTypes,omitempty"`
|
||||
Strategy *LaunchSpecStrategy `json:"strategy,omitempty"`
|
||||
ResourceLimits *ResourceLimits `json:"resourceLimits,omitempty"`
|
||||
IAMInstanceProfile *IAMInstanceProfile `json:"iamInstanceProfile,omitempty"`
|
||||
AutoScale *AutoScale `json:"autoScale,omitempty"`
|
||||
ElasticIPPool *ElasticIPPool `json:"elasticIpPool,omitempty"`
|
||||
BlockDeviceMappings []*BlockDeviceMapping `json:"blockDeviceMappings,omitempty"`
|
||||
Labels []*Label `json:"labels,omitempty"`
|
||||
Taints []*Taint `json:"taints,omitempty"`
|
||||
Tags []*Tag `json:"tags,omitempty"`
|
||||
AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty"`
|
||||
// Read-only fields.
|
||||
CreatedAt *time.Time `json:"createdAt,omitempty"`
|
||||
UpdatedAt *time.Time `json:"updatedAt,omitempty"`
|
||||
|
@ -78,6 +79,7 @@ type EBS struct {
|
|||
VolumeType *string `json:"volumeType,omitempty"`
|
||||
IOPS *int `json:"iops,omitempty"`
|
||||
VolumeSize *int `json:"volumeSize,omitempty"`
|
||||
Throughput *int `json:"throughput,omitempty"`
|
||||
DynamicVolumeSize *DynamicVolumeSize `json:"dynamicVolumeSize,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
|
@ -142,6 +144,13 @@ type TagSelector struct {
|
|||
nullFields []string
|
||||
}
|
||||
|
||||
type LaunchSpecStrategy struct {
|
||||
SpotPercentage *int `json:"spotPercentage,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
}
|
||||
|
||||
type ListLaunchSpecsInput struct {
|
||||
OceanID *string `json:"oceanId,omitempty"`
|
||||
}
|
||||
|
@ -468,9 +477,23 @@ func (o *LaunchSpec) SetResourceLimits(v *ResourceLimits) *LaunchSpec {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *LaunchSpec) SetStrategy(v *LaunchSpecStrategy) *LaunchSpec {
|
||||
if o.Strategy = v; o.Strategy == nil {
|
||||
o.nullFields = append(o.nullFields, "Strategy")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *LaunchSpec) SetAssociatePublicIPAddress(v *bool) *LaunchSpec {
|
||||
if o.AssociatePublicIPAddress = v; o.AssociatePublicIPAddress == nil {
|
||||
o.nullFields = append(o.nullFields, "AssociatePublicIPAddress")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region BlockDeviceMappings
|
||||
// region BlockDeviceMapping
|
||||
|
||||
func (o BlockDeviceMapping) MarshalJSON() ([]byte, error) {
|
||||
type noMethod BlockDeviceMapping
|
||||
|
@ -572,9 +595,16 @@ func (o *EBS) SetDynamicVolumeSize(v *DynamicVolumeSize) *EBS {
|
|||
return o
|
||||
}
|
||||
|
||||
// end region
|
||||
func (o *EBS) SetThroughput(v *int) *EBS {
|
||||
if o.Throughput = v; o.Throughput == nil {
|
||||
o.nullFields = append(o.nullFields, "Throughput")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// region Dynamic Volume Size
|
||||
// endregion
|
||||
|
||||
// region DynamicVolumeSize
|
||||
|
||||
func (o DynamicVolumeSize) MarshalJSON() ([]byte, error) {
|
||||
type noMethod DynamicVolumeSize
|
||||
|
@ -603,7 +633,7 @@ func (o *DynamicVolumeSize) SetSizePerResourceUnit(v *int) *DynamicVolumeSize {
|
|||
return o
|
||||
}
|
||||
|
||||
// end region
|
||||
// endregion
|
||||
|
||||
// region ResourceLimits
|
||||
|
||||
|
@ -677,7 +707,7 @@ func (o *Taint) SetEffect(v *string) *Taint {
|
|||
|
||||
// endregion
|
||||
|
||||
//region AutoScale
|
||||
// region AutoScale
|
||||
|
||||
func (o AutoScale) MarshalJSON() ([]byte, error) {
|
||||
type noMethod AutoScale
|
||||
|
@ -692,7 +722,7 @@ func (o *AutoScale) SetHeadrooms(v []*AutoScaleHeadroom) *AutoScale {
|
|||
return o
|
||||
}
|
||||
|
||||
//endregion
|
||||
// endregion
|
||||
|
||||
// region AutoScaleHeadroom
|
||||
|
||||
|
@ -772,3 +802,20 @@ func (o *TagSelector) SetTagValue(v *string) *TagSelector {
|
|||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region Strategy
|
||||
|
||||
func (o LaunchSpecStrategy) MarshalJSON() ([]byte, error) {
|
||||
type noMethod LaunchSpecStrategy
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *LaunchSpecStrategy) SetSpotPercentage(v *int) *LaunchSpecStrategy {
|
||||
if o.SpotPercentage = v; o.SpotPercentage == nil {
|
||||
o.nullFields = append(o.nullFields, "SpotPercentage")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
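
This LaunchSpecStrategy region is the heart of the change: a spot percentage can now live on an individual Ocean launch spec instead of only on the cluster. A minimal usage sketch, assuming the setters above and the SDK's spotinst.Int / spotinst.IntValue pointer helpers; the 70% value is illustrative.

package main

import (
	"fmt"

	aws "github.com/spotinst/spotinst-sdk-go/service/ocean/providers/aws"
	"github.com/spotinst/spotinst-sdk-go/spotinst"
)

func main() {
	// Pin 70% spot capacity on this launch spec, overriding whatever
	// spot percentage is configured at the Ocean cluster level.
	spec := new(aws.LaunchSpec).SetStrategy(
		new(aws.LaunchSpecStrategy).
			SetSpotPercentage(spotinst.Int(70)))

	fmt.Println(spotinst.IntValue(spec.Strategy.SpotPercentage)) // 70
}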
|
||||
|
|
214
vendor/github.com/spotinst/spotinst-sdk-go/service/ocean/providers/aws/launch_spec_ecs.go
generated
vendored
|
@ -14,16 +14,18 @@ import (
|
|||
)
|
||||
|
||||
type ECSLaunchSpec struct {
|
||||
ID *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
OceanID *string `json:"oceanId,omitempty"`
|
||||
ImageID *string `json:"imageId,omitempty"`
|
||||
UserData *string `json:"userData,omitempty"`
|
||||
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
|
||||
IAMInstanceProfile *ECSIAMInstanceProfile `json:"iamInstanceProfile,omitempty"`
|
||||
Attributes []*ECSAttribute `json:"attributes,omitempty"`
|
||||
AutoScale *ECSAutoScale `json:"autoScale,omitempty"`
|
||||
Tags []*Tag `json:"tags,omitempty"`
|
||||
ID *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
OceanID *string `json:"oceanId,omitempty"`
|
||||
ImageID *string `json:"imageId,omitempty"`
|
||||
UserData *string `json:"userData,omitempty"`
|
||||
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
|
||||
AutoScale *ECSAutoScale `json:"autoScale,omitempty"`
|
||||
IAMInstanceProfile *ECSIAMInstanceProfile `json:"iamInstanceProfile,omitempty"`
|
||||
Attributes []*ECSAttribute `json:"attributes,omitempty"`
|
||||
BlockDeviceMappings []*ECSBlockDeviceMapping `json:"blockDeviceMappings,omitempty"`
|
||||
Tags []*Tag `json:"tags,omitempty"`
|
||||
InstanceTypes []string `json:"instanceTypes,omitempty"`
|
||||
|
||||
// Read-only fields.
|
||||
CreatedAt *time.Time `json:"createdAt,omitempty"`
|
||||
|
@ -70,6 +72,40 @@ type ECSAutoScaleHeadroom struct {
|
|||
nullFields []string
|
||||
}
|
||||
|
||||
type ECSBlockDeviceMapping struct {
|
||||
DeviceName *string `json:"deviceName,omitempty"`
|
||||
NoDevice *string `json:"noDevice,omitempty"`
|
||||
VirtualName *string `json:"virtualName,omitempty"`
|
||||
EBS *ECSEBS `json:"ebs,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
}
|
||||
|
||||
type ECSEBS struct {
|
||||
DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"`
|
||||
Encrypted *bool `json:"encrypted,omitempty"`
|
||||
KMSKeyID *string `json:"kmsKeyId,omitempty"`
|
||||
SnapshotID *string `json:"snapshotId,omitempty"`
|
||||
VolumeType *string `json:"volumeType,omitempty"`
|
||||
IOPS *int `json:"iops,omitempty"`
|
||||
VolumeSize *int `json:"volumeSize,omitempty"`
|
||||
Throughput *int `json:"throughput,omitempty"`
|
||||
DynamicVolumeSize *ECSDynamicVolumeSize `json:"dynamicVolumeSize,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
}
|
||||
|
||||
type ECSDynamicVolumeSize struct {
|
||||
BaseSize *int `json:"baseSize,omitempty"`
|
||||
SizePerResourceUnit *int `json:"sizePerResourceUnit,omitempty"`
|
||||
Resource *string `json:"resource,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
}
|
||||
|
||||
type ListECSLaunchSpecsInput struct {
|
||||
OceanID *string `json:"oceanId,omitempty"`
|
||||
}
|
||||
|
@ -340,6 +376,13 @@ func (o *ECSLaunchSpec) SetAutoScale(v *ECSAutoScale) *ECSLaunchSpec {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *ECSLaunchSpec) SetBlockDeviceMappings(v []*ECSBlockDeviceMapping) *ECSLaunchSpec {
|
||||
if o.BlockDeviceMappings = v; o.BlockDeviceMappings == nil {
|
||||
o.nullFields = append(o.nullFields, "BlockDeviceMappings")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSLaunchSpec) SetTags(v []*Tag) *ECSLaunchSpec {
|
||||
if o.Tags = v; o.Tags == nil {
|
||||
o.nullFields = append(o.nullFields, "Tags")
|
||||
|
@ -347,6 +390,13 @@ func (o *ECSLaunchSpec) SetTags(v []*Tag) *ECSLaunchSpec {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *ECSLaunchSpec) SetInstanceTypes(v []string) *ECSLaunchSpec {
|
||||
if o.InstanceTypes = v; o.InstanceTypes == nil {
|
||||
o.nullFields = append(o.nullFields, "InstanceTypes")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region Attributes
|
||||
|
@ -373,7 +423,7 @@ func (o *ECSAttribute) SetValue(v *string) *ECSAttribute {
|
|||
|
||||
// endregion
|
||||
|
||||
//region AutoScale
|
||||
// region AutoScale
|
||||
|
||||
func (o ECSAutoScale) MarshalJSON() ([]byte, error) {
|
||||
type noMethod ECSAutoScale
|
||||
|
@ -420,3 +470,145 @@ func (o *ECSAutoScaleHeadroom) SetNumOfUnits(v *int) *ECSAutoScaleHeadroom {
|
|||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region ECSBlockDeviceMapping
|
||||
|
||||
func (o ECSBlockDeviceMapping) MarshalJSON() ([]byte, error) {
|
||||
type noMethod ECSBlockDeviceMapping
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *ECSBlockDeviceMapping) SetDeviceName(v *string) *ECSBlockDeviceMapping {
|
||||
if o.DeviceName = v; o.DeviceName == nil {
|
||||
o.nullFields = append(o.nullFields, "DeviceName")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSBlockDeviceMapping) SetNoDevice(v *string) *ECSBlockDeviceMapping {
|
||||
if o.NoDevice = v; o.NoDevice == nil {
|
||||
o.nullFields = append(o.nullFields, "NoDevice")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSBlockDeviceMapping) SetVirtualName(v *string) *ECSBlockDeviceMapping {
|
||||
if o.VirtualName = v; o.VirtualName == nil {
|
||||
o.nullFields = append(o.nullFields, "VirtualName")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSBlockDeviceMapping) SetEBS(v *ECSEBS) *ECSBlockDeviceMapping {
|
||||
if o.EBS = v; o.EBS == nil {
|
||||
o.nullFields = append(o.nullFields, "EBS")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region ECSEBS
|
||||
|
||||
func (o ECSEBS) MarshalJSON() ([]byte, error) {
|
||||
type noMethod ECSEBS
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetEncrypted(v *bool) *ECSEBS {
|
||||
if o.Encrypted = v; o.Encrypted == nil {
|
||||
o.nullFields = append(o.nullFields, "Encrypted")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetIOPS(v *int) *ECSEBS {
|
||||
if o.IOPS = v; o.IOPS == nil {
|
||||
o.nullFields = append(o.nullFields, "IOPS")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetKMSKeyId(v *string) *ECSEBS {
|
||||
if o.KMSKeyID = v; o.KMSKeyID == nil {
|
||||
o.nullFields = append(o.nullFields, "KMSKeyID")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetSnapshotId(v *string) *ECSEBS {
|
||||
if o.SnapshotID = v; o.SnapshotID == nil {
|
||||
o.nullFields = append(o.nullFields, "SnapshotID")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetVolumeType(v *string) *ECSEBS {
|
||||
if o.VolumeType = v; o.VolumeType == nil {
|
||||
o.nullFields = append(o.nullFields, "VolumeType")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetDeleteOnTermination(v *bool) *ECSEBS {
|
||||
if o.DeleteOnTermination = v; o.DeleteOnTermination == nil {
|
||||
o.nullFields = append(o.nullFields, "DeleteOnTermination")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetVolumeSize(v *int) *ECSEBS {
|
||||
if o.VolumeSize = v; o.VolumeSize == nil {
|
||||
o.nullFields = append(o.nullFields, "VolumeSize")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetDynamicVolumeSize(v *ECSDynamicVolumeSize) *ECSEBS {
|
||||
if o.DynamicVolumeSize = v; o.DynamicVolumeSize == nil {
|
||||
o.nullFields = append(o.nullFields, "DynamicVolumeSize")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSEBS) SetThroughput(v *int) *ECSEBS {
|
||||
if o.Throughput = v; o.Throughput == nil {
|
||||
o.nullFields = append(o.nullFields, "Throughput")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region ECSDynamicVolumeSize
|
||||
|
||||
func (o ECSDynamicVolumeSize) MarshalJSON() ([]byte, error) {
|
||||
type noMethod ECSDynamicVolumeSize
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *ECSDynamicVolumeSize) SetBaseSize(v *int) *ECSDynamicVolumeSize {
|
||||
if o.BaseSize = v; o.BaseSize == nil {
|
||||
o.nullFields = append(o.nullFields, "BaseSize")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSDynamicVolumeSize) SetResource(v *string) *ECSDynamicVolumeSize {
|
||||
if o.Resource = v; o.Resource == nil {
|
||||
o.nullFields = append(o.nullFields, "Resource")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSDynamicVolumeSize) SetSizePerResourceUnit(v *int) *ECSDynamicVolumeSize {
|
||||
if o.SizePerResourceUnit = v; o.SizePerResourceUnit == nil {
|
||||
o.nullFields = append(o.nullFields, "SizePerResourceUnit")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
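
A hedged sketch composing the new ECSBlockDeviceMapping / ECSEBS types with the setters above, including the gp3 Throughput field added in this change; the device name, volume size, and throughput are illustrative values.

package main

import (
	"fmt"

	aws "github.com/spotinst/spotinst-sdk-go/service/ocean/providers/aws"
	"github.com/spotinst/spotinst-sdk-go/spotinst"
)

func main() {
	// Attach an extra gp3 volume to an ECS launch spec.
	bdm := new(aws.ECSBlockDeviceMapping).
		SetDeviceName(spotinst.String("/dev/xvdb")).
		SetEBS(new(aws.ECSEBS).
			SetVolumeType(spotinst.String("gp3")).
			SetVolumeSize(spotinst.Int(50)).
			SetThroughput(spotinst.Int(125)).
			SetDeleteOnTermination(spotinst.Bool(true)))

	spec := new(aws.ECSLaunchSpec).
		SetBlockDeviceMappings([]*aws.ECSBlockDeviceMapping{bdm})

	fmt.Printf("%+v\n", spec.BlockDeviceMappings[0].EBS)
}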
|
||||
|
|
97
vendor/github.com/spotinst/spotinst-sdk-go/service/ocean/providers/aws/ocean.go
generated
vendored
|
@ -51,6 +51,7 @@ type Strategy struct {
|
|||
FallbackToOnDemand *bool `json:"fallbackToOd,omitempty"`
|
||||
DrainingTimeout *int `json:"drainingTimeout,omitempty"`
|
||||
GracePeriod *int `json:"gracePeriod,omitempty"`
|
||||
UtilizeCommitments *bool `json:"utilizeCommitments,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
|
@ -119,6 +120,7 @@ type LaunchSpecification struct {
|
|||
RootVolumeSize *int `json:"rootVolumeSize,omitempty"`
|
||||
Monitoring *bool `json:"monitoring,omitempty"`
|
||||
EBSOptimized *bool `json:"ebsOptimized,omitempty"`
|
||||
UseAsTemplateOnly *bool `json:"useAsTemplateOnly,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
|
@ -255,6 +257,7 @@ type RollSpec struct {
|
|||
ID *string `json:"id,omitempty"`
|
||||
ClusterID *string `json:"clusterId,omitempty"`
|
||||
Comment *string `json:"comment,omitempty"`
|
||||
Status *string `json:"status,omitempty"`
|
||||
BatchSizePercentage *int `json:"batchSizePercentage,omitempty"`
|
||||
DisableLaunchSpecAutoScaling *bool `json:"disableLaunchSpecAutoScaling,omitempty"`
|
||||
LaunchSpecIDs []string `json:"launchSpecIds,omitempty"`
|
||||
|
@ -265,22 +268,22 @@ type RollSpec struct {
|
|||
}
|
||||
|
||||
type RollStatus struct {
|
||||
ID *string `json:"id,omitempty"`
|
||||
ClusterID *string `json:"oceanId,omitempty"`
|
||||
Comment *string `json:"comment,omitempty"`
|
||||
Status *string `json:"status,omitempty"`
|
||||
Progress *Progress `json:"progress,omitempty"`
|
||||
CurrentBatch *int `json:"currentBatch,omitempty"`
|
||||
NumOfBatches *int `json:"numOfBatches,omitempty"`
|
||||
LaunchSpecIDs []string `json:"launchSpecIds,omitempty"`
|
||||
InstanceIDs []string `json:"instanceIds,omitempty"`
|
||||
CreatedAt *string `json:"createdAt,omitempty"`
|
||||
UpdatedAt *string `json:"updatedAt,omitempty"`
|
||||
ID *string `json:"id,omitempty"`
|
||||
ClusterID *string `json:"oceanId,omitempty"`
|
||||
Comment *string `json:"comment,omitempty"`
|
||||
Status *string `json:"status,omitempty"`
|
||||
Progress *Progress `json:"progress,omitempty"`
|
||||
CurrentBatch *int `json:"currentBatch,omitempty"`
|
||||
NumOfBatches *int `json:"numOfBatches,omitempty"`
|
||||
LaunchSpecIDs []string `json:"launchSpecIds,omitempty"`
|
||||
InstanceIDs []string `json:"instanceIds,omitempty"`
|
||||
CreatedAt *time.Time `json:"createdAt,omitempty"`
|
||||
UpdatedAt *time.Time `json:"updatedAt,omitempty"`
|
||||
}
|
||||
|
||||
type Progress struct {
|
||||
Unit *string `json:"unit,omitempty"`
|
||||
Value *int `json:"value,omitempty"`
|
||||
Unit *string `json:"unit,omitempty"`
|
||||
Value *float64 `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
type ListRollsInput struct {
|
||||
|
@ -633,12 +636,14 @@ func (s *ServiceOp) ReadRoll(ctx context.Context, input *ReadRollInput) (*ReadRo
|
|||
func (s *ServiceOp) UpdateRoll(ctx context.Context, input *UpdateRollInput) (*UpdateRollOutput, error) {
|
||||
path, err := uritemplates.Expand("/ocean/aws/k8s/cluster/{clusterId}/roll/{rollId}", uritemplates.Values{
|
||||
"clusterId": spotinst.StringValue(input.Roll.ClusterID),
|
||||
"rollId": spotinst.StringValue(input.Roll.ID),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We do not need the ID anymore so let's drop it.
|
||||
input.Roll.ID = nil
|
||||
input.Roll.ClusterID = nil
|
||||
|
||||
r := client.NewRequest(http.MethodPut, path)
|
||||
|
@ -813,6 +818,13 @@ func (o *Strategy) SetGracePeriod(v *int) *Strategy {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *Strategy) SetUtilizeCommitments(v *bool) *Strategy {
|
||||
if o.UtilizeCommitments = v; o.UtilizeCommitments == nil {
|
||||
o.nullFields = append(o.nullFields, "UtilizeCommitments")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region Capacity
|
||||
|
@ -1065,6 +1077,13 @@ func (o *LaunchSpecification) SetEBSOptimized(v *bool) *LaunchSpecification {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *LaunchSpecification) SetUseAsTemplateOnly(v *bool) *LaunchSpecification {
|
||||
if o.UseAsTemplateOnly = v; o.UseAsTemplateOnly == nil {
|
||||
o.nullFields = append(o.nullFields, "UseAsTemplateOnly")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region LoadBalancer
|
||||
|
@ -1305,3 +1324,55 @@ func (o *Roll) SetInstanceIDs(v []string) *Roll {
|
|||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region RollSpec
|
||||
|
||||
func (o RollSpec) MarshalJSON() ([]byte, error) {
|
||||
type noMethod RollSpec
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *RollSpec) SetComment(v *string) *RollSpec {
|
||||
if o.Comment = v; o.Comment == nil {
|
||||
o.nullFields = append(o.nullFields, "Comment")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *RollSpec) SetStatus(v *string) *RollSpec {
|
||||
if o.Status = v; o.Status == nil {
|
||||
o.nullFields = append(o.nullFields, "Status")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *RollSpec) SetBatchSizePercentage(v *int) *RollSpec {
|
||||
if o.BatchSizePercentage = v; o.BatchSizePercentage == nil {
|
||||
o.nullFields = append(o.nullFields, "BatchSizePercentage")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *RollSpec) SetDisableLaunchSpecAutoScaling(v *bool) *RollSpec {
|
||||
if o.DisableLaunchSpecAutoScaling = v; o.DisableLaunchSpecAutoScaling == nil {
|
||||
o.nullFields = append(o.nullFields, "DisableLaunchSpecAutoScaling")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *RollSpec) SetLaunchSpecIDs(v []string) *RollSpec {
|
||||
if o.LaunchSpecIDs = v; o.LaunchSpecIDs == nil {
|
||||
o.nullFields = append(o.nullFields, "LaunchSpecIDs")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *RollSpec) SetInstanceIDs(v []string) *RollSpec {
|
||||
if o.InstanceIDs = v; o.InstanceIDs == nil {
|
||||
o.nullFields = append(o.nullFields, "InstanceIDs")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
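
A hedged sketch of the extended RollSpec, assuming the setters above: a roll scoped to a single launch spec, batched at 20%, with that spec's autoscaling paused for the duration. The launch spec ID is a placeholder, and submitting the roll through the Ocean roll API is not shown here.

package main

import (
	"fmt"

	aws "github.com/spotinst/spotinst-sdk-go/service/ocean/providers/aws"
	"github.com/spotinst/spotinst-sdk-go/spotinst"
)

func main() {
	// Describe a roll that only cycles nodes belonging to one launch spec.
	roll := new(aws.RollSpec).
		SetComment(spotinst.String("rotate nodes after AMI change")).
		SetBatchSizePercentage(spotinst.Int(20)).
		SetDisableLaunchSpecAutoScaling(spotinst.Bool(true)).
		SetLaunchSpecIDs([]string{"ols-12345678"}) // placeholder launch spec ID

	fmt.Printf("%+v\n", roll)
}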
|
||||
|
|
82
vendor/github.com/spotinst/spotinst-sdk-go/service/ocean/providers/aws/ocean_ecs.go
generated
vendored
|
@ -48,6 +48,7 @@ type ECSCluster struct {
|
|||
type ECSStrategy struct {
|
||||
DrainingTimeout *int `json:"drainingTimeout,omitempty"`
|
||||
UtilizeReservedInstances *bool `json:"utilizeReservedInstances,omitempty"`
|
||||
UtilizeCommitments *bool `json:"utilizeCommitments,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
|
@ -90,6 +91,7 @@ type ECSCapacity struct {
|
|||
type ECSCompute struct {
|
||||
InstanceTypes *ECSInstanceTypes `json:"instanceTypes,omitempty"`
|
||||
LaunchSpecification *ECSLaunchSpecification `json:"launchSpecification,omitempty"`
|
||||
OptimizeImages *ECSOptimizeImages `json:"optimizeImages,omitempty"`
|
||||
SubnetIDs []string `json:"subnetIds,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
|
@ -104,15 +106,25 @@ type ECSInstanceTypes struct {
|
|||
}
|
||||
|
||||
type ECSLaunchSpecification struct {
|
||||
AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty"`
|
||||
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
|
||||
ImageID *string `json:"imageId,omitempty"`
|
||||
KeyPair *string `json:"keyPair,omitempty"`
|
||||
UserData *string `json:"userData,omitempty"`
|
||||
IAMInstanceProfile *ECSIAMInstanceProfile `json:"iamInstanceProfile,omitempty"`
|
||||
Tags []*Tag `json:"tags,omitempty"`
|
||||
Monitoring *bool `json:"monitoring,omitempty"`
|
||||
EBSOptimized *bool `json:"ebsOptimized,omitempty"`
|
||||
AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty"`
|
||||
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
|
||||
ImageID *string `json:"imageId,omitempty"`
|
||||
KeyPair *string `json:"keyPair,omitempty"`
|
||||
UserData *string `json:"userData,omitempty"`
|
||||
IAMInstanceProfile *ECSIAMInstanceProfile `json:"iamInstanceProfile,omitempty"`
|
||||
Tags []*Tag `json:"tags,omitempty"`
|
||||
Monitoring *bool `json:"monitoring,omitempty"`
|
||||
EBSOptimized *bool `json:"ebsOptimized,omitempty"`
|
||||
BlockDeviceMappings []*ECSBlockDeviceMapping `json:"blockDeviceMappings,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
}
|
||||
|
||||
type ECSOptimizeImages struct {
|
||||
PerformAt *string `json:"performAt,omitempty"`
|
||||
TimeWindows []string `json:"timeWindows,omitempty"`
|
||||
ShouldOptimizeECSAMI *bool `json:"shouldOptimizeEcsAmi,omitempty"`
|
||||
|
||||
forceSendFields []string
|
||||
nullFields []string
|
||||
|
@ -663,6 +675,13 @@ func (o *ECSCompute) SetSubnetIDs(v []string) *ECSCompute {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *ECSCompute) SetOptimizeImages(v *ECSOptimizeImages) *ECSCompute {
|
||||
if o.OptimizeImages = v; o.OptimizeImages == nil {
|
||||
o.nullFields = append(o.nullFields, "OptimizeImages")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region Strategy
|
||||
|
@ -687,6 +706,13 @@ func (o *ECSStrategy) SetUtilizeReservedInstances(v *bool) *ECSStrategy {
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *ECSStrategy) SetUtilizeCommitments(v *bool) *ECSStrategy {
|
||||
if o.UtilizeCommitments = v; o.UtilizeCommitments == nil {
|
||||
o.nullFields = append(o.nullFields, "UtilizeCommitments")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region InstanceTypes
|
||||
|
@ -777,6 +803,44 @@ func (o *ECSLaunchSpecification) SetEBSOptimized(v *bool) *ECSLaunchSpecificatio
|
|||
return o
|
||||
}
|
||||
|
||||
func (o *ECSLaunchSpecification) SetBlockDeviceMappings(v []*ECSBlockDeviceMapping) *ECSLaunchSpecification {
|
||||
if o.BlockDeviceMappings = v; o.BlockDeviceMappings == nil {
|
||||
o.nullFields = append(o.nullFields, "BlockDeviceMappings")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region ECSOptimizeImages
|
||||
|
||||
func (o ECSOptimizeImages) MarshalJSON() ([]byte, error) {
|
||||
type noMethod ECSOptimizeImages
|
||||
raw := noMethod(o)
|
||||
return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
|
||||
}
|
||||
|
||||
func (o *ECSOptimizeImages) SetPerformAt(v *string) *ECSOptimizeImages {
|
||||
if o.PerformAt = v; o.PerformAt == nil {
|
||||
o.nullFields = append(o.nullFields, "PerformAt")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSOptimizeImages) SetTimeWindows(v []string) *ECSOptimizeImages {
|
||||
if o.TimeWindows = v; o.TimeWindows == nil {
|
||||
o.nullFields = append(o.nullFields, "TimeWindows")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *ECSOptimizeImages) SetShouldOptimizeECSAMI(v *bool) *ECSOptimizeImages {
|
||||
if o.ShouldOptimizeECSAMI = v; o.ShouldOptimizeECSAMI == nil {
|
||||
o.nullFields = append(o.nullFields, "ShouldOptimizeECSAMI")
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// endregion
|
||||
|
||||
// region IAMInstanceProfile
|
||||
|
|
12
vendor/github.com/spotinst/spotinst-sdk-go/service/ocean/providers/aws/right_sizing.go
generated
vendored
|
@ -13,12 +13,12 @@ import (
|
|||
|
||||
// ResourceSuggestion represents a single resource suggestion.
|
||||
type ResourceSuggestion struct {
|
||||
DeploymentName *string `json:"deploymentName,omitempty"`
|
||||
Namespace *string `json:"namespace,omitempty"`
|
||||
SuggestedCPU *int `json:"suggestedCPU,omitempty"`
|
||||
RequestedCPU *int `json:"requestedCPU,omitempty"`
|
||||
SuggestedMemory *int `json:"suggestedMemory,omitempty"`
|
||||
RequestedMemory *int `json:"requestedMemory,omitempty"`
|
||||
DeploymentName *string `json:"deploymentName,omitempty"`
|
||||
Namespace *string `json:"namespace,omitempty"`
|
||||
SuggestedCPU *float64 `json:"suggestedCPU,omitempty"`
|
||||
RequestedCPU *float64 `json:"requestedCPU,omitempty"`
|
||||
SuggestedMemory *float64 `json:"suggestedMemory,omitempty"`
|
||||
RequestedMemory *float64 `json:"requestedMemory,omitempty"`
|
||||
}
|
||||
|
||||
// ListResourceSuggestionsInput represents the input of `ListResourceSuggestions` function.
|
||||
|
|
|
@ -427,7 +427,7 @@ type ImportOceanGKELaunchSpecOutput struct {
|
|||
|
||||
// endregion
|
||||
|
||||
//region AutoScale
|
||||
// region AutoScale
|
||||
|
||||
func (o AutoScale) MarshalJSON() ([]byte, error) {
|
||||
type noMethod AutoScale
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
package spotinst
|
||||
|
||||
// SDKVersion is the current version of the SDK.
|
||||
const SDKVersion = "1.58.0"
|
||||
const SDKVersion = "1.75.0"
|
||||
|
||||
// SDKName is the name of the SDK.
|
||||
const SDKName = "spotinst-sdk-go"
|
||||
|
|
|
@ -248,7 +248,7 @@ github.com/ghodss/yaml
|
|||
## explicit
|
||||
github.com/go-bindata/go-bindata/v3
|
||||
github.com/go-bindata/go-bindata/v3/go-bindata
|
||||
# github.com/go-ini/ini v1.51.0
|
||||
# github.com/go-ini/ini v1.62.0
|
||||
## explicit
|
||||
github.com/go-ini/ini
|
||||
# github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6
|
||||
|
@ -525,7 +525,7 @@ github.com/spf13/pflag
|
|||
# github.com/spf13/viper v1.7.0
|
||||
## explicit
|
||||
github.com/spf13/viper
|
||||
# github.com/spotinst/spotinst-sdk-go v1.58.0
|
||||
# github.com/spotinst/spotinst-sdk-go v1.75.0
|
||||
## explicit
|
||||
github.com/spotinst/spotinst-sdk-go/service/elastigroup
|
||||
github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws
|
||||
|
|