DO provider use vendored godo v1.20.0

This commit is contained in:
Steven Normore 2019-09-07 15:03:09 -04:00
parent c1e6736252
commit 08915661b1
9 changed files with 590 additions and 41 deletions

View File

@@ -1,5 +1,15 @@
# Change Log
## [v1.20.0] - 2019-09-06
- #252 Add Kubernetes autoscale config fields - @snormore
- #251 Support unset fields on Kubernetes cluster and node pool updates - @snormore
- #250 Add Kubernetes GetUser method - @snormore
## [v1.19.0] - 2019-07-19
- #244 dbaas: add private-network-uuid field to create request
## [v1.18.0] - 2019-07-17
- #241 Databases: support for custom VPC UUID on migrate - @mikejholly

View File

@@ -1,7 +1,8 @@
[![Build Status](https://travis-ci.org/digitalocean/godo.svg)](https://travis-ci.org/digitalocean/godo)
# Godo
[![Build Status](https://travis-ci.org/digitalocean/godo.svg)](https://travis-ci.org/digitalocean/godo)
[![GoDoc](https://godoc.org/github.com/digitalocean/godo?status.svg)](https://godoc.org/github.com/digitalocean/godo)
Godo is a Go client library for accessing the DigitalOcean V2 API.
You can view the client API docs here: [http://godoc.org/github.com/digitalocean/godo](http://godoc.org/github.com/digitalocean/godo)

View File

@@ -123,12 +123,13 @@ type DatabaseBackup struct {
// DatabaseCreateRequest represents a request to create a database cluster
type DatabaseCreateRequest struct {
Name string `json:"name,omitempty"`
EngineSlug string `json:"engine,omitempty"`
Version string `json:"version,omitempty"`
SizeSlug string `json:"size,omitempty"`
Region string `json:"region,omitempty"`
NumNodes int `json:"num_nodes,omitempty"`
Name string `json:"name,omitempty"`
EngineSlug string `json:"engine,omitempty"`
Version string `json:"version,omitempty"`
SizeSlug string `json:"size,omitempty"`
Region string `json:"region,omitempty"`
NumNodes int `json:"num_nodes,omitempty"`
PrivateNetworkUUID string `json:"private_network_uuid"`
}
// DatabaseResizeRequest can be used to initiate a database resize operation.
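The new PrivateNetworkUUID field places a database cluster into a specific VPC at create time. A minimal sketch, assuming an authenticated *godo.Client named client, a context ctx, and a pre-existing VPC; every value below is a placeholder:

	create := &godo.DatabaseCreateRequest{
		Name:               "example-db",
		EngineSlug:         "pg",
		Version:            "11",
		SizeSlug:           "db-s-1vcpu-1gb",
		Region:             "nyc1",
		NumNodes:           1,
		PrivateNetworkUUID: "880b7f98-f062-404d-b33c-458d545696f6", // placeholder VPC UUID
	}
	db, _, err := client.Databases.Create(ctx, create)

Because the tag lacks omitempty, the private_network_uuid key is serialized even when left empty.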

View File

@@ -17,7 +17,7 @@ import (
)
const (
libraryVersion = "1.18.0"
libraryVersion = "1.20.0"
defaultBaseURL = "https://api.digitalocean.com/"
userAgent = "godo/" + libraryVersion
mediaType = "application/json"

View File

@@ -539,3 +539,11 @@ func TestCustomBaseURL_badURL(t *testing.T) {
testURLParseError(t, err)
}
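// intPtr returns a pointer to val; a test helper for the new pointer-typed request fields.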
func intPtr(val int) *int {
return &val
}
func boolPtr(val bool) *bool {
return &val
}

View File

@@ -24,6 +24,7 @@ const (
type KubernetesService interface {
Create(context.Context, *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error)
Get(context.Context, string) (*KubernetesCluster, *Response, error)
GetUser(context.Context, string) (*KubernetesClusterUser, *Response, error)
GetUpgrades(context.Context, string) ([]*KubernetesVersion, *Response, error)
GetKubeConfig(context.Context, string) (*KubernetesClusterConfig, *Response, error)
List(context.Context, *ListOptions) ([]*KubernetesCluster, *Response, error)
@@ -69,8 +70,8 @@ type KubernetesClusterCreateRequest struct {
type KubernetesClusterUpdateRequest struct {
Name string `json:"name,omitempty"`
Tags []string `json:"tags,omitempty"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"`
AutoUpgrade bool `json:"auto_upgrade"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
AutoUpgrade *bool `json:"auto_upgrade,omitempty"`
}
// KubernetesClusterUpgradeRequest represents a request to upgrade a Kubernetes cluster.
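Making AutoUpgrade a *bool (and MaintenancePolicy omitempty) lets callers distinguish "leave unchanged" from "explicitly disable": a nil pointer drops the key from the request body, while a pointer to false sends it. A minimal sketch, assuming an authenticated client, a ctx, and a placeholder clusterID:

	disable := false
	update := &godo.KubernetesClusterUpdateRequest{
		AutoUpgrade: &disable, // serializes as {"auto_upgrade":false}
	}
	// Leaving AutoUpgrade nil omits the key entirely, keeping the current setting.
	cluster, _, err := client.Kubernetes.Update(ctx, clusterID, update)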
@@ -81,18 +82,24 @@ type KubernetesClusterUpgradeRequest struct {
// KubernetesNodePoolCreateRequest represents a request to create a node pool for a
// Kubernetes cluster.
type KubernetesNodePoolCreateRequest struct {
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
AutoScale bool `json:"auto_scale,omitempty"`
MinNodes int `json:"min_nodes,omitempty"`
MaxNodes int `json:"max_nodes,omitempty"`
}
// KubernetesNodePoolUpdateRequest represents a request to update a node pool in a
// Kubernetes cluster.
type KubernetesNodePoolUpdateRequest struct {
Name string `json:"name,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
Name string `json:"name,omitempty"`
Count *int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
AutoScale *bool `json:"auto_scale,omitempty"`
MinNodes *int `json:"min_nodes,omitempty"`
MaxNodes *int `json:"max_nodes,omitempty"`
}
// KubernetesNodePoolRecycleNodesRequest is DEPRECATED please use DeleteNode
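Count on the node pool update request is now a *int for the same reason: with omitempty, a plain zero used to disappear from the payload, so a pool could never be explicitly scaled down to zero nodes. A short sketch with placeholder cluster and pool IDs:

	count := 0
	update := &godo.KubernetesNodePoolUpdateRequest{
		Count: &count, // serializes as {"count":0} instead of being dropped
	}
	pool, _, err := client.Kubernetes.UpdateNodePool(ctx, clusterID, poolID, update)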
@@ -133,6 +140,12 @@ type KubernetesCluster struct {
UpdatedAt time.Time `json:"updated_at,omitempty"`
}
// KubernetesClusterUser represents a Kubernetes cluster user.
type KubernetesClusterUser struct {
Username string `json:"username,omitempty"`
Groups []string `json:"groups,omitempty"`
}
// KubernetesMaintenancePolicy is a configuration to set the maintenance window
// of a cluster
type KubernetesMaintenancePolicy struct {
@@ -267,11 +280,14 @@ type KubernetesClusterStatus struct {
// KubernetesNodePool represents a node pool in a Kubernetes cluster.
type KubernetesNodePool struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Size string `json:"size,omitempty"`
Count int `json:"count,omitempty"`
Tags []string `json:"tags,omitempty"`
AutoScale bool `json:"auto_scale,omitempty"`
MinNodes int `json:"min_nodes,omitempty"`
MaxNodes int `json:"max_nodes,omitempty"`
Nodes []*KubernetesNode `json:"nodes,omitempty"`
}
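The autoscale knobs appear both on the create request and on the node pool itself. A minimal sketch of creating an autoscaling pool, assuming an authenticated client and placeholder values throughout:

	create := &godo.KubernetesNodePoolCreateRequest{
		Name:      "autoscale-pool",
		Size:      "s-1vcpu-1gb",
		Count:     2,
		AutoScale: true,
		MinNodes:  1,
		MaxNodes:  10,
	}
	pool, _, err := client.Kubernetes.CreateNodePool(ctx, clusterID, create)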
@@ -326,6 +342,10 @@ type kubernetesClusterRoot struct {
Cluster *KubernetesCluster `json:"kubernetes_cluster,omitempty"`
}
type kubernetesClusterUserRoot struct {
User *KubernetesClusterUser `json:"kubernetes_cluster_user,omitempty"`
}
type kubernetesNodePoolRoot struct {
NodePool *KubernetesNodePool `json:"node_pool,omitempty"`
}
@@ -354,6 +374,21 @@ func (svc *KubernetesServiceOp) Get(ctx context.Context, clusterID string) (*Kub
return root.Cluster, resp, nil
}
// GetUser retrieves the details of a Kubernetes cluster user.
func (svc *KubernetesServiceOp) GetUser(ctx context.Context, clusterID string) (*KubernetesClusterUser, *Response, error) {
path := fmt.Sprintf("%s/%s/user", kubernetesClustersPath, clusterID)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(kubernetesClusterUserRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.User, resp, nil
}
// GetUpgrades retrieves versions a Kubernetes cluster can be upgraded to. An
// upgrade can be requested using `Upgrade`.
func (svc *KubernetesServiceOp) GetUpgrades(ctx context.Context, clusterID string) ([]*KubernetesVersion, *Response, error) {
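A short usage sketch for the new GetUser method, assuming an authenticated client, a ctx, and a placeholder clusterID:

	user, _, err := client.Kubernetes.GetUser(ctx, clusterID)
	if err != nil {
		// handle the error
	}
	fmt.Printf("user %s belongs to groups %v\n", user.Username, user.Groups)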

View File

@@ -1,6 +1,7 @@
package godo
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
@@ -316,6 +317,34 @@ func TestKubernetesClusters_Get(t *testing.T) {
require.Equal(t, want, got)
}
func TestKubernetesClusters_GetUser(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesClusterUser{
Username: "foo@example.com",
Groups: []string{
"foo:bar",
},
}
jBlob := `
{
"kubernetes_cluster_user": {
"username": "foo@example.com",
"groups": ["foo:bar"]
}
}`
mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d/user", func(w http.ResponseWriter, r *http.Request) {
testMethod(t, r, http.MethodGet)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.GetUser(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d")
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_GetKubeConfig(t *testing.T) {
setup()
defer teardown()
@@ -408,10 +437,13 @@ func TestKubernetesClusters_Create(t *testing.T) {
VPCUUID: want.VPCUUID,
NodePools: []*KubernetesNodePoolCreateRequest{
&KubernetesNodePoolCreateRequest{
Size: want.NodePools[0].Size,
Count: want.NodePools[0].Count,
Name: want.NodePools[0].Name,
Tags: want.NodePools[0].Tags,
Size: want.NodePools[0].Size,
Count: want.NodePools[0].Count,
Name: want.NodePools[0].Name,
Tags: want.NodePools[0].Tags,
AutoScale: want.NodePools[0].AutoScale,
MinNodes: want.NodePools[0].MinNodes,
MaxNodes: want.NodePools[0].MaxNodes,
},
},
MaintenancePolicy: want.MaintenancePolicy,
@@ -466,6 +498,110 @@ func TestKubernetesClusters_Create(t *testing.T) {
require.Equal(t, want, got)
}
func TestKubernetesClusters_Create_AutoScalePool(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesCluster{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
Name: "antoine-test-cluster",
RegionSlug: "s2r1",
VersionSlug: "1.10.0-gen0",
ClusterSubnet: "10.244.0.0/16",
ServiceSubnet: "10.245.0.0/16",
Tags: []string{"cluster-tag-1", "cluster-tag-2"},
VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6",
NodePools: []*KubernetesNodePool{
&KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
AutoScale: true,
MinNodes: 0,
MaxNodes: 10,
},
},
MaintenancePolicy: &KubernetesMaintenancePolicy{
StartTime: "00:00",
Day: KubernetesMaintenanceDayMonday,
},
}
createRequest := &KubernetesClusterCreateRequest{
Name: want.Name,
RegionSlug: want.RegionSlug,
VersionSlug: want.VersionSlug,
Tags: want.Tags,
VPCUUID: want.VPCUUID,
NodePools: []*KubernetesNodePoolCreateRequest{
&KubernetesNodePoolCreateRequest{
Size: want.NodePools[0].Size,
Count: want.NodePools[0].Count,
Name: want.NodePools[0].Name,
Tags: want.NodePools[0].Tags,
AutoScale: want.NodePools[0].AutoScale,
MinNodes: want.NodePools[0].MinNodes,
MaxNodes: want.NodePools[0].MaxNodes,
},
},
MaintenancePolicy: want.MaintenancePolicy,
}
jBlob := `
{
"kubernetes_cluster": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
"name": "antoine-test-cluster",
"region": "s2r1",
"version": "1.10.0-gen0",
"cluster_subnet": "10.244.0.0/16",
"service_subnet": "10.245.0.0/16",
"tags": [
"cluster-tag-1",
"cluster-tag-2"
],
"vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6",
"node_pools": [
{
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
],
"auto_scale": true,
"min_nodes": 0,
"max_nodes": 10
}
],
"maintenance_policy": {
"start_time": "00:00",
"day": "monday"
}
}
}`
mux.HandleFunc("/v2/kubernetes/clusters", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesClusterCreateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPost)
require.Equal(t, v, createRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.Create(ctx, createRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_Update(t *testing.T) {
setup()
defer teardown()
@@ -533,12 +669,104 @@ func TestKubernetesClusters_Update(t *testing.T) {
}
}`
expectedReqJSON := `{"name":"antoine-test-cluster","tags":["cluster-tag-1","cluster-tag-2"],"maintenance_policy":{"start_time":"00:00","duration":"","day":"monday"}}
`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f", func(w http.ResponseWriter, r *http.Request) {
buf := new(bytes.Buffer)
buf.ReadFrom(r.Body)
require.Equal(t, expectedReqJSON, buf.String())
v := new(KubernetesClusterUpdateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
err := json.NewDecoder(buf).Decode(v)
require.NoError(t, err)
testMethod(t, r, http.MethodPut)
require.Equal(t, v, updateRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.Update(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", updateRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_Update_FalseAutoUpgrade(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesCluster{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
Name: "antoine-test-cluster",
RegionSlug: "s2r1",
VersionSlug: "1.10.0-gen0",
ClusterSubnet: "10.244.0.0/16",
ServiceSubnet: "10.245.0.0/16",
Tags: []string{"cluster-tag-1", "cluster-tag-2"},
VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6",
NodePools: []*KubernetesNodePool{
&KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
},
},
MaintenancePolicy: &KubernetesMaintenancePolicy{
StartTime: "00:00",
Day: KubernetesMaintenanceDayMonday,
},
}
updateRequest := &KubernetesClusterUpdateRequest{
AutoUpgrade: boolPtr(false),
}
jBlob := `
{
"kubernetes_cluster": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
"name": "antoine-test-cluster",
"region": "s2r1",
"version": "1.10.0-gen0",
"cluster_subnet": "10.244.0.0/16",
"service_subnet": "10.245.0.0/16",
"tags": [
"cluster-tag-1",
"cluster-tag-2"
],
"vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6",
"node_pools": [
{
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
]
}
],
"maintenance_policy": {
"start_time": "00:00",
"day": "monday"
}
}
}`
expectedReqJSON := `{"auto_upgrade":false}
`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f", func(w http.ResponseWriter, r *http.Request) {
buf := new(bytes.Buffer)
buf.ReadFrom(r.Body)
require.Equal(t, expectedReqJSON, buf.String())
v := new(KubernetesClusterUpdateRequest)
err := json.NewDecoder(buf).Decode(v)
require.NoError(t, err)
testMethod(t, r, http.MethodPut)
require.Equal(t, v, updateRequest)
@@ -596,11 +824,14 @@ func TestKubernetesClusters_CreateNodePool(t *testing.T) {
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
AutoScale: false,
MinNodes: 0,
MaxNodes: 0,
}
createRequest := &KubernetesNodePoolCreateRequest{
Size: want.Size,
@@ -639,6 +870,65 @@ func TestKubernetesClusters_CreateNodePool(t *testing.T) {
require.Equal(t, want, got)
}
func TestKubernetesClusters_CreateNodePool_AutoScale(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
Size: "s-1vcpu-1gb",
Count: 2,
Name: "pool-a",
Tags: []string{"tag-1"},
AutoScale: true,
MinNodes: 0,
MaxNodes: 10,
}
createRequest := &KubernetesNodePoolCreateRequest{
Size: want.Size,
Count: want.Count,
Name: want.Name,
Tags: want.Tags,
AutoScale: want.AutoScale,
MinNodes: want.MinNodes,
MaxNodes: want.MaxNodes,
}
jBlob := `
{
"node_pool": {
"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 2,
"name": "pool-a",
"tags": [
"tag-1"
],
"auto_scale": true,
"min_nodes": 0,
"max_nodes": 10
}
}`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools", func(w http.ResponseWriter, r *http.Request) {
v := new(KubernetesNodePoolCreateRequest)
err := json.NewDecoder(r.Body).Decode(v)
if err != nil {
t.Fatal(err)
}
testMethod(t, r, http.MethodPost)
require.Equal(t, v, createRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.CreateNodePool(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", createRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_GetNodePool(t *testing.T) {
setup()
defer teardown()
@@ -757,15 +1047,18 @@ func TestKubernetesClusters_UpdateNodePool(t *testing.T) {
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "a better name",
Size: "s-1vcpu-1gb",
Count: 4,
Tags: []string{"tag-1", "tag-2"},
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "a better name",
Size: "s-1vcpu-1gb",
Count: 4,
Tags: []string{"tag-1", "tag-2"},
AutoScale: false,
MinNodes: 0,
MaxNodes: 0,
}
updateRequest := &KubernetesNodePoolUpdateRequest{
Name: "a better name",
Count: 4,
Count: intPtr(4),
Tags: []string{"tag-1", "tag-2"},
}
@@ -799,6 +1092,121 @@ func TestKubernetesClusters_UpdateNodePool(t *testing.T) {
require.Equal(t, want, got)
}
func TestKubernetesClusters_UpdateNodePool_ZeroCount(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "name",
Size: "s-1vcpu-1gb",
Count: 0,
Tags: []string{"tag-1", "tag-2"},
AutoScale: false,
MinNodes: 0,
MaxNodes: 0,
}
updateRequest := &KubernetesNodePoolUpdateRequest{
Count: intPtr(0),
}
jBlob := `
{
"node_pool": {
"id": "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 0,
"name": "name",
"tags": [
"tag-1", "tag-2"
]
}
}`
expectedReqJSON := `{"count":0}
`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools/8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", func(w http.ResponseWriter, r *http.Request) {
buf := new(bytes.Buffer)
buf.ReadFrom(r.Body)
require.Equal(t, expectedReqJSON, buf.String())
v := new(KubernetesNodePoolUpdateRequest)
err := json.NewDecoder(buf).Decode(v)
require.NoError(t, err)
testMethod(t, r, http.MethodPut)
require.Equal(t, v, updateRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.UpdateNodePool(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", updateRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_UpdateNodePool_AutoScale(t *testing.T) {
setup()
defer teardown()
kubeSvc := client.Kubernetes
want := &KubernetesNodePool{
ID: "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
Name: "name",
Size: "s-1vcpu-1gb",
Count: 4,
Tags: []string{"tag-1", "tag-2"},
AutoScale: true,
MinNodes: 0,
MaxNodes: 10,
}
updateRequest := &KubernetesNodePoolUpdateRequest{
AutoScale: boolPtr(true),
MinNodes: intPtr(0),
MaxNodes: intPtr(10),
}
jBlob := `
{
"node_pool": {
"id": "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a",
"size": "s-1vcpu-1gb",
"count": 4,
"name": "name",
"tags": [
"tag-1", "tag-2"
],
"auto_scale": true,
"min_nodes": 0,
"max_nodes": 10
}
}`
expectedReqJSON := `{"auto_scale":true,"min_nodes":0,"max_nodes":10}
`
mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/node_pools/8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", func(w http.ResponseWriter, r *http.Request) {
buf := new(bytes.Buffer)
buf.ReadFrom(r.Body)
require.Equal(t, expectedReqJSON, buf.String())
v := new(KubernetesNodePoolUpdateRequest)
err := json.NewDecoder(buf).Decode(v)
require.NoError(t, err)
testMethod(t, r, http.MethodPut)
require.Equal(t, v, updateRequest)
fmt.Fprint(w, jBlob)
})
got, _, err := kubeSvc.UpdateNodePool(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", "8d91899c-nodepool-4a1a-acc5-deadbeefbb8a", updateRequest)
require.NoError(t, err)
require.Equal(t, want, got)
}
func TestKubernetesClusters_DeleteNodePool(t *testing.T) {
setup()
defer teardown()

View File

@@ -0,0 +1,57 @@
package util
import (
"context"
"fmt"
"time"
"github.com/digitalocean/godo"
)
const (
// activeFailure is the number of times we can fail before deciding
// the check for active is a total failure. This can help account
// for servers randomly not answering.
activeFailure = 3
)
// WaitForActive waits for a droplet to become active
func WaitForActive(ctx context.Context, client *godo.Client, monitorURI string) error {
if len(monitorURI) == 0 {
return fmt.Errorf("create had no monitor uri")
}
completed := false
failCount := 0
for !completed {
action, _, err := client.DropletActions.GetByURI(ctx, monitorURI)
if err != nil {
select {
case <-ctx.Done():
return err
default:
}
if failCount <= activeFailure {
failCount++
continue
}
return err
}
switch action.Status {
case godo.ActionInProgress:
select {
case <-time.After(5 * time.Second):
case <-ctx.Done():
return ctx.Err() // err is nil on this path; report the context's error instead
}
case godo.ActionCompleted:
completed = true
default:
return fmt.Errorf("unknown status: [%s]", action.Status)
}
}
return nil
}

View File

@@ -0,0 +1,29 @@
package util
import (
"context"
"golang.org/x/oauth2"
"github.com/digitalocean/godo"
)
func ExampleWaitForActive() {
// build client
pat := "mytoken"
token := &oauth2.Token{AccessToken: pat}
t := oauth2.StaticTokenSource(token)
ctx := context.TODO()
oauthClient := oauth2.NewClient(ctx, t)
client := godo.NewClient(oauthClient)
// create your droplet and retrieve the create action uri
uri := "https://api.digitalocean.com/v2/actions/xxxxxxxx"
// block until the action is complete
err := WaitForActive(ctx, client, uri)
if err != nil {
panic(err)
}
}
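ExampleWaitForActive blocks with context.TODO, so it would poll forever if the action never settles. A caller-side sketch that bounds the wait instead, assuming the standard context and time imports plus a client and uri prepared as above:

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	if err := util.WaitForActive(ctx, client, uri); err != nil {
		// the action failed, or the ten-minute budget elapsed
	}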