mirror of https://github.com/knative/pkg.git
Retry cluster creation if there's an error (#1186)
* retry cluster creation if there's an error
* nit renaming
* go fmt
* delete cluster async
* clean up the code a bit
* nits
* address comments
* fix unit tests
* nits
* go fmt
* fix unit tests
This commit is contained in:
parent 0cf29f826c
commit 7727cb37e0
@@ -0,0 +1,53 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clustermanager
+
+import (
+	"regexp"
+
+	"knative.dev/pkg/testutils/clustermanager/e2e-tests/boskos"
+)
+
+const (
+	defaultGKEMinNodes  = 1
+	defaultGKEMaxNodes  = 3
+	defaultGKENodeType  = "e2-standard-4"
+	defaultGKERegion    = "us-central1"
+	defaultGKEZone      = ""
+	regionEnv           = "E2E_CLUSTER_REGION"
+	backupRegionEnv     = "E2E_CLUSTER_BACKUP_REGIONS"
+	defaultResourceType = boskos.GKEProjectResource
+
+	clusterRunning = "RUNNING"
+)
+
+var (
+	protectedProjects       = []string{"knative-tests"}
+	protectedClusters       = []string{"knative-prow"}
+	defaultGKEBackupRegions = []string{"us-west1", "us-east1"}
+
+	// If one of the error patterns below is matched, it would be recommended to
+	// retry creating the cluster in a different region/zone.
+	// - stockout (https://github.com/knative/test-infra/issues/592)
+	// - latest GKE not available in this region/zone yet (https://github.com/knative/test-infra/issues/694)
+	retryableCreationErrors = []*regexp.Regexp{
+		regexp.MustCompile(".*Master version \"[0-9a-z\\-.]+\" is unsupported.*"),
+		regexp.MustCompile(".*No valid versions with the prefix \"[0-9.]+\" found.*"),
+		regexp.MustCompile(".*does not have enough resources available to fulfill.*"),
+		regexp.MustCompile(".*only \\d+ nodes out of \\d+ have registered; this is likely due to Nodes failing to start correctly.*"),
+	}
+)
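As a quick illustration of how these patterns classify errors, the snippet below matches a made-up stockout message; the message text is hypothetical, only the regex is taken from the list above:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as in retryableCreationErrors above.
	stockout := regexp.MustCompile(".*does not have enough resources available to fulfill.*")

	// Hypothetical error text, for illustration only.
	msg := "creation failed: us-central1-b does not have enough resources available to fulfill the request"

	fmt.Println(stockout.MatchString(msg)) // prints: true
}
```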
@@ -30,25 +30,6 @@ import (
 	"knative.dev/pkg/testutils/clustermanager/e2e-tests/common"
 )
 
-const (
-	DefaultGKEMinNodes  = 1
-	DefaultGKEMaxNodes  = 3
-	DefaultGKENodeType  = "e2-standard-4"
-	DefaultGKERegion    = "us-central1"
-	DefaultGKEZone      = ""
-	regionEnv           = "E2E_CLUSTER_REGION"
-	backupRegionEnv     = "E2E_CLUSTER_BACKUP_REGIONS"
-	DefaultResourceType = boskos.GKEProjectResource
-
-	ClusterRunning = "RUNNING"
-)
-
-var (
-	DefaultGKEBackupRegions = []string{"us-west1", "us-east1"}
-	protectedProjects       = []string{"knative-tests"}
-	protectedClusters       = []string{"knative-prow"}
-)
-
 // GKEClient implements Client
 type GKEClient struct {
 }
@@ -65,10 +46,6 @@ type GKERequest struct {
 	// SkipCreation: skips cluster creation
 	SkipCreation bool
 
-	// NeedsCleanup: enforce clean up if given this option, used when running
-	// locally
-	NeedsCleanup bool
-
 	// ResourceType: the boskos resource type to acquire to hold the cluster in create
 	ResourceType string
 }
@@ -80,9 +57,9 @@ type GKECluster struct {
 	Project string
 	// IsBoskos is true if the GCP project used is managed by boskos
 	IsBoskos bool
-	// NeedsCleanup tells whether the cluster needs to be deleted afterwards
-	// This probably should be part of task wrapper's logic
-	NeedsCleanup bool
+	// AsyncCleanup tells whether the cluster needs to be deleted asynchronously afterwards
+	// It should be true on Prow but false on local.
+	AsyncCleanup bool
 	Cluster    *container.Cluster
 	operations gke.SDKOperations
 	boskosOps  boskos.Operation
@@ -93,45 +70,47 @@
 func (gs *GKEClient) Setup(r GKERequest) ClusterOperations {
 	gc := &GKECluster{}
 
-	if r.Project != "" { // use provided project and create cluster
+	if r.Project != "" { // use provided project to create cluster
 		gc.Project = r.Project
-		gc.NeedsCleanup = true
+		gc.AsyncCleanup = true
+	} else if common.IsProw() { // if no project is provided and is on Prow, use boskos
+		gc.IsBoskos = true
 	}
 
 	if r.MinNodes == 0 {
-		r.MinNodes = DefaultGKEMinNodes
+		r.MinNodes = defaultGKEMinNodes
 	}
 	if r.MaxNodes == 0 {
-		r.MaxNodes = DefaultGKEMaxNodes
+		r.MaxNodes = defaultGKEMaxNodes
 		// We don't want MaxNodes < MinNodes
 		if r.MinNodes > r.MaxNodes {
 			r.MaxNodes = r.MinNodes
 		}
 	}
 	if r.NodeType == "" {
-		r.NodeType = DefaultGKENodeType
+		r.NodeType = defaultGKENodeType
 	}
 	// Only use default backup regions if region is not provided
 	if len(r.BackupRegions) == 0 && r.Region == "" {
-		r.BackupRegions = DefaultGKEBackupRegions
+		r.BackupRegions = defaultGKEBackupRegions
 		if common.GetOSEnv(backupRegionEnv) != "" {
 			r.BackupRegions = strings.Split(common.GetOSEnv(backupRegionEnv), " ")
 		}
 	}
 	if r.Region == "" {
-		r.Region = DefaultGKERegion
+		r.Region = defaultGKERegion
 		if common.GetOSEnv(regionEnv) != "" {
 			r.Region = common.GetOSEnv(regionEnv)
 		}
 	}
 	if r.Zone == "" {
-		r.Zone = DefaultGKEZone
+		r.Zone = defaultGKEZone
 	} else { // No backup regions if zone is provided
 		r.BackupRegions = make([]string, 0)
 	}
 
 	if r.ResourceType == "" {
-		r.ResourceType = DefaultResourceType
+		r.ResourceType = defaultResourceType
 	}
 
 	gc.Request = &r
@@ -178,18 +157,13 @@ func (gc *GKECluster) Acquire() error {
 	// If comes here we are very likely going to create a cluster, unless
 	// the cluster already exists
 
-	// Cleanup if cluster is created by this client
-	gc.NeedsCleanup = !common.IsProw()
-
-	// Get project name from boskos if running in Prow, otherwise it should fail
-	// since we don't know which project to use
-	if gc.Request.Project == "" && common.IsProw() {
+	// If running on Prow and project name is not provided, get project name from boskos.
+	if gc.Project == "" && gc.IsBoskos {
 		project, err := gc.boskosOps.AcquireGKEProject(gc.Request.ResourceType)
 		if err != nil {
-			return fmt.Errorf("failed acquiring boskos project: '%v'", err)
+			return fmt.Errorf("failed acquiring boskos project: '%w'", err)
 		}
 		gc.Project = project.Name
-		gc.IsBoskos = true
 	}
 	if gc.Project == "" {
 		return errors.New("GCP project must be set")
@@ -230,7 +204,7 @@ func (gc *GKECluster) Acquire() error {
 		clusterName := request.ClusterName
 		// Use cluster if it already exists and running
 		existingCluster, _ := gc.operations.GetCluster(gc.Project, region, request.Zone, clusterName)
-		if existingCluster != nil && existingCluster.Status == ClusterRunning {
+		if existingCluster != nil && existingCluster.Status == clusterRunning {
 			gc.Cluster = existingCluster
 			return nil
 		}
@@ -242,13 +216,12 @@ func (gc *GKECluster) Acquire() error {
 		}
 		if err != nil {
 			errMsg := fmt.Sprintf("Error during cluster creation: '%v'. ", err)
-			if gc.NeedsCleanup { // Delete half created cluster if it's user created
+			if !common.IsProw() { // Delete half created cluster if it's user created
 				errMsg = fmt.Sprintf("%sDeleting cluster %q in region %q zone %q in background...\n", errMsg, clusterName, region, request.Zone)
 				gc.operations.DeleteClusterAsync(gc.Project, region, request.Zone, clusterName)
 			}
 			// Retry another region if cluster creation failed.
-			// TODO(chaodaiG): catch specific errors as we know what the error look like for stockout etc.
-			if i != len(regions)-1 {
+			if i != len(regions)-1 && needsRetryCreation(err.Error()) {
 				errMsg = fmt.Sprintf("%sRetry another region %q for cluster creation", errMsg, regions[i+1])
 			}
 			log.Print(errMsg)
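The change above tightens the fallback logic: creation is now only retried in the next backup region when a region remains and the error matches one of the retryable patterns. A condensed, runnable sketch of that flow (createWithFallback and tryCreate are illustrative stand-ins, not the real SDK calls):

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

// Mirrors the shape of retryableCreationErrors/needsRetryCreation above.
var retryable = regexp.MustCompile(".*does not have enough resources available to fulfill.*")

// createWithFallback tries each region in order, falling through to the next
// one only when the failure looks retryable.
func createWithFallback(regions []string, tryCreate func(region string) error) error {
	var err error
	for i, region := range regions {
		if err = tryCreate(region); err == nil {
			return nil
		}
		if i == len(regions)-1 || !retryable.MatchString(err.Error()) {
			break // no region left, or a non-retryable error
		}
		fmt.Printf("creation in %q failed, retrying in %q\n", region, regions[i+1])
	}
	return err
}

func main() {
	stockout := errors.New("zone does not have enough resources available to fulfill the request")
	err := createWithFallback([]string{"us-central1", "us-west1"}, func(region string) error {
		if region == "us-central1" {
			return stockout // simulate a stockout in the primary region
		}
		return nil // pretend the backup region succeeds
	})
	fmt.Println("result:", err) // prints: result: <nil>
}
```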
@@ -262,25 +235,32 @@
 	return err
 }
 
-// Delete takes care of GKE cluster resource cleanup. It only release Boskos resource if running in
-// Prow, otherwise deletes the cluster if marked NeedsCleanup
+// needsRetryCreation determines if cluster creation needs to be retried based on the error message.
+func needsRetryCreation(errMsg string) bool {
+	for _, regx := range retryableCreationErrors {
+		if regx.MatchString(errMsg) {
+			return true
+		}
+	}
+	return false
+}
+
+// Delete takes care of GKE cluster resource cleanup.
+// It also releases Boskos resource if running in Prow.
 func (gc *GKECluster) Delete() error {
-	if err := gc.checkEnvironment(); err != nil {
-		return fmt.Errorf("failed checking project/cluster from environment: '%v'", err)
+	var err error
+	if err = gc.checkEnvironment(); err != nil {
+		return fmt.Errorf("failed checking project/cluster from environment: '%w'", err)
 	}
 	gc.ensureProtected()
-	// Release Boskos if running in Prow, will let Janitor taking care of
-	// clusters deleting
+	// Release Boskos if running in Prow
 	if gc.IsBoskos {
 		log.Printf("Releasing Boskos resource: '%v'", gc.Project)
-		return gc.boskosOps.ReleaseGKEProject(gc.Project)
+		if err = gc.boskosOps.ReleaseGKEProject(gc.Project); err != nil {
+			return fmt.Errorf("failed releasing boskos resource: '%w'", err)
+		}
 	}
 
-	// NeedsCleanup is only true if running locally and cluster created by the
-	// process
-	if !gc.NeedsCleanup && !gc.Request.NeedsCleanup {
-		return nil
-	}
 	// Should only get here if running locally and cluster created by this
 	// client, so at this moment cluster should have been set
 	if gc.Cluster == nil {
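A note on the '%v' to '%w' switches in this hunk (and in the test expectations below): '%w' wraps the underlying error so callers can still match it with errors.Is or errors.As, whereas '%v' flattens it to plain text. A minimal, self-contained illustration:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoProject = errors.New("no GKE project available")

func main() {
	wrapped := fmt.Errorf("failed acquiring boskos project: '%w'", errNoProject)
	fmt.Println(errors.Is(wrapped, errNoProject)) // true: %w preserves the error chain

	flattened := fmt.Errorf("failed acquiring boskos project: '%v'", errNoProject)
	fmt.Println(errors.Is(flattened, errNoProject)) // false: %v keeps only the text
}
```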
@@ -288,8 +268,13 @@ func (gc *GKECluster) Delete() error {
 	}
 	log.Printf("Deleting cluster %q in %q", gc.Cluster.Name, gc.Cluster.Location)
 	region, zone := gke.RegionZoneFromLoc(gc.Cluster.Location)
-	if err := gc.operations.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name); err != nil {
-		return fmt.Errorf("failed deleting cluster: '%v'", err)
+	if gc.AsyncCleanup {
+		_, err = gc.operations.DeleteClusterAsync(gc.Project, region, zone, gc.Cluster.Name)
+	} else {
+		err = gc.operations.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name)
+	}
+	if err != nil {
+		return fmt.Errorf("failed deleting cluster: '%w'", err)
 	}
 	return nil
 }
@@ -17,6 +17,7 @@ limitations under the License.
 package clustermanager
 
 import (
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -28,6 +29,7 @@ import (
 
 	container "google.golang.org/api/container/v1beta1"
 	boskoscommon "k8s.io/test-infra/boskos/common"
+
 	"knative.dev/pkg/test/gke"
 	gkeFake "knative.dev/pkg/test/gke/fake"
 	boskosFake "knative.dev/pkg/testutils/clustermanager/e2e-tests/boskos/fake"
@@ -85,7 +87,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{"us-west1", "us-east1"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -104,7 +106,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{"us-west1", "us-east1"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -147,10 +149,10 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{"us-west1", "us-east1"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 			Project:      fakeProj,
-			NeedsCleanup: true,
+			AsyncCleanup: true,
 		},
 	}, {
 		name: "Project provided, running in Prow",
@@ -173,10 +175,10 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{"us-west1", "us-east1"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 			Project:      fakeProj,
-			NeedsCleanup: true,
+			AsyncCleanup: true,
 		},
 	}, {
 		name: "Cluster name provided, not running in Prow",
@@ -198,7 +200,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{"us-west1", "us-east1"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -221,7 +223,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{"us-west1", "us-east1"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -248,7 +250,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -274,7 +276,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: nil,
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -299,7 +301,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: nil,
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -318,7 +320,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{"us-west1", "us-east1"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -337,7 +339,7 @@ func TestSetup(t *testing.T) {
 					Addons: nil,
 				},
 				BackupRegions: []string{"backupregion1", "backupregion2"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	}, {
@@ -360,7 +362,7 @@ func TestSetup(t *testing.T) {
 					Addons: []string{fakeAddons},
 				},
 				BackupRegions: []string{"us-west1", "us-east1"},
-				ResourceType:  DefaultResourceType,
+				ResourceType:  defaultResourceType,
 			},
 		},
 	},
@@ -583,8 +585,8 @@ func TestAcquire(t *testing.T) {
 			name: "cluster not exist, running in Prow and boskos not available",
 			td: testdata{
 				request: request{clusterName: predefinedClusterName, addons: []string{}},
-				isProw: true, project: fakeProj, nextOpStatus: []string{}, boskosProjs: []string{}, skipCreation: false},
-			want: wantResult{expCluster: nil, expErr: fmt.Errorf("failed acquiring boskos project: 'no GKE project available'"), expPanic: false},
+				isProw: true, nextOpStatus: []string{}, boskosProjs: []string{}, skipCreation: false},
+			want: wantResult{expCluster: nil, expErr: fmt.Errorf("failed acquiring boskos project: '%w'", errors.New("no GKE project available")), expPanic: false},
 		}, {
 			name: "cluster not exist, running in Prow and boskos available",
 			td: testdata{
@@ -599,7 +601,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -613,7 +615,7 @@ func TestAcquire(t *testing.T) {
 			td: testdata{
 				request: request{clusterName: predefinedClusterName, addons: []string{}},
 				isProw: true, nextOpStatus: []string{}, boskosProjs: []string{}, skipCreation: false},
-			want: wantResult{expCluster: nil, expErr: fmt.Errorf("failed acquiring boskos project: 'no GKE project available'"), expPanic: false},
+			want: wantResult{expCluster: nil, expErr: fmt.Errorf("failed acquiring boskos project: '%w'", errors.New("no GKE project available")), expPanic: false},
 		},
 		{
 			name: "cluster not exist, project not set, running in Prow and boskos available",
@@ -629,7 +631,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -656,7 +658,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -697,7 +699,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -723,7 +725,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -751,7 +753,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -778,7 +780,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -817,7 +819,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -845,7 +847,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -871,7 +873,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -896,7 +898,7 @@ func TestAcquire(t *testing.T) {
 					NodePools: []*container.NodePool{
 						{
 							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
+							InitialNodeCount: defaultGKEMinNodes,
 							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
 							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
 						},
@@ -995,15 +997,15 @@ func TestAcquire(t *testing.T) {
 		fgc.Request = &GKERequest{
 			Request: gke.Request{
 				ClusterName: tt.td.request.clusterName,
-				MinNodes:    DefaultGKEMinNodes,
-				MaxNodes:    DefaultGKEMaxNodes,
-				NodeType:    DefaultGKENodeType,
-				Region:      DefaultGKERegion,
+				MinNodes:    defaultGKEMinNodes,
+				MaxNodes:    defaultGKEMaxNodes,
+				NodeType:    defaultGKENodeType,
+				Region:      defaultGKERegion,
 				Zone:        "",
 				Addons:      tt.td.request.addons,
 			},
-			BackupRegions: DefaultGKEBackupRegions,
-			ResourceType:  DefaultResourceType,
+			BackupRegions: defaultGKEBackupRegions,
+			ResourceType:  defaultResourceType,
 		}
 		opCount := 0
 		if data.existCluster != nil {
@@ -1019,12 +1021,15 @@ func TestAcquire(t *testing.T) {
 			fgc.operations.(*gkeFake.GKESDKClient).OpStatus[strconv.Itoa(opCount+i)] = status
 		}
 
+		if data.isProw && data.project == "" {
+			fgc.IsBoskos = true
+		}
 		if data.skipCreation {
 			fgc.Request.SkipCreation = true
 		}
-		// Set NeedsCleanup to false for easier testing, as it launches a
+		// Set AsyncCleanup to false for easier testing, as it launches a
 		// goroutine
-		fgc.NeedsCleanup = false
+		fgc.AsyncCleanup = false
 		err := fgc.Acquire()
 		errMsg := fmt.Sprintf("testing acquiring cluster, with:\n\tisProw: '%v'\n\tproject: '%v'\n\texisting cluster: '%+v'\n\tSkip creation: '%+v'\n\t"+
 			"next operations outcomes: '%v'\n\taddons: '%v'\n\tboskos projects: '%v'",
@@ -1041,12 +1046,10 @@
 
 func TestDelete(t *testing.T) {
 	type testdata struct {
-		isProw         bool
-		isBoskos       bool
-		NeedsCleanup   bool
-		requestCleanup bool
-		boskosState    []*boskoscommon.Resource
-		cluster        *container.Cluster
+		isProw      bool
+		isBoskos    bool
+		boskosState []*boskoscommon.Resource
+		cluster     *container.Cluster
 	}
 	type wantResult struct {
 		Boskos []*boskoscommon.Resource
@@ -1059,46 +1062,10 @@ func TestDelete(t *testing.T) {
 		want wantResult
 	}{
 		{
-			name: "Not in prow, NeedsCleanup is false",
+			name: "Not in prow",
 			td: testdata{
-				isProw:         false,
-				NeedsCleanup:   false,
-				requestCleanup: false,
-				boskosState:    []*boskoscommon.Resource{},
-				cluster: &container.Cluster{
-					Name:     "customcluster",
-					Location: "us-central1",
-				},
-			},
-			want: wantResult{
-				nil,
-				&container.Cluster{
-					Name:         "customcluster",
-					Location:     "us-central1",
-					Status:       "RUNNING",
-					AddonsConfig: &container.AddonsConfig{},
-					NodePools: []*container.NodePool{
-						{
-							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
-							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
-							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
-						},
-					},
-					MasterAuth: &container.MasterAuth{
-						Username: "admin",
-					},
-				},
-				nil,
-			},
-		},
-		{
-			name: "Not in prow, NeedsCleanup is true",
-			td: testdata{
-				isProw:       false,
-				NeedsCleanup: true,
-				requestCleanup: false,
-				boskosState:  []*boskoscommon.Resource{},
+				isProw:      false,
+				boskosState: []*boskoscommon.Resource{},
 				cluster: &container.Cluster{
 					Name:     "customcluster",
 					Location: "us-central1",
@@ -1111,31 +1078,11 @@ func TestDelete(t *testing.T) {
 			},
 		},
 		{
-			name: "Not in prow, NeedsCleanup is false, requestCleanup is true",
+			name: "Not in prow, but cluster doesn't exist",
 			td: testdata{
-				isProw:         false,
-				NeedsCleanup:   false,
-				requestCleanup: true,
-				boskosState:    []*boskoscommon.Resource{},
-				cluster: &container.Cluster{
-					Name:     "customcluster",
-					Location: "us-central1",
-				},
-			},
-			want: wantResult{
-				nil,
-				nil,
-				nil,
-			},
-		},
-		{
-			name: "Not in prow, NeedsCleanup is true, but cluster doesn't exist",
-			td: testdata{
-				isProw:         false,
-				NeedsCleanup:   true,
-				requestCleanup: false,
-				boskosState:    []*boskoscommon.Resource{},
-				cluster:        nil,
+				isProw:      false,
+				boskosState: []*boskoscommon.Resource{},
+				cluster:     nil,
 			},
 			want: wantResult{
 				nil,
@@ -1144,12 +1091,10 @@ func TestDelete(t *testing.T) {
 			},
 		},
 		{
-			name: "In prow, only need to release boskos",
+			name: "In prow",
 			td: testdata{
-				isProw:         true,
-				isBoskos:       true,
-				NeedsCleanup:   true,
-				requestCleanup: false,
+				isProw:   true,
+				isBoskos: true,
 				boskosState: []*boskoscommon.Resource{{
 					Name: fakeProj,
 				}},
@@ -1164,23 +1109,7 @@ func TestDelete(t *testing.T) {
 					Name:  fakeProj,
 					State: boskoscommon.Free,
 				}},
-				&container.Cluster{
-					Name:         "customcluster",
-					Location:     "us-central1",
-					Status:       "RUNNING",
-					AddonsConfig: &container.AddonsConfig{},
-					NodePools: []*container.NodePool{
-						{
-							Name:             "default-pool",
-							InitialNodeCount: DefaultGKEMinNodes,
-							Config:           &container.NodeConfig{MachineType: "e2-standard-4", OauthScopes: []string{container.CloudPlatformScope}},
-							Autoscaling:      &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
-						},
-					},
-					MasterAuth: &container.MasterAuth{
-						Username: "admin",
-					},
-				},
+				nil,
 				nil,
 			},
 		},
@@ -1229,13 +1158,12 @@ func TestDelete(t *testing.T) {
 		fgc := setupFakeGKECluster()
 		fgc.Project = fakeProj
 		fgc.IsBoskos = data.isBoskos
-		fgc.NeedsCleanup = data.NeedsCleanup
 		fgc.Request = &GKERequest{
 			Request: gke.Request{
-				MinNodes: DefaultGKEMinNodes,
-				MaxNodes: DefaultGKEMaxNodes,
-				NodeType: DefaultGKENodeType,
-				Region:   DefaultGKERegion,
+				MinNodes: defaultGKEMinNodes,
+				MaxNodes: defaultGKEMaxNodes,
+				NodeType: defaultGKENodeType,
+				Region:   defaultGKERegion,
 				Zone:     "",
 			},
 		}
@@ -1249,12 +1177,7 @@ func TestDelete(t *testing.T) {
 		for _, bos := range data.boskosState {
 			fgc.boskosOps.(*boskosFake.FakeBoskosClient).NewGKEProject(bos.Name)
 			// Acquire with default user
-			fgc.boskosOps.(*boskosFake.FakeBoskosClient).AcquireGKEProject(DefaultResourceType)
-		}
-		if data.requestCleanup {
-			fgc.Request = &GKERequest{
-				NeedsCleanup: true,
-			}
+			fgc.boskosOps.(*boskosFake.FakeBoskosClient).AcquireGKEProject(defaultResourceType)
 		}
 
 		err := fgc.Delete()
@@ -1263,9 +1186,9 @@ func TestDelete(t *testing.T) {
 			gotCluster, _ = fgc.operations.GetCluster(fakeProj, data.cluster.Location, "", data.cluster.Name)
 		}
 		gotBoskos := fgc.boskosOps.(*boskosFake.FakeBoskosClient).GetResources()
-		errMsg := fmt.Sprintf("testing deleting cluster, with:\n\tIs Prow: '%v'\n\tIs Boskos: '%v'\n\tNeed cleanup: '%v'\n\t"+
-			"Request cleanup: '%v'\n\texisting cluster: '%v'\n\tboskos state: '%v'",
-			data.isProw, data.isBoskos, data.NeedsCleanup, data.requestCleanup, data.cluster, data.boskosState)
+		errMsg := fmt.Sprintf("testing deleting cluster, with:\n\tIs Prow: '%v'\n\tIs Boskos: '%v'\n\t"+
+			"existing cluster: '%v'\n\tboskos state: '%v'",
+			data.isProw, data.isBoskos, data.cluster, data.boskosState)
 		if !reflect.DeepEqual(err, tt.want.Err) {
 			t.Errorf("%s\nerror got: '%v'\nerror want: '%v'", errMsg, err, tt.want.Err)
 		}
@@ -45,7 +45,7 @@ func getResourceName(rt ResourceType) (string, error) {
 			return "", fmt.Errorf("failed getting BUILD_NUMBER env var")
 		}
 		if len(buildNumStr) > 20 {
-			buildNumStr = string(buildNumStr[:20])
+			buildNumStr = buildNumStr[:20]
 		}
 		resName = fmt.Sprintf("%s-%s", resName, buildNumStr)
 	}
@@ -44,7 +44,7 @@
 // after the cluster operation is finished
 func writeMetaData(cluster *container.Cluster, project string) {
 	// Set up metadata client for saving metadata
-	c, err := client.NewClient("")
+	c, err := client.New("")
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -21,12 +21,12 @@
 	"log"
 
 	clm "knative.dev/pkg/testutils/clustermanager/e2e-tests"
+	"knative.dev/pkg/testutils/clustermanager/e2e-tests/common"
 	"knative.dev/pkg/testutils/clustermanager/prow-cluster-operation/options"
 )
 
 // Delete deletes a GKE cluster
 func Delete(o *options.RequestWrapper) error {
-	o.Request.NeedsCleanup = true
 	o.Request.SkipCreation = true
 
 	gkeClient := clm.GKEClient{}
@@ -40,15 +40,13 @@ func Delete(o *options.RequestWrapper) error {
 	if err = gkeOps.Delete(); err != nil {
 		return fmt.Errorf("failed deleting cluster: '%v'", err)
 	}
-	// TODO: uncomment the lines below when previous Delete command becomes
-	// async operation
-	// // Unset context with best effort. The first command only unsets current
-	// // context, but doesn't delete the entry from kubeconfig, and should return it's
-	// // context if succeeded, which can be used by the second command to
-	// // delete it from kubeconfig
-	// if out, err := common.StandardExec("kubectl", "config", "unset", "current-context"); err != nil {
-	// 	common.StandardExec("kubectl", "config", "unset", "contexts."+string(out))
-	// }
+	// Unset context with best effort. The first command only unsets current
+	// context, but doesn't delete the entry from kubeconfig, and should return its
+	// context if succeeded, which can be used by the second command to
+	// delete it from kubeconfig
+	if out, err := common.StandardExec("kubectl", "config", "unset", "current-context"); err != nil {
+		common.StandardExec("kubectl", "config", "unset", "contexts."+string(out))
+	}
 
 	return nil
 }
@@ -39,10 +39,10 @@ type client struct {
 	Path string
 }
 
-// NewClient creates a client, takes custom directory for storing `metadata.json`.
+// New creates a client, takes custom directory for storing `metadata.json`.
 // It reads existing `metadata.json` file if it exists, otherwise creates it.
 // Errors out if there is any file i/o problem other than file not exist error.
-func NewClient(dir string) (*client, error) {
+func New(dir string) (*client, error) {
 	c := &client{
 		MetaData: make(map[string]string),
 	}
@@ -53,7 +53,7 @@ func NewClient(dir string) (*client, error) {
 	c.Path = path.Join(dir, filename)
 	if _, err := os.Stat(dir); os.IsNotExist(err) {
 		if err = os.MkdirAll(dir, 0777); err != nil {
-			return nil, fmt.Errorf("Failed to create directory: %v", err)
+			return nil, fmt.Errorf("failed creating directory: %w", err)
 		}
 	}
 	return c, nil
@@ -54,7 +54,7 @@ func TestNewClient(t *testing.T) {
 		}
 		os.RemoveAll(dir)
 		defer os.RemoveAll(dir)
-		c, err := NewClient(data.customDir)
+		c, err := New(data.customDir)
 		errMsg := fmt.Sprintf("Testing new client with dir: %q", data.customDir)
 		if (err == nil && data.expErr) || (err != nil && !data.expErr) {
 			log.Fatalf("%s\ngot: '%v', want: '%v'", errMsg, err, data.expErr)
@@ -88,7 +88,7 @@ func TestSync(t *testing.T) {
 	}
 
 	for _, data := range datas {
-		c, _ := NewClient(fakeArtifactDir)
+		c, _ := New(fakeArtifactDir)
 		os.Remove(c.Path)
 		if data.fileExist {
 			defer os.Remove(c.Path)
@@ -126,7 +126,7 @@ func TestSet(t *testing.T) {
 	}
 
 	for _, data := range datas {
-		c, _ := NewClient(fakeArtifactDir)
+		c, _ := New(fakeArtifactDir)
 		defer os.Remove(c.Path)
 		ioutil.WriteFile(c.Path, []byte(data.content), 0644)
 
@@ -163,7 +163,7 @@ func TestGet(t *testing.T) {
 	}
 
 	for _, data := range datas {
-		c, _ := NewClient(fakeArtifactDir)
+		c, _ := New(fakeArtifactDir)
 		defer os.Remove(c.Path)
 		ioutil.WriteFile(c.Path, []byte(data.content), 0644)
 
@@ -38,7 +38,7 @@ func main() {
 	// Create with default path of metahelper/client, so that the path is
 	// consistent with all other consumers of metahelper/client that run within
 	// the same context of this tool
-	c, err := client.NewClient("")
+	c, err := client.New("")
 	if err != nil {
 		log.Fatal(err)
 	}