Modify cluster manager lib to make it work better with Prow (#755)

* Consolidate cluster manager lib

* Adjust unit test

* updates based on feedback

* Fixing unit tests

* Use project as string instead of pointer
This commit is contained in:
chaodaiG 2019-10-11 16:11:31 -07:00 committed by Knative Prow Robot
parent 9c320664c8
commit 7a2cadb6ad
4 changed files with 498 additions and 456 deletions

View File

@ -52,6 +52,6 @@ func Example() {
if err := gkeOps.Acquire(); err != nil {
log.Fatalf("failed acquire cluster: '%v'", err)
}
log.Printf("GKE project is: %s", *gkeOps.Project)
log.Printf("GKE project is: %q", gkeOps.Project)
log.Printf("GKE cluster is: %v", gkeOps.Cluster)
}

View File

@ -37,6 +37,8 @@ const (
regionEnv = "E2E_CLUSTER_REGION"
backupRegionEnv = "E2E_CLUSTER_BACKUP_REGIONS"
defaultGKEVersion = "latest"
ClusterRunning = "RUNNING"
)
var (
@ -70,7 +72,7 @@ type GKERequest struct {
type GKECluster struct {
Request *GKERequest
// Project might be GKE specific, so put it here
Project *string
Project string
// NeedsCleanup tells whether the cluster needs to be deleted afterwards
// This probably should be part of task wrapper's logic
NeedsCleanup bool
@ -85,10 +87,18 @@ func (gs *GKEClient) Setup(r GKERequest) ClusterOperations {
gc := &GKECluster{}
if r.Project != "" { // use provided project and create cluster
gc.Project = &r.Project
gc.Project = r.Project
gc.NeedsCleanup = true
}
if r.ClusterName == "" {
var err error
r.ClusterName, err = getResourceName(ClusterResource)
if err != nil {
log.Fatalf("Failed getting cluster name: '%v'", err)
}
}
if r.MinNodes == 0 {
r.MinNodes = DefaultGKEMinNodes
}
@ -134,41 +144,6 @@ func (gs *GKEClient) Setup(r GKERequest) ClusterOperations {
return gc
}
// initialize checks environment for cluster and projects to decide whether using
// existing cluster/project or creating new ones.
func (gc *GKECluster) initialize() error {
// Try obtain project name via `kubectl`, `gcloud`
if gc.Project == nil {
if err := gc.checkEnvironment(); err != nil {
return fmt.Errorf("failed checking existing cluster: '%v'", err)
} else if gc.Cluster != nil { // Return if Cluster was already set by kubeconfig
// If clustername provided and kubeconfig set, ignore kubeconfig
if gc.Request != nil && gc.Request.ClusterName != "" && gc.Cluster.Name != gc.Request.ClusterName {
gc.Cluster = nil
}
if gc.Cluster != nil {
return nil
}
}
}
// Get project name from boskos if running in Prow
if gc.Project == nil && common.IsProw() {
project, err := gc.boskosOps.AcquireGKEProject(nil)
if err != nil {
return fmt.Errorf("failed acquiring boskos project: '%v'", err)
}
gc.Project = &project.Name
}
if gc.Project == nil || *gc.Project == "" {
return errors.New("gcp project must be set")
}
if !common.IsProw() && gc.Cluster == nil {
gc.NeedsCleanup = true
}
log.Printf("Using project %q for running test", *gc.Project)
return nil
}
// Provider returns gke
func (gc *GKECluster) Provider() string {
return "gke"
@ -179,42 +154,50 @@ func (gc *GKECluster) Provider() string {
// in us-central1, and default BackupRegions are us-west1 and us-east1. If
Region or Zone is provided then there are no retries
func (gc *GKECluster) Acquire() error {
if err := gc.initialize(); err != nil {
return fmt.Errorf("failed initializing with environment: '%v'", err)
if err := gc.checkEnvironment(); err != nil {
return fmt.Errorf("failed checking project/cluster from environment: '%v'", err)
}
gc.ensureProtected()
var err error
// Check if using existing cluster
// If gc.Cluster was discovered above, then the cluster exists and its
// project and name match the request, so use it
if gc.Cluster != nil {
gc.ensureProtected()
return nil
}
if gc.Request.SkipCreation {
log.Println("Skipping cluster creation as SkipCreation is set")
return nil
return errors.New("cannot acquire cluster if SkipCreation is set")
}
// If comes here we are very likely going to create a cluster, unless
// the cluster already exists
// Cleanup if cluster is created by this client
gc.NeedsCleanup = !common.IsProw()
// Get project name from boskos if running in Prow, otherwise it should fail
// since we don't know which project to use
if common.IsProw() {
project, err := gc.boskosOps.AcquireGKEProject(nil)
if err != nil {
return fmt.Errorf("failed acquiring boskos project: '%v'", err)
}
gc.Project = project.Name
}
if gc.Project == "" {
return errors.New("GCP project must be set")
}
gc.ensureProtected()
log.Printf("Identified project %s for cluster creation", gc.Project)
// Make a deep copy of the request struct, since the original request is supposed to be immutable
request := gc.Request.DeepCopy()
// Perform GKE specific cluster creation logics
if request.ClusterName == "" {
request.ClusterName, err = getResourceName(ClusterResource)
if err != nil {
return fmt.Errorf("failed getting cluster name: '%v'", err)
}
}
if request.Project == "" {
request.Project = *gc.Project
}
// We are going to use request for creating cluster, set its Project
request.Project = gc.Project
// Combine Region with BackupRegions, these will be the regions used for
// retrying creation logic
regions := []string{request.Region}
for _, br := range gc.Request.BackupRegions {
exist := false
for _, region := range regions {
if br == region {
exist = true
}
}
if !exist {
if br != request.Region {
regions = append(regions, br)
}
}
@ -228,25 +211,23 @@ func (gc *GKECluster) Acquire() error {
err = nil
clusterName := request.ClusterName
// Deleting cluster if it already exists
existingCluster, _ := gc.operations.GetCluster(*gc.Project, region, request.Zone, clusterName)
if existingCluster != nil {
log.Printf("Cluster %q already exists in region %q zone %q. Deleting...", clusterName, region, request.Zone)
err = gc.operations.DeleteCluster(*gc.Project, region, request.Zone, clusterName)
// Use cluster if it already exists and running
existingCluster, _ := gc.operations.GetCluster(gc.Project, region, request.Zone, clusterName)
if existingCluster != nil && existingCluster.Status == ClusterRunning {
gc.Cluster = existingCluster
return nil
}
// Creating cluster only if previous step succeeded
if err == nil {
// Creating cluster
log.Printf("Creating cluster %q in region %q zone %q with:\n%+v", clusterName, region, request.Zone, gc.Request)
err = gc.operations.CreateCluster(*gc.Project, region, request.Zone, rb)
if err == nil { // Get cluster at last
cluster, err = gc.operations.GetCluster(*gc.Project, region, request.Zone, rb.Cluster.Name)
}
err = gc.operations.CreateCluster(gc.Project, region, request.Zone, rb)
if err == nil {
cluster, err = gc.operations.GetCluster(gc.Project, region, request.Zone, rb.Cluster.Name)
}
if err != nil {
errMsg := fmt.Sprintf("Error during cluster creation: '%v'. ", err)
if gc.NeedsCleanup { // Delete half created cluster if it's user created
errMsg = fmt.Sprintf("%sDeleting cluster %q in region %q zone %q in background...\n", errMsg, clusterName, region, request.Zone)
gc.operations.DeleteClusterAsync(*gc.Project, region, request.Zone, clusterName)
gc.operations.DeleteClusterAsync(gc.Project, region, request.Zone, clusterName)
}
// Retry another region if cluster creation failed.
// TODO(chaodaiG): catch specific errors as we know what the error look like for stockout etc.
@ -267,15 +248,15 @@ func (gc *GKECluster) Acquire() error {
// Delete takes care of GKE cluster resource cleanup. It only releases the Boskos resource if running in
// Prow; otherwise it deletes the cluster if it is marked NeedsCleanup
func (gc *GKECluster) Delete() error {
if err := gc.initialize(); err != nil {
return fmt.Errorf("failed initializing with environment: '%v'", err)
if err := gc.checkEnvironment(); err != nil {
return fmt.Errorf("failed checking project/cluster from environment: '%v'", err)
}
gc.ensureProtected()
// Release Boskos if running in Prow; the Janitor will take care of
// deleting the clusters
if common.IsProw() {
log.Printf("Releasing Boskos resource: '%v'", *gc.Project)
return gc.boskosOps.ReleaseGKEProject(nil, *gc.Project)
log.Printf("Releasing Boskos resource: '%v'", gc.Project)
return gc.boskosOps.ReleaseGKEProject(nil, gc.Project)
}
// NeedsCleanup is only true if running locally and cluster created by the
@ -290,7 +271,7 @@ func (gc *GKECluster) Delete() error {
}
log.Printf("Deleting cluster %q in %q", gc.Cluster.Name, gc.Cluster.Location)
region, zone := gke.RegionZoneFromLoc(gc.Cluster.Location)
if err := gc.operations.DeleteCluster(*gc.Project, region, zone, gc.Cluster.Name); err != nil {
if err := gc.operations.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name); err != nil {
return fmt.Errorf("failed deleting cluster: '%v'", err)
}
return nil
@ -298,10 +279,10 @@ func (gc *GKECluster) Delete() error {
// ensureProtected ensures not operating on protected project/cluster
func (gc *GKECluster) ensureProtected() {
if gc.Project != nil {
if gc.Project != "" {
for _, pp := range protectedProjects {
if *gc.Project == pp {
log.Fatalf("project %q is protected", *gc.Project)
if gc.Project == pp {
log.Fatalf("project %q is protected", gc.Project)
}
}
}
@ -314,38 +295,56 @@ func (gc *GKECluster) ensureProtected() {
}
}
// checks for existing cluster by looking at kubeconfig,
// and sets up gc.Project and gc.Cluster properly, otherwise fail it.
// if project can be derived from gcloud, sets it up as well
// checkEnvironment checks the environment set for kubeconfig and gcloud, and tries to
// identify the existing project/cluster if they are not set
//
// checks for existing cluster by looking at kubeconfig, if kubeconfig is set:
// - If it exists in GKE:
// - If Request doesn't contain project/clustername:
// - Use it
// - If Request contains any of project/clustername:
// - If the cluster matches with them:
// - Use it
// If cluster isn't discovered above, try to get project from gcloud
func (gc *GKECluster) checkEnvironment() error {
var err error
// if kubeconfig is configured, use it
output, err := common.StandardExec("kubectl", "config", "current-context")
// if kubeconfig is configured, try to use it
if err == nil {
currentContext := strings.TrimSpace(string(output))
log.Printf("kubeconfig is: %q", currentContext)
if strings.HasPrefix(currentContext, "gke_") {
// output should be in the form of gke_PROJECT_REGION_CLUSTER
parts := strings.Split(currentContext, "_")
if len(parts) != 4 { // fall through with warning
log.Printf("WARNING: ignoring kubectl current-context since it's malformed: '%s'", currentContext)
log.Printf("WARNING: ignoring kubectl current-context since it's malformed: %q", currentContext)
} else {
log.Printf("kubeconfig isn't empty, uses this cluster for running tests: %s", currentContext)
gc.Project = &parts[1]
project := parts[1]
location, clusterName := parts[2], parts[3]
region, zone := gke.RegionZoneFromLoc(location)
gc.Cluster, err = gc.operations.GetCluster(*gc.Project, region, zone, clusterName)
// Use the cluster only if project and clustername match
if (gc.Request.Project == "" || gc.Request.Project == project) && (gc.Request.ClusterName == "" || gc.Request.ClusterName == clusterName) {
cluster, err := gc.operations.GetCluster(project, region, zone, clusterName)
if err != nil {
return fmt.Errorf("couldn't find cluster %s in %s in %s, does it exist? %v", clusterName, *gc.Project, location, err)
return fmt.Errorf("couldn't find cluster %s in %s in %s, does it exist? %v", clusterName, project, location, err)
}
gc.Cluster = cluster
gc.Project = project
}
return nil
}
}
}
// When kubeconfig isn't set, the err isn't nil and output should be empty.
// If output isn't empty then this is unexpected error, should shout out
// directly
if err != nil && len(output) > 0 {
// this is unexpected error, should shout out directly
return fmt.Errorf("failed running kubectl config current-context: '%s'", string(output))
}
if gc.Project != "" {
return nil
}
// if gcloud is pointing to a project, use it
output, err = common.StandardExec("gcloud", "config", "get-value", "project")
if err != nil {
@ -353,8 +352,7 @@ func (gc *GKECluster) checkEnvironment() error {
}
if string(output) != "" {
project := strings.Trim(strings.TrimSpace(string(output)), "\n\r")
gc.Project = &project
gc.Project = project
}
return nil
}

View File

@ -56,18 +56,22 @@ func TestSetup(t *testing.T) {
regionOverride := "fooregion"
zoneOverride := "foozone"
fakeAddons := "fake-addon"
fakeBuildID := "1234"
datas := []struct {
r GKERequest
isProw bool
regionEnv, backupRegionEnv string
expClusterOperations *GKECluster
}{
{
// Defaults
// Defaults, not running in Prow
GKERequest{},
false,
"", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls",
MinNodes: 1,
MaxNodes: 3,
NodeType: "n1-standard-4",
@ -79,16 +83,37 @@ func TestSetup(t *testing.T) {
},
},
}, {
// Project provided
// Defaults, running in Prow
GKERequest{},
true,
"", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls-1234",
MinNodes: 1,
MaxNodes: 3,
NodeType: "n1-standard-4",
Region: "us-central1",
Zone: "",
Addons: nil,
},
BackupRegions: []string{"us-west1", "us-east1"},
},
},
}, {
// Project provided, not running in Prow
GKERequest{
Request: gke.Request{
Project: fakeProj,
},
},
false,
"", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls",
Project: "b",
MinNodes: 1,
MaxNodes: 3,
@ -99,16 +124,66 @@ func TestSetup(t *testing.T) {
},
BackupRegions: []string{"us-west1", "us-east1"},
},
Project: &fakeProj,
Project: fakeProj,
NeedsCleanup: true,
},
}, {
// Cluster name provided
// Project provided, running in Prow
GKERequest{
Request: gke.Request{
Project: fakeProj,
},
},
true,
"", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls-1234",
Project: "b",
MinNodes: 1,
MaxNodes: 3,
NodeType: "n1-standard-4",
Region: "us-central1",
Zone: "",
Addons: nil,
},
BackupRegions: []string{"us-west1", "us-east1"},
},
Project: fakeProj,
NeedsCleanup: true,
},
}, {
// Cluster name provided, not running in Prow
GKERequest{
Request: gke.Request{
ClusterName: "predefined-cluster-name",
},
},
false,
"", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "predefined-cluster-name",
MinNodes: 1,
MaxNodes: 3,
NodeType: "n1-standard-4",
Region: "us-central1",
Zone: "",
Addons: nil,
},
BackupRegions: []string{"us-west1", "us-east1"},
},
},
}, {
// Cluster name provided, running in Prow
GKERequest{
Request: gke.Request{
ClusterName: "predefined-cluster-name",
},
},
false,
"", "",
&GKECluster{
Request: &GKERequest{
@ -135,10 +210,12 @@ func TestSetup(t *testing.T) {
Zone: zoneOverride,
},
},
false,
"", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls",
MinNodes: 2,
MaxNodes: 4,
NodeType: "foonode",
@ -159,10 +236,12 @@ func TestSetup(t *testing.T) {
Region: regionOverride,
},
},
false,
"", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls",
MinNodes: 2,
MaxNodes: 4,
NodeType: "foonode",
@ -176,10 +255,12 @@ func TestSetup(t *testing.T) {
}, {
// Set env Region
GKERequest{},
false,
"customregion", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls",
MinNodes: 1,
MaxNodes: 3,
NodeType: "n1-standard-4",
@ -193,10 +274,12 @@ func TestSetup(t *testing.T) {
}, {
// Set env backupzone
GKERequest{},
false,
"", "backupregion1 backupregion2",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls",
MinNodes: 1,
MaxNodes: 3,
NodeType: "n1-standard-4",
@ -214,10 +297,12 @@ func TestSetup(t *testing.T) {
Addons: []string{fakeAddons},
},
},
false,
"", "",
&GKECluster{
Request: &GKERequest{
Request: gke.Request{
ClusterName: "kpkg-e2e-cls",
MinNodes: 1,
MaxNodes: 3,
NodeType: "n1-standard-4",
@ -258,11 +343,10 @@ func TestSetup(t *testing.T) {
out = []byte("")
err = fmt.Errorf("kubectl not set")
default:
out, err = oldExecFunc(name)
out, err = oldExecFunc(name, args...)
}
return out, err
}
for _, data := range datas {
common.GetOSEnv = func(s string) string {
switch s {
@ -270,6 +354,13 @@ func TestSetup(t *testing.T) {
return data.regionEnv
case "E2E_CLUSTER_BACKUP_REGIONS":
return data.backupRegionEnv
case "BUILD_NUMBER":
return fakeBuildID
case "PROW_JOB_ID": // needed to mock IsProw()
if data.isProw {
return "fake_job_id"
}
return ""
}
return oldEnvFunc(s)
}
@ -287,134 +378,6 @@ func TestSetup(t *testing.T) {
}
}
func TestInitialize(t *testing.T) {
customProj := "customproj"
fakeBoskosProj := "fake-boskos-proj-0"
datas := []struct {
project *string
clusterExist bool
gcloudSet bool
isProw bool
boskosProjs []string
expProj *string
expCluster *container.Cluster
expErr error
}{
{
// User defines project
&fakeProj, false, false, false, []string{}, &fakeProj, nil, nil,
}, {
// User defines project, and running in Prow
&fakeProj, false, false, true, []string{}, &fakeProj, nil, nil,
}, {
// kubeconfig set
nil, true, false, false, []string{}, &fakeProj, &container.Cluster{
Name: "d",
Location: "c",
Status: "RUNNING",
NodePools: []*container.NodePool{
{
Name: "default-pool",
},
},
}, nil,
}, {
// kubeconfig not set and gcloud set
nil, false, true, false, []string{}, &customProj, nil, nil,
}, {
// kubeconfig not set and gcloud set, running in Prow and boskos not available
nil, false, false, true, []string{}, nil, nil, fmt.Errorf("failed acquiring boskos project: 'no GKE project available'"),
}, {
// kubeconfig not set and gcloud set, running in Prow and boskos available
nil, false, false, true, []string{fakeBoskosProj}, &fakeBoskosProj, nil, nil,
}, {
// kubeconfig not set and gcloud set, not in Prow and boskos not available
nil, false, false, false, []string{}, nil, nil, fmt.Errorf("gcp project must be set"),
}, {
// kubeconfig not set and gcloud set, not in Prow and boskos available
nil, false, false, false, []string{fakeBoskosProj}, nil, nil, fmt.Errorf("gcp project must be set"),
},
}
oldEnvFunc := common.GetOSEnv
oldExecFunc := common.StandardExec
defer func() {
// restore
common.GetOSEnv = oldEnvFunc
common.StandardExec = oldExecFunc
}()
for _, data := range datas {
fgc := setupFakeGKECluster()
if data.project != nil {
fgc.Project = data.project
}
if data.clusterExist {
parts := strings.Split("gke_b_c_d", "_")
fgc.operations.CreateClusterAsync(parts[1], parts[2], "", &container.CreateClusterRequest{
Cluster: &container.Cluster{
Name: parts[3],
},
ProjectId: parts[1],
})
}
// Set up fake boskos
for _, bos := range data.boskosProjs {
fgc.boskosOps.(*boskosFake.FakeBoskosClient).NewGKEProject(bos)
}
// mock for testing
common.StandardExec = func(name string, args ...string) ([]byte, error) {
var out []byte
var err error
switch name {
case "gcloud":
out = []byte("")
err = nil
if data.gcloudSet {
out = []byte(customProj)
err = nil
}
case "kubectl":
out = []byte("")
err = fmt.Errorf("kubectl not set")
if data.clusterExist {
out = []byte("gke_b_c_d")
err = nil
}
default:
out, err = oldExecFunc(name, args...)
}
return out, err
}
// Mock IsProw()
common.GetOSEnv = func(s string) string {
var res string
switch s {
case "PROW_JOB_ID":
if data.isProw {
res = "fake_job_id"
}
default:
res = oldEnvFunc(s)
}
return res
}
err := fgc.initialize()
errMsg := fmt.Sprintf("test initialize with:\n\tuser defined project: '%v'\n\tkubeconfig set: '%v'\n\tgcloud set: '%v'\n\trunning in prow: '%v'\n\tboskos set: '%v'",
data.project, data.clusterExist, data.gcloudSet, data.isProw, data.boskosProjs)
if !reflect.DeepEqual(data.expErr, err) {
t.Errorf("%s\nerror got: '%v'\nerror want: '%v'", errMsg, err, data.expErr)
}
if dif := cmp.Diff(data.expCluster, fgc.Cluster); dif != "" {
t.Errorf("%s\nCluster got(+) is different from wanted(-)\n%v", errMsg, dif)
}
if dif := cmp.Diff(data.expProj, fgc.Project); dif != "" {
t.Errorf("%s\nProject got(+) is different from wanted(-)\n%v", errMsg, dif)
}
}
}
func TestGKECheckEnvironment(t *testing.T) {
datas := []struct {
kubectlOut string
@ -422,40 +385,57 @@ func TestGKECheckEnvironment(t *testing.T) {
gcloudOut string
gcloudErr error
clusterExist bool
expProj *string
requestClusterName string
requestProject string
expProj string
expCluster *string
expErr error
}{
{
// Base condition, kubectl shouldn't return empty string if there is no error
"", nil, "", nil, false, nil, nil, nil,
"", nil, "", nil, false, "", "", "", nil, nil,
}, {
// kubeconfig not set and gcloud not set
"", fmt.Errorf("kubectl not set"), "", nil, false, nil, nil, nil,
"", fmt.Errorf("kubectl not set"), "", nil, false, "", "", "", nil, nil,
}, {
// kubeconfig failed
"failed", fmt.Errorf("kubectl other err"), "", nil, false, nil, nil, fmt.Errorf("failed running kubectl config current-context: 'failed'"),
"failed", fmt.Errorf("kubectl other err"), "", nil, false, "", "", "", nil, fmt.Errorf("failed running kubectl config current-context: 'failed'"),
}, {
// kubeconfig returned something other than "gke_PROJECT_REGION_CLUSTER"
"gke_b_c", nil, "", nil, false, nil, nil, nil,
"gke_b_c", nil, "", nil, false, "", "", "", nil, nil,
}, {
// kubeconfig returned something other than "gke_PROJECT_REGION_CLUSTER"
"gke_b_c_d_e", nil, "", nil, false, nil, nil, nil,
"gke_b_c_d_e", nil, "", nil, false, "", "", "", nil, nil,
}, {
// kubeconfig correctly set and cluster exist
"gke_b_c_d", nil, "", nil, true, &fakeProj, &fakeCluster, nil,
"gke_b_c_d", nil, "", nil, true, "d", "b", fakeProj, &fakeCluster, nil,
}, {
// kubeconfig correctly set and cluster exist, project wasn't requested
"gke_b_c_d", nil, "", nil, true, "d", "", fakeProj, &fakeCluster, nil,
}, {
// kubeconfig correctly set and cluster exist, project doesn't match
"gke_b_c_d", nil, "", nil, true, "d", "doesntexist", "", nil, nil,
}, {
// kubeconfig correctly set and cluster exist, cluster wasn't requested
"gke_b_c_d", nil, "", nil, true, "", "b", fakeProj, &fakeCluster, nil,
}, {
// kubeconfig correctly set and cluster exist, cluster doesn't match
"gke_b_c_d", nil, "", nil, true, "doesntexist", "b", "", nil, nil,
}, {
// kubeconfig correctly set and cluster exist, none of project/cluster requested
"gke_b_c_d", nil, "", nil, true, "", "", fakeProj, &fakeCluster, nil,
}, {
// kubeconfig correctly set, but cluster doesn't exist
"gke_b_c_d", nil, "", nil, false, &fakeProj, nil, fmt.Errorf("couldn't find cluster d in b in c, does it exist? cluster not found"),
"gke_b_c_d", nil, "", nil, false, "d", "", "", nil, fmt.Errorf("couldn't find cluster d in b in c, does it exist? cluster not found"),
}, {
// kubeconfig not set and gcloud failed
"", fmt.Errorf("kubectl not set"), "", fmt.Errorf("gcloud failed"), false, nil, nil, fmt.Errorf("failed getting gcloud project: 'gcloud failed'"),
"", fmt.Errorf("kubectl not set"), "", fmt.Errorf("gcloud failed"), false, "", "", "", nil, fmt.Errorf("failed getting gcloud project: 'gcloud failed'"),
}, {
// kubeconfig not set and gcloud not set
"", fmt.Errorf("kubectl not set"), "", nil, false, nil, nil, nil,
"", fmt.Errorf("kubectl not set"), "", nil, false, "", "", "", nil, nil,
}, {
// kubeconfig not set and gcloud set
"", fmt.Errorf("kubectl not set"), "b", nil, false, &fakeProj, nil, nil,
"", fmt.Errorf("kubectl not set"), "b", nil, false, "", "", fakeProj, nil, nil,
},
}
@ -476,6 +456,8 @@ func TestGKECheckEnvironment(t *testing.T) {
ProjectId: parts[1],
})
}
fgc.Request.ClusterName = data.requestClusterName
fgc.Request.Project = data.requestProject
// mock for testing
common.StandardExec = func(name string, args ...string) ([]byte, error) {
var out []byte
@ -497,14 +479,15 @@ func TestGKECheckEnvironment(t *testing.T) {
gotCluster = &fgc.Cluster.Name
}
errMsg := fmt.Sprintf("check environment with:\n\tkubectl output: %q\n\t\terror: '%v'\n\tgcloud output: %q\n\t\t"+
"error: '%v'\n\t\tclustername requested: %q\n\t\tproject requested: %q",
data.kubectlOut, data.kubectlErr, data.gcloudOut, data.gcloudErr, data.requestClusterName, data.requestProject)
if !reflect.DeepEqual(err, data.expErr) || !reflect.DeepEqual(fgc.Project, data.expProj) || !reflect.DeepEqual(gotCluster, data.expCluster) {
t.Errorf("check environment with:\n\tkubectl output: %q\n\t\terror: '%v'\n\tgcloud output: %q\n\t\t"+
"error: '%v'\ngot: project - '%v', cluster - '%v', err - '%v'\nwant: project - '%v', cluster - '%v', err - '%v'",
data.kubectlOut, data.kubectlErr, data.gcloudOut, data.gcloudErr, fgc.Project, fgc.Cluster, err, data.expProj, data.expCluster, data.expErr)
t.Errorf("%s\ngot: project - %q, cluster - '%v', err - '%v'\nwant: project - '%v', cluster - '%v', err - '%v'",
errMsg, fgc.Project, fgc.Cluster, err, data.expProj, data.expCluster, data.expErr)
}
errMsg := fmt.Sprintf("check environment with:\n\tkubectl output: %q\n\t\terror: '%v'\n\tgcloud output: %q\n\t\terror: '%v'",
data.kubectlOut, data.kubectlErr, data.gcloudOut, data.gcloudErr)
if !reflect.DeepEqual(data.expErr, err) {
t.Errorf("%s\nerror got: '%v'\nerror want: '%v'", errMsg, data.expErr, err)
}
@ -519,103 +502,26 @@ func TestGKECheckEnvironment(t *testing.T) {
func TestAcquire(t *testing.T) {
predefinedClusterName := "predefined-cluster-name"
fakeClusterName := "kpkg-e2e-cls-1234"
fakeBoskosProj := "fake-boskos-proj-0"
fakeBuildID := "1234"
datas := []struct {
isProw bool
project string
existCluster *container.Cluster
predefinedClusterName string
kubeconfigSet bool
addons []string
nextOpStatus []string
boskosProjs []string
skipCreation bool
expCluster *container.Cluster
expErr error
expPanic bool
}{
{
// cluster already found
&container.Cluster{
Name: "customcluster",
Location: "us-central1",
}, "", true, []string{}, []string{}, false, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
},
},
}, nil, false,
// cluster not exist, running in Prow and boskos not available
true, fakeProj, nil, []string{}, []string{}, []string{}, false, nil, fmt.Errorf("failed acquiring boskos project: 'no GKE project available'"), false,
}, {
// cluster already found and clustername predefined
&container.Cluster{
Name: "customcluster",
Location: "us-central1",
}, predefinedClusterName, true, []string{}, []string{}, false, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
},
},
}, nil, false,
}, {
// cluster exists but not set in kubeconfig, cluster will be deleted
// then created
&container.Cluster{
Name: fakeClusterName,
Location: "us-central1",
}, "", false, []string{}, []string{}, false, &container.Cluster{
Name: fakeClusterName,
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
InitialNodeCount: DefaultGKEMinNodes,
Config: &container.NodeConfig{MachineType: "n1-standard-4"},
Autoscaling: &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
},
},
MasterAuth: &container.MasterAuth{
Username: "admin",
},
}, nil, false,
}, {
// cluster exists but not set in kubeconfig, cluster deletion
// failed, will recreate in us-west1
&container.Cluster{
Name: fakeClusterName,
Location: "us-central1",
}, "", false, []string{}, []string{"BAD"}, false, &container.Cluster{
Name: fakeClusterName,
Location: "us-west1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
InitialNodeCount: DefaultGKEMinNodes,
Config: &container.NodeConfig{MachineType: "n1-standard-4"},
Autoscaling: &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
},
},
MasterAuth: &container.MasterAuth{
Username: "admin",
},
}, nil, false,
}, {
// cluster exists but not set in kubeconfig, clusterName defined
&container.Cluster{
Name: fakeClusterName,
Location: "us-central1",
}, predefinedClusterName, false, []string{}, []string{}, false, &container.Cluster{
// cluster not exist, running in Prow and boskos available
true, fakeProj, nil, []string{}, []string{}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: predefinedClusterName,
Location: "us-central1",
Status: "RUNNING",
@ -633,8 +539,11 @@ func TestAcquire(t *testing.T) {
},
}, nil, false,
}, {
// cluster not exist, but clustername defined
nil, predefinedClusterName, false, []string{}, []string{}, false, &container.Cluster{
// cluster not exist, project not set, running in Prow and boskos not available
true, "", nil, []string{}, []string{}, []string{}, false, nil, fmt.Errorf("failed acquiring boskos project: 'no GKE project available'"), false,
}, {
// cluster not exist, project not set, running in Prow and boskos available
true, "", nil, []string{}, []string{}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: predefinedClusterName,
Location: "us-central1",
Status: "RUNNING",
@ -652,12 +561,109 @@ func TestAcquire(t *testing.T) {
},
}, nil, false,
}, {
// cluster creation succeeded
nil, "", false, []string{}, []string{}, true, nil, nil, false,
// project not set, not in Prow and boskos not available
false, "", nil, []string{}, []string{}, []string{}, false, nil, fmt.Errorf("GCP project must be set"), false,
}, {
// project not set, not in Prow and boskos available
false, "", nil, []string{}, []string{}, []string{fakeBoskosProj}, false, nil, fmt.Errorf("GCP project must be set"), false,
}, {
// cluster exists, project set, running in Prow
true, fakeProj, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
}, []string{}, []string{}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
InitialNodeCount: DefaultGKEMinNodes,
Config: &container.NodeConfig{MachineType: "n1-standard-4"},
Autoscaling: &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
},
},
MasterAuth: &container.MasterAuth{
Username: "admin",
},
}, nil, false,
}, {
// cluster exists, project set and not running in Prow
false, fakeProj, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
}, []string{}, []string{}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
InitialNodeCount: DefaultGKEMinNodes,
Config: &container.NodeConfig{MachineType: "n1-standard-4"},
Autoscaling: &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
},
},
MasterAuth: &container.MasterAuth{
Username: "admin",
},
}, nil, false,
}, {
// cluster exist, not running in Prow and skip creation
false, fakeProj, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
}, []string{}, []string{}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
InitialNodeCount: DefaultGKEMinNodes,
Config: &container.NodeConfig{MachineType: "n1-standard-4"},
Autoscaling: &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
},
},
MasterAuth: &container.MasterAuth{
Username: "admin",
},
}, nil, false,
}, {
// cluster exist, running in Prow and skip creation
true, fakeProj, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
}, []string{}, []string{}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: "customcluster",
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
InitialNodeCount: DefaultGKEMinNodes,
Config: &container.NodeConfig{MachineType: "n1-standard-4"},
Autoscaling: &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
},
},
MasterAuth: &container.MasterAuth{
Username: "admin",
},
}, nil, false,
}, {
// cluster not exist, not running in Prow and skip creation
false, fakeProj, nil, []string{}, []string{}, []string{fakeBoskosProj}, true, nil, fmt.Errorf("cannot acquire cluster if SkipCreation is set"), false,
}, {
// cluster not exist, running in Prow and skip creation
true, fakeProj, nil, []string{}, []string{}, []string{fakeBoskosProj}, true, nil, fmt.Errorf("cannot acquire cluster if SkipCreation is set"), false,
}, {
// skipped cluster creation as SkipCreation is requested
nil, "", false, []string{}, []string{}, false, &container.Cluster{
Name: fakeClusterName,
true, fakeProj, nil, []string{}, []string{}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: predefinedClusterName,
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
@ -675,8 +681,8 @@ func TestAcquire(t *testing.T) {
}, nil, false,
}, {
// cluster creation succeeded with addon
nil, "", false, []string{"istio"}, []string{}, false, &container.Cluster{
Name: fakeClusterName,
true, fakeProj, nil, []string{"istio"}, []string{}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: predefinedClusterName,
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{
@ -696,8 +702,8 @@ func TestAcquire(t *testing.T) {
}, nil, false,
}, {
// cluster creation succeeded retry
nil, "", false, []string{}, []string{"PENDING"}, false, &container.Cluster{
Name: fakeClusterName,
true, fakeProj, nil, []string{}, []string{"PENDING"}, []string{fakeBoskosProj}, false, &container.Cluster{
Name: predefinedClusterName,
Location: "us-west1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
@ -715,25 +721,26 @@ func TestAcquire(t *testing.T) {
}, nil, false,
}, {
// cluster creation failed all retry
nil, "", false, []string{}, []string{"PENDING", "PENDING", "PENDING"}, false, nil, fmt.Errorf("timed out waiting"), false,
true, fakeProj, nil, []string{}, []string{"PENDING", "PENDING", "PENDING"}, []string{fakeBoskosProj}, false, nil, fmt.Errorf("timed out waiting"), false,
}, {
// cluster creation went bad state
nil, "", false, []string{}, []string{"BAD", "BAD", "BAD"}, false, nil, fmt.Errorf("unexpected operation status: %q", "BAD"), false,
true, fakeProj, nil, []string{}, []string{"BAD", "BAD", "BAD"}, []string{fakeBoskosProj}, false, nil, fmt.Errorf("unexpected operation status: %q", "BAD"), false,
}, {
// bad addon, should get a panic
nil, "", false, []string{"bad_addon"}, []string{}, false, nil, nil, true,
true, fakeProj, nil, []string{"bad_addon"}, []string{}, []string{fakeBoskosProj}, false, nil, nil, true,
},
}
// mock GetOSEnv for testing
oldFunc := common.GetOSEnv
oldEnvFunc := common.GetOSEnv
oldExecFunc := common.StandardExec
// mock timeout so it doesn't run forever
oldCreationTimeout := gkeFake.CreationTimeout
// wait function polls every 500ms, give it 1000 to avoid random timeout
gkeFake.CreationTimeout = 1000 * time.Millisecond
defer func() {
// restore
common.GetOSEnv = oldFunc
common.GetOSEnv = oldEnvFunc
common.StandardExec = oldExecFunc
gkeFake.CreationTimeout = oldCreationTimeout
}()
@ -743,44 +750,51 @@ func TestAcquire(t *testing.T) {
t.Errorf("got unexpected panic: '%v'", r)
}
}()
// mock for testing
common.StandardExec = func(name string, args ...string) ([]byte, error) {
var out []byte
var err error
switch name {
case "gcloud":
out = []byte("")
err = nil
if data.project != "" {
out = []byte(data.project)
err = nil
}
case "kubectl":
out = []byte("")
err = fmt.Errorf("kubectl not set")
if data.existCluster != nil {
context := fmt.Sprintf("gke_%s_%s_%s", data.project, data.existCluster.Location, data.existCluster.Name)
out = []byte(context)
err = nil
}
default:
out, err = oldExecFunc(name, args...)
}
return out, err
}
common.GetOSEnv = func(key string) string {
switch key {
case "BUILD_NUMBER":
return fakeBuildID
case "PROW_JOB_ID": // needed to mock IsProw()
return "jobid"
if data.isProw {
return "fake_job_id"
}
return oldFunc(key)
return ""
}
return oldEnvFunc(key)
}
fgc := setupFakeGKECluster()
opCount := 0
if data.existCluster != nil {
opCount++
ac := &container.AddonsConfig{}
for _, addon := range data.addons {
if addon == "istio" {
ac.IstioConfig = &container.IstioConfig{Disabled: false}
// Set up fake boskos
for _, bos := range data.boskosProjs {
fgc.boskosOps.(*boskosFake.FakeBoskosClient).NewGKEProject(bos)
}
}
fgc.operations.CreateClusterAsync(fakeProj, data.existCluster.Location, "", &container.CreateClusterRequest{
Cluster: &container.Cluster{
Name: data.existCluster.Name,
AddonsConfig: ac,
},
ProjectId: fakeProj,
})
if data.kubeconfigSet {
fgc.Cluster, _ = fgc.operations.GetCluster(fakeProj, data.existCluster.Location, "", data.existCluster.Name)
}
}
fgc.Project = &fakeProj
for i, status := range data.nextOpStatus {
fgc.operations.(*gkeFake.GKESDKClient).OpStatus[strconv.Itoa(opCount+i)] = status
}
fgc.Request = &GKERequest{
Request: gke.Request{
ClusterName: data.predefinedClusterName,
ClusterName: predefinedClusterName,
MinNodes: DefaultGKEMinNodes,
MaxNodes: DefaultGKEMaxNodes,
NodeType: DefaultGKENodeType,
@ -790,6 +804,20 @@ func TestAcquire(t *testing.T) {
},
BackupRegions: DefaultGKEBackupRegions,
}
opCount := 0
if data.existCluster != nil {
opCount++
fgc.Request.ClusterName = data.existCluster.Name
rb, _ := gke.NewCreateClusterRequest(&fgc.Request.Request)
fgc.operations.CreateClusterAsync(data.project, data.existCluster.Location, "", rb)
fgc.Cluster, _ = fgc.operations.GetCluster(data.project, data.existCluster.Location, "", data.existCluster.Name)
}
fgc.Project = data.project
for i, status := range data.nextOpStatus {
fgc.operations.(*gkeFake.GKESDKClient).OpStatus[strconv.Itoa(opCount+i)] = status
}
if data.skipCreation {
fgc.Request.SkipCreation = true
}
@ -797,11 +825,11 @@ func TestAcquire(t *testing.T) {
// goroutine
fgc.NeedsCleanup = false
err := fgc.Acquire()
errMsg := fmt.Sprintf("testing acquiring cluster, with:\n\texisting cluster: '%+v'\n\tSkip creation: '%+v'\n\t"+
"next operations outcomes: '%v'\n\tkubeconfig set: '%v'\n\taddons: '%v'",
data.existCluster, data.skipCreation, data.nextOpStatus, data.kubeconfigSet, data.addons)
errMsg := fmt.Sprintf("testing acquiring cluster, with:\n\tisProw: '%v'\n\tproject: '%v'\n\texisting cluster: '%+v'\n\tSkip creation: '%+v'\n\t"+
"next operations outcomes: '%v'\n\taddons: '%v'\n\tboskos projects: '%v'",
data.isProw, data.project, data.existCluster, data.skipCreation, data.nextOpStatus, data.addons, data.boskosProjs)
if !reflect.DeepEqual(err, data.expErr) {
t.Errorf("%s\nerror got: '%v'\nerror want: '%v'", errMsg, data.expErr, err)
t.Errorf("%s\nerror got: '%v'\nerror want: '%v'", errMsg, err, data.expErr)
}
if dif := cmp.Diff(data.expCluster, fgc.Cluster); dif != "" {
t.Errorf("%s\nCluster got(+) is different from wanted(-)\n%v", errMsg, dif)
@ -835,11 +863,18 @@ func TestDelete(t *testing.T) {
Name: "customcluster",
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
InitialNodeCount: DefaultGKEMinNodes,
Config: &container.NodeConfig{MachineType: "n1-standard-4"},
Autoscaling: &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
},
},
MasterAuth: &container.MasterAuth{
Username: "admin",
},
},
nil,
}, {
@ -899,11 +934,18 @@ func TestDelete(t *testing.T) {
Name: "customcluster",
Location: "us-central1",
Status: "RUNNING",
AddonsConfig: &container.AddonsConfig{},
NodePools: []*container.NodePool{
{
Name: "default-pool",
InitialNodeCount: DefaultGKEMinNodes,
Config: &container.NodeConfig{MachineType: "n1-standard-4"},
Autoscaling: &container.NodePoolAutoscaling{Enabled: true, MaxNodeCount: 3, MinNodeCount: 1},
},
},
MasterAuth: &container.MasterAuth{
Username: "admin",
},
},
nil,
},
@ -932,16 +974,22 @@ func TestDelete(t *testing.T) {
return oldFunc(key)
}
fgc := setupFakeGKECluster()
fgc.Project = &fakeProj
fgc.Project = fakeProj
fgc.NeedsCleanup = data.NeedsCleanup
if data.cluster != nil {
fgc.operations.CreateClusterAsync(fakeProj, data.cluster.Location, "", &container.CreateClusterRequest{
Cluster: &container.Cluster{
Name: data.cluster.Name,
fgc.Request = &GKERequest{
Request: gke.Request{
MinNodes: DefaultGKEMinNodes,
MaxNodes: DefaultGKEMaxNodes,
NodeType: DefaultGKENodeType,
Region: DefaultGKERegion,
Zone: "",
},
ProjectId: fakeProj,
})
fgc.Cluster = data.cluster
}
if data.cluster != nil {
fgc.Request.ClusterName = data.cluster.Name
rb, _ := gke.NewCreateClusterRequest(&fgc.Request.Request)
fgc.operations.CreateClusterAsync(fakeProj, data.cluster.Location, "", rb)
fgc.Cluster, _ = fgc.operations.GetCluster(fakeProj, data.cluster.Location, "", data.cluster.Name)
}
// Set up fake boskos
for _, bos := range data.boskosState {

View File

@ -107,11 +107,7 @@ func (fgsc *GKESDKClient) CreateClusterAsync(
Location: location,
Status: "RUNNING",
AddonsConfig: rb.Cluster.AddonsConfig,
NodePools: []*container.NodePool{
{
Name: "default-pool",
},
},
NodePools: rb.Cluster.NodePools,
}
if rb.Cluster.NodePools != nil {
cluster.NodePools = rb.Cluster.NodePools