mirror of https://github.com/knative/caching.git
[master] Auto-update dependencies (#243)
Produced via:
  `./hack/update-deps.sh --upgrade && ./hack/update-codegen.sh`
/assign n3wscott vagababov
/cc n3wscott vagababov

This commit is contained in:
parent 4f26948a1b
commit 6bd9f38fb0
@@ -966,7 +966,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:ae72dd6d2950c1ffe31061c827bb8904fce102f9134c4309cca0fe1ac5864c26"
+  digest = "1:9b83d1c207c67003de88414626add45e9bce8c47502e07cbe6cb5fd48e39f2df"
   name = "knative.dev/pkg"
   packages = [
     "apis",
@@ -986,7 +986,7 @@
     "reconciler",
   ]
   pruneopts = "T"
-  revision = "55250e6aab62f767b8680a12ddeb771c519d78fe"
+  revision = "92cdec5b35931192590f5d8affad86898b6c50d6"
 
 [[projects]]
   branch = "master"
@@ -997,7 +997,7 @@
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "cdec09517ea85b37691ab4a81ee85d74e7e91b7e"
+  revision = "e84f0d1a364732918a6635cb07d895ff77ecba76"
 
 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
@@ -1362,14 +1362,14 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:5da0f15efcd7dafa6296be9cd4a630fcb5253cf202e54638299221ae7df419ab"
+  digest = "1:d8858077778bca77705b26d5b5262bf33a6bfbaa701fffca1578fd7ef4c4b975"
   name = "knative.dev/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "5bdfbd623938f0cc4a260bf06c503bbb0a76d6f9"
+  revision = "cdec09517ea85b37691ab4a81ee85d74e7e91b7e"
 
 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
@@ -334,7 +334,7 @@ func (r conditionsImpl) findUnhappyDependent() *Condition {
 	}
 
 	// If something was not initialized.
-	if len(r.dependents) != len(conditions) {
+	if len(r.dependents) > len(conditions) {
 		return &Condition{
 			Status: corev1.ConditionUnknown,
 		}
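Reading the one-line change above: with `>`, a resource carrying extra conditions beyond its declared dependents is no longer reported as uninitialized; only a shortfall of conditions is. A toy comparison of the two predicates, with hypothetical counts:

package main

import "fmt"

func main() {
	dependents, conditions := 3, 4 // hypothetical: one extra, inherited condition
	// Old predicate: any mismatch, including extras, read as "not initialized".
	fmt.Println(dependents != conditions) // true  -> Condition with Status Unknown
	// New predicate: only fewer conditions than dependents does.
	fmt.Println(dependents > conditions) // false -> extras are tolerated
}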
@@ -32,6 +32,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes/scheme"
 	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -89,15 +90,19 @@ func GetConfig(masterURL, kubeconfig string) (*rest.Config, error) {
 // or via reading a configMap from the API.
 // The context is expected to be initialized with injection.
 func GetLoggingConfig(ctx context.Context) (*logging.Config, error) {
-	loggingConfigMap, err := kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(logging.ConfigMapName(), metav1.GetOptions{})
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			return logging.NewConfigFromMap(nil)
-		} else {
-			return nil, err
-		}
+	var loggingConfigMap *corev1.ConfigMap
+	// These timeout and retry interval are set by heuristics.
+	// e.g. istio sidecar needs a few seconds to configure the pod network.
+	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
+		var err error
+		loggingConfigMap, err = kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(logging.ConfigMapName(), metav1.GetOptions{})
+		return err == nil || apierrors.IsNotFound(err), nil
+	}); err != nil {
+		return nil, err
+	}
+	if loggingConfigMap == nil {
+		return logging.NewConfigFromMap(nil)
 	}
 
 	return logging.NewConfigFromConfigMap(loggingConfigMap)
 }
 
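For context on the hunk above: `wait.PollImmediate` runs its condition function once immediately, then once per interval, until the function returns true, returns a non-nil error, or the timeout elapses (in which case it returns wait.ErrWaitTimeout). A minimal standalone sketch of the same retry shape; `fetch` is a hypothetical stand-in for the ConfigMap GET:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fetch stands in for the ConfigMap GET that may fail while the
// pod network (e.g. an istio sidecar) is still coming up.
func fetch() (string, error) { return "config", nil }

func main() {
	var result string
	// Try immediately, then once per second, for at most five seconds.
	err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		v, err := fetch()
		if err != nil {
			return false, nil // not done yet: swallow the error and poll again
		}
		result = v
		return true, nil // done: stop polling
	})
	if err != nil {
		fmt.Println("gave up:", err) // wait.ErrWaitTimeout after 5s
		return
	}
	fmt.Println("got:", result)
}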
@@ -60,6 +60,8 @@ const (
 	// OpenCensus is used to export to the OpenCensus Agent / Collector,
 	// which can send to many other services.
 	OpenCensus metricsBackend = "opencensus"
+	// None is used to export, well, nothing.
+	None metricsBackend = "none"
 
 	defaultBackendEnvName = "DEFAULT_METRICS_BACKEND"
 
@@ -53,7 +53,7 @@ type ObservabilityConfig struct {
 	EnableProbeRequestLog bool
 
 	// RequestMetricsBackend specifies the request metrics destination, e.g. Prometheus,
-	// Stackdriver.
+	// Stackdriver. "None" disables all backends.
 	RequestMetricsBackend string
 
 	// EnableProfiling indicates whether it is allowed to retrieve runtime profiling data from
@@ -195,6 +195,8 @@ func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.
 		e, err = newStackdriverExporter(config, logger)
 	case Prometheus:
 		e, err = newPrometheusExporter(config, logger)
+	case None:
+		e, err = nil, nil
 	default:
 		err = fmt.Errorf("unsupported metrics backend %v", config.backendDestination)
 	}
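The new case relies on `None` being a distinct `metricsBackend` value that deliberately produces no exporter and no error. A condensed sketch of the dispatch shape; the type name matches the diff, but the constructor stand-ins are illustrative, not the real knative.dev/pkg/metrics API:

package main

import "fmt"

type metricsBackend string

const (
	Prometheus metricsBackend = "prometheus"
	None       metricsBackend = "none"
)

// newExporter mirrors the switch above: "none" is a valid choice that
// intentionally yields neither an exporter nor an error.
func newExporter(b metricsBackend) (exporter interface{}, err error) {
	switch b {
	case Prometheus:
		exporter = "prometheus-exporter" // stand-in for newPrometheusExporter
	case None:
		exporter, err = nil, nil
	default:
		err = fmt.Errorf("unsupported metrics backend %v", b)
	}
	return
}

func main() {
	e, err := newExporter(None)
	fmt.Println(e, err) // <nil> <nil>: metrics are simply disabled
}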
@@ -0,0 +1,53 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clustermanager
+
+import (
+	"regexp"
+
+	"knative.dev/pkg/testutils/clustermanager/e2e-tests/boskos"
+)
+
+const (
+	defaultGKEMinNodes  = 1
+	defaultGKEMaxNodes  = 3
+	defaultGKENodeType  = "e2-standard-4"
+	defaultGKERegion    = "us-central1"
+	defaultGKEZone      = ""
+	regionEnv           = "E2E_CLUSTER_REGION"
+	backupRegionEnv     = "E2E_CLUSTER_BACKUP_REGIONS"
+	defaultResourceType = boskos.GKEProjectResource
+
+	clusterRunning = "RUNNING"
+)
+
+var (
+	protectedProjects       = []string{"knative-tests"}
+	protectedClusters       = []string{"knative-prow"}
+	defaultGKEBackupRegions = []string{"us-west1", "us-east1"}
+
+	// If one of the error patterns below is matched, it would be recommended to
+	// retry creating the cluster in a different region/zone.
+	// - stockout (https://github.com/knative/test-infra/issues/592)
+	// - latest GKE not available in this region/zone yet (https://github.com/knative/test-infra/issues/694)
+	retryableCreationErrors = []*regexp.Regexp{
+		regexp.MustCompile(".*Master version \"[0-9a-z\\-.]+\" is unsupported.*"),
+		regexp.MustCompile(".*No valid versions with the prefix \"[0-9.]+\" found.*"),
+		regexp.MustCompile(".*does not have enough resources available to fulfill.*"),
+		regexp.MustCompile(".*only \\d+ nodes out of \\d+ have registered; this is likely due to Nodes failing to start correctly.*"),
+	}
+)
@@ -30,25 +30,6 @@ import (
 	"knative.dev/pkg/testutils/clustermanager/e2e-tests/common"
 )
 
-const (
-	DefaultGKEMinNodes  = 1
-	DefaultGKEMaxNodes  = 3
-	DefaultGKENodeType  = "e2-standard-4"
-	DefaultGKERegion    = "us-central1"
-	DefaultGKEZone      = ""
-	regionEnv           = "E2E_CLUSTER_REGION"
-	backupRegionEnv     = "E2E_CLUSTER_BACKUP_REGIONS"
-	DefaultResourceType = boskos.GKEProjectResource
-
-	ClusterRunning = "RUNNING"
-)
-
-var (
-	DefaultGKEBackupRegions = []string{"us-west1", "us-east1"}
-	protectedProjects       = []string{"knative-tests"}
-	protectedClusters       = []string{"knative-prow"}
-)
-
 // GKEClient implements Client
 type GKEClient struct {
 }
@@ -65,10 +46,6 @@ type GKERequest struct {
 	// SkipCreation: skips cluster creation
 	SkipCreation bool
 
-	// NeedsCleanup: enforce clean up if given this option, used when running
-	// locally
-	NeedsCleanup bool
-
 	// ResourceType: the boskos resource type to acquire to hold the cluster in create
 	ResourceType string
 }
@@ -80,9 +57,9 @@ type GKECluster struct {
 	Project string
 	// IsBoskos is true if the GCP project used is managed by boskos
 	IsBoskos bool
-	// NeedsCleanup tells whether the cluster needs to be deleted afterwards
-	// This probably should be part of task wrapper's logic
-	NeedsCleanup bool
+	// AsyncCleanup tells whether the cluster needs to be deleted asynchronously afterwards
+	// It should be true on Prow but false on local.
+	AsyncCleanup bool
 	Cluster    *container.Cluster
 	operations gke.SDKOperations
 	boskosOps  boskos.Operation
@@ -93,45 +70,47 @@ type GKECluster struct {
 func (gs *GKEClient) Setup(r GKERequest) ClusterOperations {
 	gc := &GKECluster{}
 
-	if r.Project != "" { // use provided project and create cluster
+	if r.Project != "" { // use provided project to create cluster
 		gc.Project = r.Project
-		gc.NeedsCleanup = true
+		gc.AsyncCleanup = true
+	} else if common.IsProw() { // if no project is provided and is on Prow, use boskos
+		gc.IsBoskos = true
 	}
 
 	if r.MinNodes == 0 {
-		r.MinNodes = DefaultGKEMinNodes
+		r.MinNodes = defaultGKEMinNodes
 	}
 	if r.MaxNodes == 0 {
-		r.MaxNodes = DefaultGKEMaxNodes
+		r.MaxNodes = defaultGKEMaxNodes
 		// We don't want MaxNodes < MinNodes
 		if r.MinNodes > r.MaxNodes {
 			r.MaxNodes = r.MinNodes
 		}
 	}
 	if r.NodeType == "" {
-		r.NodeType = DefaultGKENodeType
+		r.NodeType = defaultGKENodeType
 	}
 	// Only use default backup regions if region is not provided
 	if len(r.BackupRegions) == 0 && r.Region == "" {
-		r.BackupRegions = DefaultGKEBackupRegions
+		r.BackupRegions = defaultGKEBackupRegions
 		if common.GetOSEnv(backupRegionEnv) != "" {
 			r.BackupRegions = strings.Split(common.GetOSEnv(backupRegionEnv), " ")
 		}
 	}
 	if r.Region == "" {
-		r.Region = DefaultGKERegion
+		r.Region = defaultGKERegion
 		if common.GetOSEnv(regionEnv) != "" {
 			r.Region = common.GetOSEnv(regionEnv)
 		}
 	}
 	if r.Zone == "" {
-		r.Zone = DefaultGKEZone
+		r.Zone = defaultGKEZone
 	} else { // No backupregions if zone is provided
 		r.BackupRegions = make([]string, 0)
 	}
 
 	if r.ResourceType == "" {
-		r.ResourceType = DefaultResourceType
+		r.ResourceType = defaultResourceType
 	}
 
 	gc.Request = &r
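Setup resolves the region with a fixed precedence: an explicit request wins, else the E2E_CLUSTER_REGION override, else the package default. A self-contained sketch of that precedence; os.Getenv stands in for the diff's common.GetOSEnv helper:

package main

import (
	"fmt"
	"os"
)

const defaultGKERegion = "us-central1"

// resolveRegion mirrors Setup's ordering: explicit value, then env
// override, then the built-in default.
func resolveRegion(requested string) string {
	if requested != "" {
		return requested // explicit request wins
	}
	if env := os.Getenv("E2E_CLUSTER_REGION"); env != "" {
		return env // env var overrides the default
	}
	return defaultGKERegion
}

func main() {
	os.Setenv("E2E_CLUSTER_REGION", "europe-west1")
	fmt.Println(resolveRegion(""))         // europe-west1
	fmt.Println(resolveRegion("us-east1")) // us-east1
}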
@@ -178,18 +157,13 @@ func (gc *GKECluster) Acquire() error {
 	// If comes here we are very likely going to create a cluster, unless
 	// the cluster already exists
 
-	// Cleanup if cluster is created by this client
-	gc.NeedsCleanup = !common.IsProw()
-
-	// Get project name from boskos if running in Prow, otherwise it should fail
-	// since we don't know which project to use
-	if gc.Request.Project == "" && common.IsProw() {
+	// If running on Prow and project name is not provided, get project name from boskos.
+	if gc.Project == "" && gc.IsBoskos {
 		project, err := gc.boskosOps.AcquireGKEProject(gc.Request.ResourceType)
 		if err != nil {
-			return fmt.Errorf("failed acquiring boskos project: '%v'", err)
+			return fmt.Errorf("failed acquiring boskos project: '%w'", err)
 		}
 		gc.Project = project.Name
-		gc.IsBoskos = true
 	}
 	if gc.Project == "" {
 		return errors.New("GCP project must be set")
@@ -230,7 +204,7 @@ func (gc *GKECluster) Acquire() error {
 		clusterName := request.ClusterName
 		// Use cluster if it already exists and running
 		existingCluster, _ := gc.operations.GetCluster(gc.Project, region, request.Zone, clusterName)
-		if existingCluster != nil && existingCluster.Status == ClusterRunning {
+		if existingCluster != nil && existingCluster.Status == clusterRunning {
 			gc.Cluster = existingCluster
 			return nil
 		}
@@ -242,13 +216,12 @@ func (gc *GKECluster) Acquire() error {
 		}
 		if err != nil {
 			errMsg := fmt.Sprintf("Error during cluster creation: '%v'. ", err)
-			if gc.NeedsCleanup { // Delete half created cluster if it's user created
+			if !common.IsProw() { // Delete half created cluster if it's user created
 				errMsg = fmt.Sprintf("%sDeleting cluster %q in region %q zone %q in background...\n", errMsg, clusterName, region, request.Zone)
 				gc.operations.DeleteClusterAsync(gc.Project, region, request.Zone, clusterName)
 			}
 			// Retry another region if cluster creation failed.
-			// TODO(chaodaiG): catch specific errors as we know what the error look like for stockout etc.
-			if i != len(regions)-1 {
+			if i != len(regions)-1 && needsRetryCreation(err.Error()) {
 				errMsg = fmt.Sprintf("%sRetry another region %q for cluster creation", errMsg, regions[i+1])
 			}
 			log.Print(errMsg)
@@ -262,25 +235,32 @@ func (gc *GKECluster) Acquire() error {
 	return err
 }
 
-// Delete takes care of GKE cluster resource cleanup. It only release Boskos resource if running in
-// Prow, otherwise deletes the cluster if marked NeedsCleanup
+// needsRetryCreation determines if cluster creation needs to be retried based on the error message.
+func needsRetryCreation(errMsg string) bool {
+	for _, regx := range retryableCreationErrors {
+		if regx.MatchString(errMsg) {
+			return true
+		}
+	}
+	return false
+}
+
+// Delete takes care of GKE cluster resource cleanup.
+// It also releases Boskos resource if running in Prow.
 func (gc *GKECluster) Delete() error {
-	if err := gc.checkEnvironment(); err != nil {
-		return fmt.Errorf("failed checking project/cluster from environment: '%v'", err)
+	var err error
+	if err = gc.checkEnvironment(); err != nil {
+		return fmt.Errorf("failed checking project/cluster from environment: '%w'", err)
 	}
 	gc.ensureProtected()
-	// Release Boskos if running in Prow, will let Janitor taking care of
-	// clusters deleting
+	// Release Boskos if running in Prow
 	if gc.IsBoskos {
 		log.Printf("Releasing Boskos resource: '%v'", gc.Project)
-		return gc.boskosOps.ReleaseGKEProject(gc.Project)
+		if err = gc.boskosOps.ReleaseGKEProject(gc.Project); err != nil {
+			return fmt.Errorf("failed releasing boskos resource: '%w'", err)
+		}
 	}
 
-	// NeedsCleanup is only true if running locally and cluster created by the
-	// process
-	if !gc.NeedsCleanup && !gc.Request.NeedsCleanup {
-		return nil
-	}
 	// Should only get here if running locally and cluster created by this
 	// client, so at this moment cluster should have been set
 	if gc.Cluster == nil {
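As a quick check of the new classifier: a stockout-style message should match the third pattern in retryableCreationErrors and trigger a retry in the next backup region. A standalone sketch with the list trimmed to that one pattern; the error string is paraphrased, not an exact GKE message:

package main

import (
	"fmt"
	"regexp"
)

// One pattern from retryableCreationErrors, kept verbatim for brevity.
var retryable = []*regexp.Regexp{
	regexp.MustCompile(".*does not have enough resources available to fulfill.*"),
}

// needsRetry matches the shape of needsRetryCreation above.
func needsRetry(errMsg string) bool {
	for _, re := range retryable {
		if re.MatchString(errMsg) {
			return true
		}
	}
	return false
}

func main() {
	msg := `creating cluster: zone "us-central1-a" does not have enough resources available to fulfill the request`
	fmt.Println(needsRetry(msg)) // true -> retry in the next backup region
}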
@@ -288,8 +268,13 @@ func (gc *GKECluster) Delete() error {
 	}
 	log.Printf("Deleting cluster %q in %q", gc.Cluster.Name, gc.Cluster.Location)
 	region, zone := gke.RegionZoneFromLoc(gc.Cluster.Location)
-	if err := gc.operations.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name); err != nil {
-		return fmt.Errorf("failed deleting cluster: '%v'", err)
+	if gc.AsyncCleanup {
+		_, err = gc.operations.DeleteClusterAsync(gc.Project, region, zone, gc.Cluster.Name)
+	} else {
+		err = gc.operations.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name)
+	}
+	if err != nil {
+		return fmt.Errorf("failed deleting cluster: '%w'", err)
 	}
 	return nil
 }
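This commit also flips several fmt.Errorf verbs from %v to %w. The practical difference: %w keeps the wrapped error on the chain so callers can still detect it with errors.Is or errors.As, while %v flattens it to text. A two-case demonstration:

package main

import (
	"errors"
	"fmt"
)

var errQuota = errors.New("quota exceeded")

func main() {
	wrapped := fmt.Errorf("failed deleting cluster: '%w'", errQuota)
	fmt.Println(errors.Is(wrapped, errQuota)) // true: %w keeps the chain
	flat := fmt.Errorf("failed deleting cluster: '%v'", errQuota)
	fmt.Println(errors.Is(flat, errQuota)) // false: %v flattens to text
}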
@@ -45,7 +45,7 @@ func getResourceName(rt ResourceType) (string, error) {
 			return "", fmt.Errorf("failed getting BUILD_NUMBER env var")
 		}
 		if len(buildNumStr) > 20 {
-			buildNumStr = string(buildNumStr[:20])
+			buildNumStr = buildNumStr[:20]
 		}
 		resName = fmt.Sprintf("%s-%s", resName, buildNumStr)
 	}
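The dropped conversion above is redundant because slicing a Go string already yields a string; demonstrated below:

package main

import "fmt"

func main() {
	s := "123456789012345678901234" // 24 chars, like an over-long BUILD_NUMBER
	t := s[:20]                     // slicing a string yields a string; no conversion needed
	fmt.Printf("%T %q\n", t, t)     // string "12345678901234567890"
}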
@@ -44,7 +44,7 @@ const (
 // after the cluster operation is finished
 func writeMetaData(cluster *container.Cluster, project string) {
 	// Set up metadata client for saving metadata
-	c, err := client.NewClient("")
+	c, err := client.New("")
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -21,12 +21,12 @@ import (
 	"log"
 
 	clm "knative.dev/pkg/testutils/clustermanager/e2e-tests"
+	"knative.dev/pkg/testutils/clustermanager/e2e-tests/common"
 	"knative.dev/pkg/testutils/clustermanager/prow-cluster-operation/options"
 )
 
 // Delete deletes a GKE cluster
 func Delete(o *options.RequestWrapper) error {
-	o.Request.NeedsCleanup = true
 	o.Request.SkipCreation = true
 
 	gkeClient := clm.GKEClient{}
@@ -40,15 +40,13 @@ func Delete(o *options.RequestWrapper) error {
 	if err = gkeOps.Delete(); err != nil {
 		return fmt.Errorf("failed deleting cluster: '%v'", err)
 	}
-	// TODO: uncomment the lines below when previous Delete command becomes
-	// async operation
-	// // Unset context with best effort. The first command only unsets current
-	// // context, but doesn't delete the entry from kubeconfig, and should return it's
-	// // context if succeeded, which can be used by the second command to
-	// // delete it from kubeconfig
-	// if out, err := common.StandardExec("kubectl", "config", "unset", "current-context"); err != nil {
-	// 	common.StandardExec("kubectl", "config", "unset", "contexts."+string(out))
-	// }
+	// Unset context with best effort. The first command only unsets current
+	// context, but doesn't delete the entry from kubeconfig, and should return it's
+	// context if succeeded, which can be used by the second command to
+	// delete it from kubeconfig
+	if out, err := common.StandardExec("kubectl", "config", "unset", "current-context"); err != nil {
+		common.StandardExec("kubectl", "config", "unset", "contexts."+string(out))
+	}
 
 	return nil
 }
@@ -39,10 +39,10 @@ type client struct {
 	Path string
 }
 
-// NewClient creates a client, takes custom directory for storing `metadata.json`.
+// New creates a client, takes custom directory for storing `metadata.json`.
 // It reads existing `metadata.json` file if it exists, otherwise creates it.
 // Errors out if there is any file i/o problem other than file not exist error.
-func NewClient(dir string) (*client, error) {
+func New(dir string) (*client, error) {
 	c := &client{
 		MetaData: make(map[string]string),
 	}
@@ -53,7 +53,7 @@ func NewClient(dir string) (*client, error) {
 	c.Path = path.Join(dir, filename)
 	if _, err := os.Stat(dir); os.IsNotExist(err) {
 		if err = os.MkdirAll(dir, 0777); err != nil {
-			return nil, fmt.Errorf("Failed to create directory: %v", err)
+			return nil, fmt.Errorf("failed creating directory: %w", err)
 		}
 	}
 	return c, nil
@@ -38,7 +38,7 @@ func main() {
 	// Create with default path of metahelper/client, so that the path is
 	// consistent with all other consumers of metahelper/client that run within
 	// the same context of this tool
-	c, err := client.NewClient("")
+	c, err := client.New("")
 	if err != nil {
 		log.Fatal(err)
 	}