[master] Auto-update dependencies (#243)

Produced via:
  `./hack/update-deps.sh --upgrade && ./hack/update-codegen.sh`
/assign n3wscott vagababov
/cc n3wscott vagababov
Matt Moore 2020-04-06 08:00:20 -07:00 committed by GitHub
parent 4f26948a1b
commit 6bd9f38fb0
14 changed files with 137 additions and 92 deletions

Gopkg.lock (generated)

@@ -966,7 +966,7 @@
[[projects]]
branch = "master"
-digest = "1:ae72dd6d2950c1ffe31061c827bb8904fce102f9134c4309cca0fe1ac5864c26"
+digest = "1:9b83d1c207c67003de88414626add45e9bce8c47502e07cbe6cb5fd48e39f2df"
name = "knative.dev/pkg"
packages = [
"apis",
@@ -986,7 +986,7 @@
"reconciler",
]
pruneopts = "T"
revision = "55250e6aab62f767b8680a12ddeb771c519d78fe"
revision = "92cdec5b35931192590f5d8affad86898b6c50d6"
[[projects]]
branch = "master"
@@ -997,7 +997,7 @@
"tools/dep-collector",
]
pruneopts = "UT"
revision = "cdec09517ea85b37691ab4a81ee85d74e7e91b7e"
revision = "e84f0d1a364732918a6635cb07d895ff77ecba76"
[[projects]]
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"

vendor/knative.dev/pkg/Gopkg.lock (generated, vendored)

@@ -1362,14 +1362,14 @@
[[projects]]
branch = "master"
-digest = "1:5da0f15efcd7dafa6296be9cd4a630fcb5253cf202e54638299221ae7df419ab"
+digest = "1:d8858077778bca77705b26d5b5262bf33a6bfbaa701fffca1578fd7ef4c4b975"
name = "knative.dev/test-infra"
packages = [
"scripts",
"tools/dep-collector",
]
pruneopts = "UT"
revision = "5bdfbd623938f0cc4a260bf06c503bbb0a76d6f9"
revision = "cdec09517ea85b37691ab4a81ee85d74e7e91b7e"
[[projects]]
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"


@@ -334,7 +334,7 @@ func (r conditionsImpl) findUnhappyDependent() *Condition {
}
// If something was not initialized.
-if len(r.dependents) != len(conditions) {
+if len(r.dependents) > len(conditions) {
return &Condition{
Status: corev1.ConditionUnknown,
}
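
The relaxed guard changes when a resource counts as uninitialized: only having fewer conditions than managed dependents now trips it, so extra conditions (for example, ones written by another controller) no longer do. A minimal stand-alone sketch, with toy types and hypothetical condition names:

```go
package main

import "fmt"

// condition is a toy stand-in for knative.dev/pkg/apis.Condition.
type condition struct {
	Type   string
	Status string
}

// uninitialized mirrors the changed guard: report "not initialized" only
// when there are fewer conditions than managed dependents. The old `!=`
// also fired when a resource carried *extra* conditions.
func uninitialized(dependents []string, conditions []condition) bool {
	return len(dependents) > len(conditions)
}

func main() {
	deps := []string{"FooReady", "BarReady"}
	conds := []condition{{"FooReady", "True"}, {"BarReady", "True"}, {"Extra", "True"}}
	fmt.Println(uninitialized(deps, conds)) // false; `!=` would have said true
}
```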


@@ -32,6 +32,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -89,15 +90,19 @@ func GetConfig(masterURL, kubeconfig string) (*rest.Config, error) {
// or via reading a configMap from the API.
// The context is expected to be initialized with injection.
func GetLoggingConfig(ctx context.Context) (*logging.Config, error) {
-loggingConfigMap, err := kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(logging.ConfigMapName(), metav1.GetOptions{})
-if err != nil {
-if apierrors.IsNotFound(err) {
-return logging.NewConfigFromMap(nil)
-} else {
+var loggingConfigMap *corev1.ConfigMap
+// The timeout and retry interval are set by heuristics.
+// e.g. istio sidecar needs a few seconds to configure the pod network.
+if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
+var err error
+loggingConfigMap, err = kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(logging.ConfigMapName(), metav1.GetOptions{})
+return err == nil || apierrors.IsNotFound(err), nil
+}); err != nil {
return nil, err
}
+if loggingConfigMap == nil {
+return logging.NewConfigFromMap(nil)
+}
return logging.NewConfigFromConfigMap(loggingConfigMap)
}
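
The retry added here leans on `wait.PollImmediate` from `k8s.io/apimachinery`. A minimal runnable sketch of its contract (the three-attempt threshold is invented for illustration):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Same shape as the ConfigMap fetch above: try immediately, then every
	// second, for at most five seconds. Returning (false, nil) retries;
	// a non-nil error aborts at once, which is why the code above folds
	// transient errors into the boolean instead of returning them.
	err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend the lookup succeeds on try 3
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```

If the condition never reports done, `wait.PollImmediate` returns `wait.ErrWaitTimeout`, which `GetLoggingConfig` would pass back to its caller.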


@@ -60,6 +60,8 @@ const (
// OpenCensus is used to export to the OpenCensus Agent / Collector,
// which can send to many other services.
OpenCensus metricsBackend = "opencensus"
+// None is used to export, well, nothing.
+None metricsBackend = "none"
defaultBackendEnvName = "DEFAULT_METRICS_BACKEND"


@@ -53,7 +53,7 @@ type ObservabilityConfig struct {
EnableProbeRequestLog bool
// RequestMetricsBackend specifies the request metrics destination, e.g. Prometheus,
-// Stackdriver.
+// Stackdriver. "None" disables all backends.
RequestMetricsBackend string
// EnableProfiling indicates whether it is allowed to retrieve runtime profiling data from


@@ -195,6 +195,8 @@ func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.
e, err = newStackdriverExporter(config, logger)
case Prometheus:
e, err = newPrometheusExporter(config, logger)
+case None:
+e, err = nil, nil
default:
err = fmt.Errorf("unsupported metrics backend %v", config.backendDestination)
}
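
The `None` case deliberately yields a nil exporter together with a nil error, so the caller registers no backend instead of rejecting the config. A toy sketch of the pattern (stand-in types; the real function returns an OpenCensus `view.Exporter`):

```go
package main

import "fmt"

// exporter is a stand-in for OpenCensus' view.Exporter.
type exporter interface{ Flush() }

// pick mirrors the switch above: "none" returns (nil, nil), while an
// unknown backend is still an error.
func pick(backend string) (exporter, error) {
	switch backend {
	case "none":
		return nil, nil
	default:
		return nil, fmt.Errorf("unsupported metrics backend %v", backend)
	}
}

func main() {
	e, err := pick("none")
	fmt.Println(e == nil, err == nil) // true true: nothing gets registered
}
```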


@@ -0,0 +1,53 @@
+/*
+Copyright 2020 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package clustermanager
+import (
+"regexp"
+"knative.dev/pkg/testutils/clustermanager/e2e-tests/boskos"
+)
+const (
+defaultGKEMinNodes = 1
+defaultGKEMaxNodes = 3
+defaultGKENodeType = "e2-standard-4"
+defaultGKERegion = "us-central1"
+defaultGKEZone = ""
+regionEnv = "E2E_CLUSTER_REGION"
+backupRegionEnv = "E2E_CLUSTER_BACKUP_REGIONS"
+defaultResourceType = boskos.GKEProjectResource
+clusterRunning = "RUNNING"
+)
+var (
+protectedProjects = []string{"knative-tests"}
+protectedClusters = []string{"knative-prow"}
+defaultGKEBackupRegions = []string{"us-west1", "us-east1"}
+// If one of the error patterns below is matched, it is recommended to
+// retry creating the cluster in a different region/zone.
+// - stockout (https://github.com/knative/test-infra/issues/592)
+// - latest GKE not available in this region/zone yet (https://github.com/knative/test-infra/issues/694)
+retryableCreationErrors = []*regexp.Regexp{
+regexp.MustCompile(".*Master version \"[0-9a-z\\-.]+\" is unsupported.*"),
+regexp.MustCompile(".*No valid versions with the prefix \"[0-9.]+\" found.*"),
+regexp.MustCompile(".*does not have enough resources available to fulfill.*"),
+regexp.MustCompile(".*only \\d+ nodes out of \\d+ have registered; this is likely due to Nodes failing to start correctly.*"),
+}
+)
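
These patterns feed the `needsRetryCreation` helper added to gke.go later in this diff. A quick self-contained check, using one of the patterns above against an invented stockout-style message:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Third pattern from the list above (GCE stockout); the message below
	// is invented for illustration.
	stockout := regexp.MustCompile(".*does not have enough resources available to fulfill.*")
	msg := `zone "us-central1-a" does not have enough resources available to fulfill the request`
	fmt.Println(stockout.MatchString(msg)) // true, so retry in another region/zone
}
```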


@@ -30,25 +30,6 @@ import (
"knative.dev/pkg/testutils/clustermanager/e2e-tests/common"
)
-const (
-DefaultGKEMinNodes = 1
-DefaultGKEMaxNodes = 3
-DefaultGKENodeType = "e2-standard-4"
-DefaultGKERegion = "us-central1"
-DefaultGKEZone = ""
-regionEnv = "E2E_CLUSTER_REGION"
-backupRegionEnv = "E2E_CLUSTER_BACKUP_REGIONS"
-DefaultResourceType = boskos.GKEProjectResource
-ClusterRunning = "RUNNING"
-)
-var (
-DefaultGKEBackupRegions = []string{"us-west1", "us-east1"}
-protectedProjects = []string{"knative-tests"}
-protectedClusters = []string{"knative-prow"}
-)
// GKEClient implements Client
type GKEClient struct {
}
@@ -65,10 +46,6 @@ type GKERequest struct {
// SkipCreation: skips cluster creation
SkipCreation bool
-// NeedsCleanup: enforce clean up if given this option, used when running
-// locally
-NeedsCleanup bool
// ResourceType: the boskos resource type to acquire to hold the cluster in create
ResourceType string
}
@@ -80,9 +57,9 @@ type GKECluster struct {
Project string
// IsBoskos is true if the GCP project used is managed by boskos
IsBoskos bool
-// NeedsCleanup tells whether the cluster needs to be deleted afterwards
-// This probably should be part of task wrapper's logic
-NeedsCleanup bool
+// AsyncCleanup tells whether the cluster needs to be deleted asynchronously afterwards
+// It should be true on Prow but false on local.
+AsyncCleanup bool
Cluster *container.Cluster
operations gke.SDKOperations
boskosOps boskos.Operation
@@ -93,45 +70,47 @@ type GKECluster struct {
func (gs *GKEClient) Setup(r GKERequest) ClusterOperations {
gc := &GKECluster{}
if r.Project != "" { // use provided project and create cluster
if r.Project != "" { // use provided project to create cluster
gc.Project = r.Project
-gc.NeedsCleanup = true
+gc.AsyncCleanup = true
+} else if common.IsProw() { // if no project is provided and is on Prow, use boskos
+gc.IsBoskos = true
}
if r.MinNodes == 0 {
-r.MinNodes = DefaultGKEMinNodes
+r.MinNodes = defaultGKEMinNodes
}
if r.MaxNodes == 0 {
-r.MaxNodes = DefaultGKEMaxNodes
+r.MaxNodes = defaultGKEMaxNodes
// We don't want MaxNodes < MinNodes
if r.MinNodes > r.MaxNodes {
r.MaxNodes = r.MinNodes
}
}
if r.NodeType == "" {
-r.NodeType = DefaultGKENodeType
+r.NodeType = defaultGKENodeType
}
// Only use default backup regions if region is not provided
if len(r.BackupRegions) == 0 && r.Region == "" {
-r.BackupRegions = DefaultGKEBackupRegions
+r.BackupRegions = defaultGKEBackupRegions
if common.GetOSEnv(backupRegionEnv) != "" {
r.BackupRegions = strings.Split(common.GetOSEnv(backupRegionEnv), " ")
}
}
if r.Region == "" {
-r.Region = DefaultGKERegion
+r.Region = defaultGKERegion
if common.GetOSEnv(regionEnv) != "" {
r.Region = common.GetOSEnv(regionEnv)
}
}
if r.Zone == "" {
-r.Zone = DefaultGKEZone
+r.Zone = defaultGKEZone
} else { // No backupregions if zone is provided
r.BackupRegions = make([]string, 0)
}
if r.ResourceType == "" {
-r.ResourceType = DefaultResourceType
+r.ResourceType = defaultResourceType
}
gc.Request = &r
@@ -178,18 +157,13 @@ func (gc *GKECluster) Acquire() error {
// If comes here we are very likely going to create a cluster, unless
// the cluster already exists
-// Cleanup if cluster is created by this client
-gc.NeedsCleanup = !common.IsProw()
-// Get project name from boskos if running in Prow, otherwise it should fail
-// since we don't know which project to use
-if gc.Request.Project == "" && common.IsProw() {
+// If running on Prow and project name is not provided, get project name from boskos.
+if gc.Project == "" && gc.IsBoskos {
project, err := gc.boskosOps.AcquireGKEProject(gc.Request.ResourceType)
if err != nil {
return fmt.Errorf("failed acquiring boskos project: '%v'", err)
return fmt.Errorf("failed acquiring boskos project: '%w'", err)
}
gc.Project = project.Name
-gc.IsBoskos = true
}
if gc.Project == "" {
return errors.New("GCP project must be set")
@@ -230,7 +204,7 @@ func (gc *GKECluster) Acquire() error {
clusterName := request.ClusterName
// Use cluster if it already exists and running
existingCluster, _ := gc.operations.GetCluster(gc.Project, region, request.Zone, clusterName)
-if existingCluster != nil && existingCluster.Status == ClusterRunning {
+if existingCluster != nil && existingCluster.Status == clusterRunning {
gc.Cluster = existingCluster
return nil
}
@@ -242,13 +216,12 @@ func (gc *GKECluster) Acquire() error {
}
if err != nil {
errMsg := fmt.Sprintf("Error during cluster creation: '%v'. ", err)
-if gc.NeedsCleanup { // Delete half created cluster if it's user created
+if !common.IsProw() { // Delete half created cluster if it's user created
errMsg = fmt.Sprintf("%sDeleting cluster %q in region %q zone %q in background...\n", errMsg, clusterName, region, request.Zone)
gc.operations.DeleteClusterAsync(gc.Project, region, request.Zone, clusterName)
}
// Retry another region if cluster creation failed.
-// TODO(chaodaiG): catch specific errors as we know what the error look like for stockout etc.
-if i != len(regions)-1 {
+if i != len(regions)-1 && needsRetryCreation(err.Error()) {
errMsg = fmt.Sprintf("%sRetry another region %q for cluster creation", errMsg, regions[i+1])
}
log.Print(errMsg)
@@ -262,25 +235,32 @@ func (gc *GKECluster) Acquire() error {
return err
}
-// Delete takes care of GKE cluster resource cleanup. It only release Boskos resource if running in
-// Prow, otherwise deletes the cluster if marked NeedsCleanup
+// needsRetryCreation determines if cluster creation needs to be retried based on the error message.
+func needsRetryCreation(errMsg string) bool {
+for _, regx := range retryableCreationErrors {
+if regx.MatchString(errMsg) {
+return true
+}
+}
+return false
+}
+// Delete takes care of GKE cluster resource cleanup.
+// It also releases Boskos resource if running in Prow.
func (gc *GKECluster) Delete() error {
-if err := gc.checkEnvironment(); err != nil {
-return fmt.Errorf("failed checking project/cluster from environment: '%v'", err)
+var err error
+if err = gc.checkEnvironment(); err != nil {
+return fmt.Errorf("failed checking project/cluster from environment: '%w'", err)
}
gc.ensureProtected()
-// Release Boskos if running in Prow, will let Janitor taking care of
-// clusters deleting
+// Release Boskos if running in Prow
if gc.IsBoskos {
log.Printf("Releasing Boskos resource: '%v'", gc.Project)
-return gc.boskosOps.ReleaseGKEProject(gc.Project)
+if err = gc.boskosOps.ReleaseGKEProject(gc.Project); err != nil {
+return fmt.Errorf("failed releasing boskos resource: '%w'", err)
+}
}
-// NeedsCleanup is only true if running locally and cluster created by the
-// process
-if !gc.NeedsCleanup && !gc.Request.NeedsCleanup {
-return nil
-}
// Should only get here if running locally and cluster created by this
// client, so at this moment cluster should have been set
if gc.Cluster == nil {
@@ -288,8 +268,13 @@ func (gc *GKECluster) Delete() error {
}
log.Printf("Deleting cluster %q in %q", gc.Cluster.Name, gc.Cluster.Location)
region, zone := gke.RegionZoneFromLoc(gc.Cluster.Location)
-if err := gc.operations.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name); err != nil {
-return fmt.Errorf("failed deleting cluster: '%v'", err)
+if gc.AsyncCleanup {
+_, err = gc.operations.DeleteClusterAsync(gc.Project, region, zone, gc.Cluster.Name)
+} else {
+err = gc.operations.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name)
+}
+if err != nil {
+return fmt.Errorf("failed deleting cluster: '%w'", err)
}
return nil
}


@@ -45,7 +45,7 @@ func getResourceName(rt ResourceType) (string, error) {
return "", fmt.Errorf("failed getting BUILD_NUMBER env var")
}
if len(buildNumStr) > 20 {
-buildNumStr = string(buildNumStr[:20])
+buildNumStr = buildNumStr[:20]
}
resName = fmt.Sprintf("%s-%s", resName, buildNumStr)
}


@@ -44,7 +44,7 @@ const (
// after the cluster operation is finished
func writeMetaData(cluster *container.Cluster, project string) {
// Set up metadata client for saving metadata
c, err := client.NewClient("")
c, err := client.New("")
if err != nil {
log.Fatal(err)
}


@@ -21,12 +21,12 @@ import (
"log"
clm "knative.dev/pkg/testutils/clustermanager/e2e-tests"
"knative.dev/pkg/testutils/clustermanager/e2e-tests/common"
"knative.dev/pkg/testutils/clustermanager/prow-cluster-operation/options"
)
// Delete deletes a GKE cluster
func Delete(o *options.RequestWrapper) error {
-o.Request.NeedsCleanup = true
o.Request.SkipCreation = true
gkeClient := clm.GKEClient{}
@@ -40,15 +40,13 @@ func Delete(o *options.RequestWrapper) error {
if err = gkeOps.Delete(); err != nil {
return fmt.Errorf("failed deleting cluster: '%v'", err)
}
-// TODO: uncomment the lines below when previous Delete command becomes
-// async operation
-// // Unset context with best effort. The first command only unsets current
-// // context, but doesn't delete the entry from kubeconfig, and should return it's
-// // context if succeeded, which can be used by the second command to
-// // delete it from kubeconfig
-// if out, err := common.StandardExec("kubectl", "config", "unset", "current-context"); err != nil {
-// common.StandardExec("kubectl", "config", "unset", "contexts."+string(out))
-// }
+// Unset context with best effort. The first command only unsets current
+// context, but doesn't delete the entry from kubeconfig, and should return its
+// context if succeeded, which can be used by the second command to
+// delete it from kubeconfig
+if out, err := common.StandardExec("kubectl", "config", "unset", "current-context"); err != nil {
+common.StandardExec("kubectl", "config", "unset", "contexts."+string(out))
+}
return nil
}


@@ -39,10 +39,10 @@ type client struct {
Path string
}
-// NewClient creates a client, takes custom directory for storing `metadata.json`.
+// New creates a client, takes custom directory for storing `metadata.json`.
// It reads existing `metadata.json` file if it exists, otherwise creates it.
// Errors out if there is any file i/o problem other than file not exist error.
-func NewClient(dir string) (*client, error) {
+func New(dir string) (*client, error) {
c := &client{
MetaData: make(map[string]string),
}
@@ -53,7 +53,7 @@ func NewClient(dir string) (*client, error) {
c.Path = path.Join(dir, filename)
if _, err := os.Stat(dir); os.IsNotExist(err) {
if err = os.MkdirAll(dir, 0777); err != nil {
return nil, fmt.Errorf("Failed to create directory: %v", err)
return nil, fmt.Errorf("failed creating directory: %w", err)
}
}
return c, nil
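
The `%v` to `%w` change throughout this commit is more than message style: `%w` keeps the cause on the error chain so callers can still inspect it. A minimal demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, statErr := os.Stat("/no/such/dir")
	// With %w the cause stays unwrappable; with the old %v it was
	// flattened into the message and lost to errors.Is/errors.As.
	wrapped := fmt.Errorf("failed creating directory: %w", statErr)
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}
```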


@@ -38,7 +38,7 @@ func main() {
// Create with default path of metahelper/client, so that the path is
// consistent with all other consumers of metahelper/client that run within
// the same context of this tool
c, err := client.NewClient("")
c, err := client.New("")
if err != nil {
log.Fatal(err)
}