Auto-update dependencies (#100)

Produced via:
  `dep ensure -update knative.dev/test-infra knative.dev/pkg`
/assign mattmoor
mattmoor-sockpuppet 2019-09-16 07:20:40 -07:00 committed by Knative Prow Robot
parent 6185837d36
commit 3f124e23c8
12 changed files with 153 additions and 60 deletions

Gopkg.lock (generated)
View File

@@ -927,7 +927,7 @@
[[projects]]
branch = "master"
digest = "1:bdd1f7aa0a13b578d332f970e36861a834ff8559e06b0b718fa5d8df78e448d8"
digest = "1:8c823908d13842f7140f59dc246b98d523f436e884ff7c5cd44aae9c3889c890"
name = "knative.dev/pkg"
packages = [
"apis",
@@ -946,7 +946,7 @@
"metrics/metricskey",
]
pruneopts = "T"
revision = "34157973944a3419f1e38fce8dfb11a2fb5aadbb"
revision = "d8d1bc27181b1a612f7648632b84b9706b57df34"
[[projects]]
branch = "master"
@@ -957,7 +957,7 @@
"tools/dep-collector",
]
pruneopts = "UT"
revision = "ca04b8453a8779f3c800c5e4bcda43ff53ec6f80"
revision = "4adb699576a34afae2e163f0ad5a0d2db0b30a5b"
[solve-meta]
analyzer-name = "dep"

View File

@@ -37,6 +37,8 @@ func marshallBeforeAfter(before, after interface{}) ([]byte, []byte, error) {
return rawBefore, rawAfter, nil
}
// CreateMergePatch creates a json merge patch as specified in
// http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
func CreateMergePatch(before, after interface{}) ([]byte, error) {
rawBefore, rawAfter, err := marshallBeforeAfter(before, after)
if err != nil {
@@ -45,6 +47,17 @@ func CreateMergePatch(before, after interface{}) ([]byte, error) {
return jsonmergepatch.CreateMergePatch(rawBefore, rawAfter)
}
// CreateBytePatch is a helper function that creates the same content as
// CreatePatch, but returns it as a []byte instead of a JSONPatch.
func CreateBytePatch(before, after interface{}) ([]byte, error) {
patch, err := CreatePatch(before, after)
if err != nil {
return nil, err
}
return patch.MarshalJSON()
}
// CreatePatch creates a patch as specified in http://jsonpatch.com/
func CreatePatch(before, after interface{}) (JSONPatch, error) {
rawBefore, rawAfter, err := marshallBeforeAfter(before, after)
if err != nil {
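For orientation, a minimal sketch of how the new CreateBytePatch helper could be called from within the same package; the spec struct and its values are hypothetical:

    // Hypothetical same-package caller of the new helper.
    type spec struct {
        Replicas int    `json:"replicas"`
        Image    string `json:"image"`
    }

    func exampleBytePatch() ([]byte, error) {
        before := spec{Replicas: 1, Image: "app:v1"}
        after := spec{Replicas: 2, Image: "app:v1"}
        // Yields the JSON patch as raw bytes, e.g.
        // [{"op":"replace","path":"/replicas","value":2}]
        return CreateBytePatch(before, after)
    }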

View File

@@ -42,10 +42,11 @@ const (
// The following keys are used to configure metrics reporting.
// See https://github.com/knative/serving/blob/master/config/config-observability.yaml
// for details.
AllowStackdriverCustomMetricsKey = "metrics.allow-stackdriver-custom-metrics"
BackendDestinationKey = "metrics.backend-destination"
ReportingPeriodKey = "metrics.reporting-period-seconds"
StackdriverProjectIDKey = "metrics.stackdriver-project-id"
AllowStackdriverCustomMetricsKey = "metrics.allow-stackdriver-custom-metrics"
BackendDestinationKey = "metrics.backend-destination"
ReportingPeriodKey = "metrics.reporting-period-seconds"
StackdriverProjectIDKey = "metrics.stackdriver-project-id"
StackdriverCustomMetricSubDomainKey = "metrics.stackdriver-custom-metrics-subdomain"
// Stackdriver is used for Stackdriver backend
Stackdriver metricsBackend = "stackdriver"
@@ -103,10 +104,14 @@ type metricsConfig struct {
stackdriverProjectID string
// allowStackdriverCustomMetrics indicates whether it is allowed to send metrics to
// Stackdriver using "global" resource type and custom metric type if the
// metrics are not supported by "knative_revision" resource type. Setting this
// metrics are not supported by the registered monitored resource types. Setting this
// flag to "true" could cause extra Stackdriver charges.
// If backendDestination is not Stackdriver, this is ignored.
allowStackdriverCustomMetrics bool
// stackdriverCustomMetricsSubDomain is the subdomain to use when sending custom metrics to Stackdriver.
// If not specified, the default is `knative.dev`.
// If backendDestination is not Stackdriver, this is ignored.
stackdriverCustomMetricsSubDomain string
// True if backendDestination equals "stackdriver". Store this in a variable
// to reduce string comparison operations.
isStackdriverBackend bool
@@ -114,8 +119,8 @@ type metricsConfig struct {
// "knative.dev/serving/activator". Store this in a variable to reduce string
// join operations.
stackdriverMetricTypePrefix string
// stackdriverCustomMetricTypePrefix is "custom.googleapis.com/knative.dev" joins
// component, e.g. "custom.googleapis.com/knative.dev/serving/activator".
// stackdriverCustomMetricTypePrefix is "custom.googleapis.com" joined with the subdomain and component.
// E.g., "custom.googleapis.com/<subdomain>/<component>".
// Store this in a variable to reduce string join operations.
stackdriverCustomMetricTypePrefix string
}
@@ -173,7 +178,12 @@ func getMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metricsC
mc.stackdriverProjectID = m[StackdriverProjectIDKey]
mc.isStackdriverBackend = true
mc.stackdriverMetricTypePrefix = path.Join(mc.domain, mc.component)
mc.stackdriverCustomMetricTypePrefix = path.Join(customMetricTypePrefix, mc.component)
mc.stackdriverCustomMetricsSubDomain = defaultCustomMetricSubDomain
if sdcmd, ok := m[StackdriverCustomMetricSubDomainKey]; ok && sdcmd != "" {
mc.stackdriverCustomMetricsSubDomain = sdcmd
}
mc.stackdriverCustomMetricTypePrefix = path.Join(customMetricTypePrefix, mc.stackdriverCustomMetricsSubDomain, mc.component)
if ascmStr, ok := m[AllowStackdriverCustomMetricsKey]; ok && ascmStr != "" {
ascmBool, err := strconv.ParseBool(ascmStr)
if err != nil {
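To make the new knob concrete, a self-contained sketch of the prefix computation above; the map literal stands in for a real observability ConfigMap, and "example.dev" is an invented value:

    // Hypothetical illustration mirroring the logic in getMetricsConfig.
    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        m := map[string]string{
            "metrics.stackdriver-custom-metrics-subdomain": "example.dev",
        }
        subDomain := "knative.dev" // defaultCustomMetricSubDomain
        if v, ok := m["metrics.stackdriver-custom-metrics-subdomain"]; ok && v != "" {
            subDomain = v
        }
        component := "activator" // hypothetical component name
        fmt.Println(path.Join("custom.googleapis.com", subDomain, component))
        // Output: custom.googleapis.com/example.dev/activator
    }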

View File

@@ -30,10 +30,13 @@ const (
// ResourceTypeKnativeSource is the Stackdriver resource type for Knative Sources.
ResourceTypeKnativeSource = "knative_source"
// LabelTriggerName is the label for the name of the Trigger.
LabelTriggerName = "trigger_name"
// LabelName is the label for the name of the resource.
LabelName = "name"
// LabelBrokerName is the label for the name of the Broker.
// LabelResourceGroup is the label for the name of the resource CRD.
LabelResourceGroup = "resource_group"
// LabelBrokerName is the label for the name of the Trigger's broker.
LabelBrokerName = "broker_name"
// LabelEventType is the label for the name of the event type.
@@ -47,12 +50,6 @@
// LabelFilterSource is the label for the Trigger filter attribute "source".
LabelFilterSource = "filter_source"
// LabelSourceName is the label for the name of the Source.
LabelSourceName = "source_name"
// LabelSourceResourceGroup is the name of the Source CRD.
LabelSourceResourceGroup = "source_resource_group"
)
var (
@@ -62,7 +59,7 @@ var (
LabelLocation,
LabelClusterName,
LabelNamespaceName,
LabelTriggerName,
LabelName,
LabelBrokerName,
)
@@ -80,7 +77,7 @@ var (
LabelLocation,
LabelClusterName,
LabelNamespaceName,
LabelBrokerName,
LabelName,
)
// KnativeBrokerMetrics stores a set of metric types which are supported
@@ -95,8 +92,8 @@ var (
LabelLocation,
LabelClusterName,
LabelNamespaceName,
LabelSourceName,
LabelSourceResourceGroup,
LabelName,
LabelResourceGroup,
)
// KnativeSourceMetrics stores a set of metric types which are supported

View File

@@ -59,7 +59,7 @@ func (kt *KnativeTrigger) MonitoredResource() (resType string, labels map[string
metricskey.LabelLocation: kt.Location,
metricskey.LabelClusterName: kt.ClusterName,
metricskey.LabelNamespaceName: kt.NamespaceName,
metricskey.LabelTriggerName: kt.TriggerName,
metricskey.LabelName: kt.TriggerName,
metricskey.LabelBrokerName: kt.BrokerName,
}
return metricskey.ResourceTypeKnativeTrigger, labels
@@ -71,19 +71,19 @@ func (kb *KnativeBroker) MonitoredResource() (resType string, labels map[string]
metricskey.LabelLocation: kb.Location,
metricskey.LabelClusterName: kb.ClusterName,
metricskey.LabelNamespaceName: kb.NamespaceName,
metricskey.LabelBrokerName: kb.BrokerName,
metricskey.LabelName: kb.BrokerName,
}
return metricskey.ResourceTypeKnativeBroker, labels
}
func (ki *KnativeSource) MonitoredResource() (resType string, labels map[string]string) {
labels = map[string]string{
metricskey.LabelProject: ki.Project,
metricskey.LabelLocation: ki.Location,
metricskey.LabelClusterName: ki.ClusterName,
metricskey.LabelNamespaceName: ki.NamespaceName,
metricskey.LabelSourceName: ki.SourceName,
metricskey.LabelSourceResourceGroup: ki.SourceResourceGroup,
metricskey.LabelProject: ki.Project,
metricskey.LabelLocation: ki.Location,
metricskey.LabelClusterName: ki.ClusterName,
metricskey.LabelNamespaceName: ki.NamespaceName,
metricskey.LabelName: ki.SourceName,
metricskey.LabelResourceGroup: ki.SourceResourceGroup,
}
return metricskey.ResourceTypeKnativeSource, labels
}
@@ -98,7 +98,7 @@ func GetKnativeBrokerMonitoredResource(
ClusterName: gm.cluster,
// The remaining resource labels come from the metric labels.
NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tagsMap),
BrokerName: valueOrUnknown(metricskey.LabelBrokerName, tagsMap),
BrokerName: valueOrUnknown(metricskey.LabelName, tagsMap),
}
var newTags []tag.Tag
@@ -122,7 +122,7 @@ func GetKnativeTriggerMonitoredResource(
ClusterName: gm.cluster,
// The remaining resource labels come from the metric labels.
NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tagsMap),
TriggerName: valueOrUnknown(metricskey.LabelTriggerName, tagsMap),
TriggerName: valueOrUnknown(metricskey.LabelName, tagsMap),
BrokerName: valueOrUnknown(metricskey.LabelBrokerName, tagsMap),
}
@@ -147,8 +147,8 @@ func GetKnativeSourceMonitoredResource(
ClusterName: gm.cluster,
// The remaining resource labels come from the metric labels.
NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tagsMap),
SourceName: valueOrUnknown(metricskey.LabelSourceName, tagsMap),
SourceResourceGroup: valueOrUnknown(metricskey.LabelSourceResourceGroup, tagsMap),
SourceName: valueOrUnknown(metricskey.LabelName, tagsMap),
SourceResourceGroup: valueOrUnknown(metricskey.LabelResourceGroup, tagsMap),
}
var newTags []tag.Tag
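To see the rename end to end, a hypothetical snippet (field values invented) showing the labels a KnativeSource reports under the shared keys:

    // Assumes this package's context; resType is
    // metricskey.ResourceTypeKnativeSource, i.e. "knative_source".
    ki := &KnativeSource{
        Project:             "my-project",
        Location:            "us-central1",
        ClusterName:         "test-cluster",
        NamespaceName:       "default",
        SourceName:          "mysource",
        SourceResourceGroup: "pingsources.sources.knative.dev",
    }
    resType, labels := ki.MonitoredResource()
    fmt.Println(resType)                               // knative_source
    fmt.Println(labels[metricskey.LabelName])          // mysource
    fmt.Println(labels[metricskey.LabelResourceGroup]) // pingsources.sources.knative.dev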

View File

@@ -27,10 +27,12 @@ import (
"knative.dev/pkg/metrics/metricskey"
)
// customMetricTypePrefix is the metric type prefix for unsupported metrics by
// resource type knative_revision.
// See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
const customMetricTypePrefix = "custom.googleapis.com/knative.dev"
const (
customMetricTypePrefix = "custom.googleapis.com"
// defaultCustomMetricSubDomain is the default subdomain to use for metrics not supported by the registered monitored resource types.
// See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
defaultCustomMetricSubDomain = "knative.dev"
)
var (
// gcpMetadataFunc is the function used to fetch GCP metadata.

View File

@@ -44,8 +44,8 @@ var (
namespaceKey = tag.MustNewKey(metricskey.LabelNamespaceName)
eventSourceKey = tag.MustNewKey(metricskey.LabelEventSource)
eventTypeKey = tag.MustNewKey(metricskey.LabelEventType)
sourceNameKey = tag.MustNewKey(metricskey.LabelSourceName)
sourceResourceGroupKey = tag.MustNewKey(metricskey.LabelSourceResourceGroup)
sourceNameKey = tag.MustNewKey(metricskey.LabelName)
sourceResourceGroupKey = tag.MustNewKey(metricskey.LabelResourceGroup)
responseCodeKey = tag.MustNewKey(metricskey.LabelResponseCode)
responseCodeClassKey = tag.MustNewKey(metricskey.LabelResponseCodeClass)
)

View File

@@ -254,7 +254,7 @@ func (sc *SpoofingClient) logZipkinTrace(spoofResp *Response) {
traceID := spoofResp.Header.Get(zipkin.ZipkinTraceIDHeader)
sc.logf("Logging Zipkin Trace for: %s", traceID)
json, err := zipkin.JSONTrace(traceID, /* We don't know the expected number of spans */ -1, 5 * time.Second)
json, err := zipkin.JSONTrace(traceID /* We don't know the expected number of spans */, -1, 5*time.Second)
if err != nil {
if _, ok := err.(*zipkin.TimeoutError); !ok {
sc.logf("Error getting zipkin trace: %v", err)

View File

@@ -137,7 +137,7 @@ func CheckZipkinPortAvailability() error {
// again. If it reaches the timeout, it returns everything it has so far, along with an error.
func JSONTrace(traceID string, expected int, timeout time.Duration) (trace []model.SpanModel, err error) {
t := time.After(timeout)
for ; len(trace) != expected; {
for len(trace) != expected {
select {
case <-t:
return trace, &TimeoutError{}
@@ -153,7 +153,8 @@ func JSONTrace(traceID string, expected int, timeout time.Duration) (trace []mod
// TimeoutError is an error returned by JSONTrace if it times out before getting the expected number
// of traces.
type TimeoutError struct {}
type TimeoutError struct{}
func (*TimeoutError) Error() string {
return "timeout getting JSONTrace"
}
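A short sketch of the calling pattern this supports, mirroring logZipkinTrace above; the trace ID is a placeholder:

    // With expected == -1 the loop can never satisfy len(trace) == expected,
    // so JSONTrace polls until the timeout fires and then returns whatever
    // spans it has collected, together with a *TimeoutError.
    trace, err := JSONTrace("0123456789abcdef" /* placeholder ID */, -1, 5*time.Second)
    if err != nil {
        if _, ok := err.(*TimeoutError); !ok {
            log.Printf("error getting trace: %v", err)
        }
    }
    log.Printf("got %d spans", len(trace))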

View File

@@ -23,16 +23,32 @@ import (
"knative.dev/pkg/testutils/clustermanager/boskos"
)
const (
fakeOwner = "fake-owner"
)
// FakeBoskosClient implements boskos.Operation
type FakeBoskosClient struct {
resources []*boskoscommon.Resource
}
func (c *FakeBoskosClient) getOwner(host *string) string {
if nil == host {
return fakeOwner
}
return *host
}
func (c *FakeBoskosClient) GetResources() []*boskoscommon.Resource {
return c.resources
}
// AcquireGKEProject fakes acquisition by claiming the first free resource for the resolved owner
func (c *FakeBoskosClient) AcquireGKEProject(host *string) (*boskoscommon.Resource, error) {
for _, res := range c.resources {
if res.State == boskoscommon.Free {
res.State = boskoscommon.Busy
res.Owner = c.getOwner(host)
return res, nil
}
}
@@ -41,18 +57,15 @@ func (c *FakeBoskosClient) AcquireGKEProject(host *string) (*boskoscommon.Resour
// ReleaseGKEProject fakes release by freeing the named resource, provided the owners match
func (c *FakeBoskosClient) ReleaseGKEProject(host *string, name string) error {
if nil == host {
return fmt.Errorf("host has to be set")
}
owner := c.getOwner(host)
for _, res := range c.resources {
if res.Name == name {
if res.Owner == *host {
if res.Owner == owner {
res.Owner = ""
res.State = boskoscommon.Free
return nil
} else {
return fmt.Errorf("Got owner: '%s', expect owner: '%s'", res.Owner, *host)
return fmt.Errorf("Got owner: '%s', expect owner: '%s'", res.Owner, owner)
}
}
}
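A hedged usage sketch for the fake, as it might appear in this package's own tests (the resource name is invented; note that a nil host now resolves to the fake-owner constant instead of returning an error):

    fake := &FakeBoskosClient{
        resources: []*boskoscommon.Resource{
            {Name: "fake-proj-0", State: boskoscommon.Free},
        },
    }
    res, _ := fake.AcquireGKEProject(nil)
    // res.Owner == "fake-owner", res.State == boskoscommon.Busy
    err := fake.ReleaseGKEProject(nil, "fake-proj-0")
    // err == nil: the nil host resolves to "fake-owner", which matches
    // the acquired owner, so the resource is freed again.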

View File

@@ -26,4 +26,5 @@ type ClusterOperations interface {
Provider() string
Initialize() error
Acquire() error
Delete() error
}
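With Delete now part of the contract, a compile-time assertion is a cheap way to keep implementations honest; this is a common Go idiom, not something this diff adds:

    // Hypothetical guard; assumes GKECluster (below) implements every
    // ClusterOperations method, including the new Delete.
    var _ ClusterOperations = (*GKECluster)(nil)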

View File

@@ -46,6 +46,7 @@ var (
protectedClusters = []string{"knative-prow"}
// These are arbitrary numbers determined based on past experience
creationTimeout = 20 * time.Minute
deletionTimeout = 10 * time.Minute
)
// GKEClient implements Client
@@ -77,6 +78,7 @@ type GKECluster struct {
// GKESDKOperations wraps GKE SDK related functions
type GKESDKOperations interface {
create(string, string, *container.CreateClusterRequest) (*container.Operation, error)
delete(string, string, string) (*container.Operation, error)
get(string, string, string) (*container.Cluster, error)
getOperation(string, string, string) (*container.Operation, error)
}
@@ -91,6 +93,12 @@ func (gsc *GKESDKClient) create(project, location string, rb *container.CreateCl
return gsc.Projects.Locations.Clusters.Create(parent, rb).Context(context.Background()).Do()
}
// delete issues a GKE cluster deletion request and returns the long-running operation
func (gsc *GKESDKClient) delete(project, clusterName, location string) (*container.Operation, error) {
parent := fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, clusterName)
return gsc.Projects.Locations.Clusters.Delete(parent).Context(context.Background()).Do()
}
func (gsc *GKESDKClient) get(project, location, cluster string) (*container.Cluster, error) {
clusterFullPath := fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, cluster)
return gsc.Projects.Locations.Clusters.Get(clusterFullPath).Context(context.Background()).Do()
@@ -222,7 +230,10 @@ func (gc *GKECluster) Acquire() error {
}
}
var cluster *container.Cluster
var op *container.Operation
for i, region := range regions {
// Restore innocence: reset err from the previous iteration
err = nil
rb := &container.CreateClusterRequest{
Cluster: &container.Cluster{
Name: clusterName,
@@ -235,24 +246,36 @@
}
clusterLoc := getClusterLocation(region, gc.Request.Zone)
// TODO(chaodaiG): add deleting logic once cluster deletion logic is done
log.Printf("Creating cluster %q' in %q", clusterName, clusterLoc)
var createOp *container.Operation
createOp, err = gc.operations.create(*gc.Project, clusterLoc, rb)
// Deleting cluster if it already exists
existingCluster, _ := gc.operations.get(*gc.Project, clusterLoc, clusterName)
if nil != existingCluster {
log.Printf("Cluster %q already exists in %q. Deleting...", clusterName, clusterLoc)
op, err = gc.operations.delete(*gc.Project, clusterName, clusterLoc)
if nil == err {
err = gc.wait(clusterLoc, op.Name, deletionTimeout)
}
}
// Creating cluster only if previous step succeeded
if nil == err {
if err = gc.wait(clusterLoc, createOp.Name, creationTimeout); nil == err {
cluster, err = gc.operations.get(*gc.Project, clusterLoc, rb.Cluster.Name)
log.Printf("Creating cluster %q in %q", clusterName, clusterLoc)
op, err = gc.operations.create(*gc.Project, clusterLoc, rb)
if nil == err {
if err = gc.wait(clusterLoc, op.Name, creationTimeout); nil == err {
cluster, err = gc.operations.get(*gc.Project, clusterLoc, rb.Cluster.Name)
}
}
}
if nil != err {
errMsg := fmt.Sprintf("error creating cluster: '%v'", err)
errMsg := fmt.Sprintf("Error during cluster creation: '%v'. ", err)
if gc.NeedCleanup { // Delete half created cluster if it's user created
// TODO(chaodaiG): add this part when deletion logic is done
errMsg = fmt.Sprintf("%sDeleting cluster %q in %q in background...\n", errMsg, clusterName, clusterLoc)
go gc.operations.delete(*gc.Project, clusterName, clusterLoc)
}
// Retry another region if cluster creation failed.
// TODO(chaodaiG): catch specific errors, as we know what the errors look like for stockout etc.
if len(regions) != i+1 {
errMsg = fmt.Sprintf("%s. Retry another region '%s' for cluster creation", errMsg, regions[i+1])
errMsg = fmt.Sprintf("%sRetry another region %q for cluster creation", errMsg, regions[i+1])
}
log.Printf(errMsg)
} else {
@@ -265,6 +288,39 @@ func (gc *GKECluster) Acquire() error {
return err
}
// Delete takes care of GKE cluster resource cleanup. It only releases the Boskos resource when running in
// Prow; otherwise it deletes the cluster if it is marked NeedCleanup
func (gc *GKECluster) Delete() error {
gc.ensureProtected()
// Release Boskos if running in Prow, and let the Janitor take care of
// deleting the clusters
if common.IsProw() {
log.Printf("Releasing Boskos resource: '%v'", *gc.Project)
return gc.boskosOps.ReleaseGKEProject(nil, *gc.Project)
}
// NeedCleanup is only true if running locally and the cluster was created by
// this process
if !gc.NeedCleanup {
return nil
}
// Should only get here if running locally and the cluster was created by this
// client, so at this point the cluster should have been set
if nil == gc.Cluster {
return fmt.Errorf("cluster doesn't exist")
}
log.Printf("Deleting cluster %q in %q", gc.Cluster.Name, gc.Cluster.Location)
op, err := gc.operations.delete(*gc.Project, gc.Cluster.Name, gc.Cluster.Location)
if nil == err {
err = gc.wait(gc.Cluster.Location, op.Name, deletionTimeout)
}
if nil != err {
return fmt.Errorf("failed deleting cluster: '%v'", err)
}
return nil
}
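Taken together, Acquire and the new Delete suggest a lifecycle like the following sketch; GKECluster construction is not shown in this diff, so the caller here is hypothetical:

    // Hypothetical same-package caller.
    func runWithCluster(gc *GKECluster) error {
        if err := gc.Acquire(); err != nil {
            return err
        }
        // In Prow this releases the Boskos project; locally it deletes
        // the cluster only when NeedCleanup is true.
        defer gc.Delete()
        // ... run tests against gc.Cluster ...
        return nil
    }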
// wait relies on the unique opName (the operation ID created by the cloud provider), and waits
// until the operation is done
func (gc *GKECluster) wait(location, opName string, wait time.Duration) error {