mirror of https://github.com/knative/caching.git
Auto-update dependencies (#98)
Produced via: `dep ensure -update knative.dev/test-infra knative.dev/pkg` /assign mattmoor
This commit is contained in:
parent 759b5f051a
commit 21bff291f8
@@ -927,7 +927,7 @@
 [[projects]]
   branch = "master"
-  digest = "1:03c8c36f66fba88d6184f87e0bcc24262a53efc9d47adbd9a071a69ad4c001fc"
+  digest = "1:dfd153fff295bf8023d8f9eca57d4c3fd76790619093b1e6d216810b82fed7a2"
   name = "knative.dev/pkg"
   packages = [
     "apis",

@@ -946,7 +946,7 @@
     "metrics/metricskey",
   ]
   pruneopts = "T"
-  revision = "c39ee25c42f53e4024d60511a2edef2065ea7333"
+  revision = "87ad483365174cda3ee83f1c0832df930f9adb52"

 [[projects]]
   branch = "master"

@@ -1245,14 +1245,14 @@
 [[projects]]
   branch = "master"
-  digest = "1:e873112f19e9ce823bc01ca715de355bd5cf8e4d6045ed5f34b3eb0d6ec2c655"
+  digest = "1:615f3c6b974179c583edf50234fd87c5731d02ef941f314cbd4dd2766d3a619a"
   name = "knative.dev/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "5449c8bad49d3528b5cb0b58a0cdce0f8a526f03"
+  revision = "816123d5e71fb88533d1564c331443387967e1dc"

 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
@@ -27,8 +27,8 @@ const (
 	// ResourceTypeKnativeBroker is the Stackdriver resource type for Knative Brokers.
 	ResourceTypeKnativeBroker = "knative_broker"
 
-	// ResourceTypeKnativeImporter is the Stackdriver resource type for Knative Importers.
-	ResourceTypeKnativeImporter = "knative_importer"
+	// ResourceTypeKnativeSource is the Stackdriver resource type for Knative Sources.
+	ResourceTypeKnativeSource = "knative_source"
 
 	// LabelTriggerName is the label for the name of the Trigger.
 	LabelTriggerName = "trigger_name"

@@ -48,11 +48,11 @@ const (
 	// LabelFilterSource is the label for the Trigger filter attribute "source".
 	LabelFilterSource = "filter_source"
 
-	// LabelImporterName is the label for the name of the Importer.
-	LabelImporterName = "importer_name"
+	// LabelSourceName is the label for the name of the Source.
+	LabelSourceName = "source_name"
 
-	// LabelImporterResourceGroup is the name of the Importer CRD.
-	LabelImporterResourceGroup = "importer_resource_group"
+	// LabelSourceResourceGroup is the name of the Source CRD.
+	LabelSourceResourceGroup = "source_resource_group"
 )
 
 var (

@@ -89,19 +89,19 @@ var (
 		"knative.dev/eventing/broker/event_count",
 	)
 
-	// KnativeImporterLabels stores the set of resource labels for resource type knative_importer.
-	KnativeImporterLabels = sets.NewString(
+	// KnativeSourceLabels stores the set of resource labels for resource type knative_source.
+	KnativeSourceLabels = sets.NewString(
 		LabelProject,
 		LabelLocation,
 		LabelClusterName,
 		LabelNamespaceName,
-		LabelImporterName,
-		LabelImporterResourceGroup,
+		LabelSourceName,
+		LabelSourceResourceGroup,
 	)
 
-	// KnativeImporterMetrics stores a set of metric types which are supported
-	// by resource type knative_importer.
-	KnativeImporterMetrics = sets.NewString(
-		"knative.dev/eventing/importer/event_count",
+	// KnativeSourceMetrics stores a set of metric types which are supported
+	// by resource type knative_source.
+	KnativeSourceMetrics = sets.NewString(
+		"knative.dev/eventing/source/event_count",
 	)
 )
@@ -44,13 +44,13 @@ type KnativeBroker struct {
 	BrokerName    string
 }
 
-type KnativeImporter struct {
-	Project               string
-	Location              string
-	ClusterName           string
-	NamespaceName         string
-	ImporterName          string
-	ImporterResourceGroup string
+type KnativeSource struct {
+	Project             string
+	Location            string
+	ClusterName         string
+	NamespaceName       string
+	SourceName          string
+	SourceResourceGroup string
 }
 
 func (kt *KnativeTrigger) MonitoredResource() (resType string, labels map[string]string) {

@@ -76,16 +76,16 @@ func (kb *KnativeBroker) MonitoredResource() (resType string, labels map[string]
 	return metricskey.ResourceTypeKnativeBroker, labels
 }
 
-func (ki *KnativeImporter) MonitoredResource() (resType string, labels map[string]string) {
+func (ki *KnativeSource) MonitoredResource() (resType string, labels map[string]string) {
 	labels = map[string]string{
-		metricskey.LabelProject:               ki.Project,
-		metricskey.LabelLocation:              ki.Location,
-		metricskey.LabelClusterName:           ki.ClusterName,
-		metricskey.LabelNamespaceName:         ki.NamespaceName,
-		metricskey.LabelImporterName:          ki.ImporterName,
-		metricskey.LabelImporterResourceGroup: ki.ImporterResourceGroup,
+		metricskey.LabelProject:             ki.Project,
+		metricskey.LabelLocation:            ki.Location,
+		metricskey.LabelClusterName:         ki.ClusterName,
+		metricskey.LabelNamespaceName:       ki.NamespaceName,
+		metricskey.LabelSourceName:          ki.SourceName,
+		metricskey.LabelSourceResourceGroup: ki.SourceResourceGroup,
 	}
-	return metricskey.ResourceTypeKnativeImporter, labels
+	return metricskey.ResourceTypeKnativeSource, labels
 }
 
 func GetKnativeBrokerMonitoredResource(

@@ -137,24 +137,24 @@ func GetKnativeTriggerMonitoredResource(
 	return newTags, kt
 }
 
-func GetKnativeImporterMonitoredResource(
+func GetKnativeSourceMonitoredResource(
 	v *view.View, tags []tag.Tag, gm *gcpMetadata) ([]tag.Tag, monitoredresource.Interface) {
 	tagsMap := getTagsMap(tags)
-	ki := &KnativeImporter{
+	ki := &KnativeSource{
 		// The first three resource labels are from metadata.
 		Project:     gm.project,
 		Location:    gm.location,
 		ClusterName: gm.cluster,
 		// The rest resource labels are from metrics labels.
-		NamespaceName:         valueOrUnknown(metricskey.LabelNamespaceName, tagsMap),
-		ImporterName:          valueOrUnknown(metricskey.LabelImporterName, tagsMap),
-		ImporterResourceGroup: valueOrUnknown(metricskey.LabelImporterResourceGroup, tagsMap),
+		NamespaceName:       valueOrUnknown(metricskey.LabelNamespaceName, tagsMap),
+		SourceName:          valueOrUnknown(metricskey.LabelSourceName, tagsMap),
+		SourceResourceGroup: valueOrUnknown(metricskey.LabelSourceResourceGroup, tagsMap),
 	}
 
 	var newTags []tag.Tag
 	for _, t := range tags {
 		// Keep the metrics labels that are not resource labels
-		if !metricskey.KnativeImporterLabels.Has(t.Key.Name()) {
+		if !metricskey.KnativeSourceLabels.Has(t.Key.Name()) {
 			newTags = append(newTags, t)
 		}
 	}
@@ -36,7 +36,7 @@ import (
 // 2) The backend is not Stackdriver.
 // 3) The backend is Stackdriver and it is allowed to use custom metrics.
 // 4) The backend is Stackdriver and the metric is one of the built-in metrics: "knative_revision", "knative_broker",
-// "knative_trigger", "knative_importer".
+// "knative_trigger", "knative_source".
 func Record(ctx context.Context, ms stats.Measurement) {
 	mc := getCurMetricsConfig()
 

@@ -57,7 +57,7 @@ func Record(ctx context.Context, ms stats.Measurement) {
 	isServingBuiltIn := metricskey.KnativeRevisionMetrics.Has(metricType)
 	isEventingBuiltIn := metricskey.KnativeTriggerMetrics.Has(metricType) ||
 		metricskey.KnativeBrokerMetrics.Has(metricType) ||
-		metricskey.KnativeImporterMetrics.Has(metricType)
+		metricskey.KnativeSourceMetrics.Has(metricType)
 
 	if isServingBuiltIn || isEventingBuiltIn {
 		stats.Record(ctx, ms)

@@ -87,10 +87,10 @@ func getMonitoredResourceFunc(metricTypePrefix string, gm *gcpMetadata) func(v *
 			return GetKnativeBrokerMonitoredResource(view, tags, gm)
 		} else if metricskey.KnativeTriggerMetrics.Has(metricType) {
 			return GetKnativeTriggerMonitoredResource(view, tags, gm)
-		} else if metricskey.KnativeImporterMetrics.Has(metricType) {
-			return GetKnativeImporterMonitoredResource(view, tags, gm)
+		} else if metricskey.KnativeSourceMetrics.Has(metricType) {
+			return GetKnativeSourceMonitoredResource(view, tags, gm)
 		}
-		// Unsupported metric by knative_revision, knative_broker, knative_trigger, and knative_importer, use "global" resource type.
+		// Unsupported metric by knative_revision, knative_broker, knative_trigger, and knative_source, use "global" resource type.
 		return getGlobalMonitoredResource(view, tags)
 	}
 }

@@ -105,7 +105,7 @@ func getMetricTypeFunc(metricTypePrefix, customMetricTypePrefix string) func(vie
 	inServing := metricskey.KnativeRevisionMetrics.Has(metricType)
 	inEventing := metricskey.KnativeBrokerMetrics.Has(metricType) ||
 		metricskey.KnativeTriggerMetrics.Has(metricType) ||
-		metricskey.KnativeImporterMetrics.Has(metricType)
+		metricskey.KnativeSourceMetrics.Has(metricType)
 	if inServing || inEventing {
 		return metricType
 	}
@@ -14,14 +14,15 @@
  * limitations under the License.
  */
 
-package metrics
+package source
 
 import (
 	"context"
-	"go.opencensus.io/stats/view"
+	"knative.dev/pkg/metrics"
 	"strconv"
 
 	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/view"
 	"go.opencensus.io/tag"
 	"knative.dev/pkg/metrics/metricskey"
 )

@@ -33,6 +34,19 @@ var (
 		"Number of events sent",
 		stats.UnitDimensionless,
 	)
 
+	// Create the tag keys that will be used to add tags to our measurements.
+	// Tag keys must conform to the restrictions described in
+	// go.opencensus.io/tag/validate.go. Currently those restrictions are:
+	// - length between 1 and 255 inclusive
+	// - characters are printable US-ASCII
+	namespaceKey           = tag.MustNewKey(metricskey.LabelNamespaceName)
+	eventSourceKey         = tag.MustNewKey(metricskey.LabelEventSource)
+	eventTypeKey           = tag.MustNewKey(metricskey.LabelEventType)
+	sourceNameKey          = tag.MustNewKey(metricskey.LabelSourceName)
+	sourceResourceGroupKey = tag.MustNewKey(metricskey.LabelSourceResourceGroup)
+	responseCodeKey        = tag.MustNewKey(metricskey.LabelResponseCode)
+	responseCodeClassKey   = tag.MustNewKey(metricskey.LabelResponseCodeClass)
 )
 
 type ReportArgs struct {

@@ -43,6 +57,10 @@ type ReportArgs struct {
 	ResourceGroup string
 }
 
+func init() {
+	register()
+}
+
 // StatsReporter defines the interface for sending source metrics.
 type StatsReporter interface {
 	// ReportEventCount captures the event count. It records one per call.

@@ -53,75 +71,18 @@ var _ StatsReporter = (*reporter)(nil)
 
 // reporter holds cached metric objects to report source metrics.
 type reporter struct {
-	namespaceTagKey           tag.Key
-	eventSourceTagKey         tag.Key
-	eventTypeTagKey           tag.Key
-	sourceNameTagKey          tag.Key
-	sourceResourceGroupTagKey tag.Key
-	responseCodeKey           tag.Key
-	responseCodeClassKey      tag.Key
+	ctx context.Context
 }
 
 // NewStatsReporter creates a reporter that collects and reports source metrics.
 func NewStatsReporter() (StatsReporter, error) {
-	var r = &reporter{}
-
-	// Create the tag keys that will be used to add tags to our measurements.
-	nsTag, err := tag.NewKey(metricskey.LabelNamespaceName)
-	if err != nil {
-		return nil, err
-	}
-	r.namespaceTagKey = nsTag
-
-	eventSourceTag, err := tag.NewKey(metricskey.LabelEventSource)
-	if err != nil {
-		return nil, err
-	}
-	r.eventSourceTagKey = eventSourceTag
-
-	eventTypeTag, err := tag.NewKey(metricskey.LabelEventType)
-	if err != nil {
-		return nil, err
-	}
-	r.eventTypeTagKey = eventTypeTag
-
-	nameTag, err := tag.NewKey(metricskey.LabelImporterName)
-	if err != nil {
-		return nil, err
-	}
-	r.sourceNameTagKey = nameTag
-
-	resourceGroupTag, err := tag.NewKey(metricskey.LabelImporterResourceGroup)
-	if err != nil {
-		return nil, err
-	}
-	r.sourceResourceGroupTagKey = resourceGroupTag
-
-	responseCodeTag, err := tag.NewKey(metricskey.LabelResponseCode)
-	if err != nil {
-		return nil, err
-	}
-	r.responseCodeKey = responseCodeTag
-	responseCodeClassTag, err := tag.NewKey(metricskey.LabelResponseCodeClass)
-	if err != nil {
-		return nil, err
-	}
-	r.responseCodeClassKey = responseCodeClassTag
-
-	// Create view to see our measurements.
-	err = view.Register(
-		&view.View{
-			Description: eventCountM.Description(),
-			Measure:     eventCountM,
-			Aggregation: view.Count(),
-			TagKeys:     []tag.Key{r.namespaceTagKey, r.eventSourceTagKey, r.eventTypeTagKey, r.sourceNameTagKey, r.sourceResourceGroupTagKey, r.responseCodeKey, r.responseCodeClassKey},
-		},
+	ctx, err := tag.New(
+		context.Background(),
 	)
 	if err != nil {
 		return nil, err
 	}
 
-	return r, nil
+	return &reporter{ctx: ctx}, nil
 }
 
 func (r *reporter) ReportEventCount(args *ReportArgs, responseCode int) error {

@@ -129,18 +90,41 @@ func (r *reporter) ReportEventCount(args *ReportArgs, responseCode int) error {
 	if err != nil {
 		return err
 	}
-	Record(ctx, eventCountM.M(1))
+	metrics.Record(ctx, eventCountM.M(1))
 	return nil
 }
 
 func (r *reporter) generateTag(args *ReportArgs, responseCode int) (context.Context, error) {
 	return tag.New(
-		context.Background(),
-		tag.Insert(r.namespaceTagKey, args.Namespace),
-		tag.Insert(r.eventSourceTagKey, args.EventSource),
-		tag.Insert(r.eventTypeTagKey, args.EventType),
-		tag.Insert(r.sourceNameTagKey, args.Name),
-		tag.Insert(r.sourceResourceGroupTagKey, args.ResourceGroup),
-		tag.Insert(r.responseCodeKey, strconv.Itoa(responseCode)),
-		tag.Insert(r.responseCodeClassKey, ResponseCodeClass(responseCode)))
+		r.ctx,
+		tag.Insert(namespaceKey, args.Namespace),
+		tag.Insert(eventSourceKey, args.EventSource),
+		tag.Insert(eventTypeKey, args.EventType),
+		tag.Insert(sourceNameKey, args.Name),
+		tag.Insert(sourceResourceGroupKey, args.ResourceGroup),
+		tag.Insert(responseCodeKey, strconv.Itoa(responseCode)),
+		tag.Insert(responseCodeClassKey, metrics.ResponseCodeClass(responseCode)))
 }
 
+func register() {
+	tagKeys := []tag.Key{
+		namespaceKey,
+		eventSourceKey,
+		eventTypeKey,
+		sourceNameKey,
+		sourceResourceGroupKey,
+		responseCodeKey,
+		responseCodeClassKey}
+
+	// Create view to see our measurements.
+	if err := view.Register(
+		&view.View{
+			Description: eventCountM.Description(),
+			Measure:     eventCountM,
+			Aggregation: view.Count(),
+			TagKeys:     tagKeys,
+		},
+	); err != nil {
+		panic(err)
+	}
+}
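For illustration only, a minimal usage sketch of the reporter after this rename (not part of the commit, written as if it sat inside the same `source` package; the ReportArgs field values below are invented placeholders):

	// exampleReport shows one way an event source might record an event count.
	func exampleReport() error {
		r, err := NewStatsReporter()
		if err != nil {
			return err
		}
		// Field values are placeholders, not real Knative resources.
		return r.ReportEventCount(&ReportArgs{
			Namespace:     "default",
			EventSource:   "example-event-source",
			EventType:     "dev.knative.example.event",
			Name:          "example-source",
			ResourceGroup: "examplesources.sources.example.dev",
		}, 202)
	}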
@@ -16,7 +16,7 @@ limitations under the License.
 
 // error.go helps with error handling
 
-package alerter
+package helpers
 
 import (
 	"errors"

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package alerter
+package helpers
 
 import (
 	"log"
@@ -0,0 +1,68 @@
/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package alerter

import (
	qpb "github.com/google/mako/proto/quickstore/quickstore_go_proto"
	"knative.dev/pkg/test/mako/alerter/github"
	"knative.dev/pkg/test/mako/alerter/slack"
)

// Alerter controls alert for performance regressions detected by Mako.
type Alerter struct {
	githubIssueHandler  *github.IssueHandler
	slackMessageHandler *slack.MessageHandler
}

// SetupGitHub will setup SetupGitHub for the alerter.
func (alerter *Alerter) SetupGitHub(org, repo, githubTokenPath string) error {
	issueHandler, err := github.Setup(org, repo, githubTokenPath, false)
	if err != nil {
		return err
	}
	alerter.githubIssueHandler = issueHandler
	return nil
}

// SetupSlack will setup Slack for the alerter.
func (alerter *Alerter) SetupSlack(repo, userName, readTokenPath, writeTokenPath string) error {
	messageHandler, err := slack.Setup(userName, readTokenPath, writeTokenPath, repo, false)
	if err != nil {
		return err
	}
	alerter.slackMessageHandler = messageHandler
	return nil
}

// HandleBenchmarkResult will handle the benchmark result which returns from `q.Store()`
func (alerter *Alerter) HandleBenchmarkResult(testName string, output qpb.QuickstoreOutput, err error) {
	if err != nil {
		if output.GetStatus() == qpb.QuickstoreOutput_ANALYSIS_FAIL {
			summary := output.GetSummaryOutput()
			if alerter.githubIssueHandler != nil {
				alerter.githubIssueHandler.CreateIssueForTest(testName, summary)
			}
			if alerter.slackMessageHandler != nil {
				alerter.slackMessageHandler.SendAlert(summary)
			}
		}
		return
	}
	if alerter.githubIssueHandler != nil {
		alerter.githubIssueHandler.CloseIssueForTest(testName)
	}
}
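For illustration only, a hedged sketch of how a benchmark might wire up this new Alerter (not part of the commit; the org/repo names, token paths, and test name are placeholders):

	// exampleWireUp shows the intended call sequence; it assumes
	// "knative.dev/pkg/test/mako/alerter" and "log" are imported.
	func exampleWireUp() *alerter.Alerter {
		a := &alerter.Alerter{}
		// Each handler is optional; HandleBenchmarkResult skips handlers that were never set up.
		if err := a.SetupGitHub("example-org", "example-repo", "/etc/github-token/token"); err != nil {
			log.Printf("GitHub alerting disabled: %v", err)
		}
		if err := a.SetupSlack("example-repo", "example-bot", "/etc/slack/read-token", "/etc/slack/write-token"); err != nil {
			log.Printf("Slack alerting disabled: %v", err)
		}
		// After storing results with Mako's quickstore client:
		//   out, storeErr := q.Store()
		//   a.HandleBenchmarkResult("example-benchmark", out, storeErr)
		return a
	}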
@@ -23,7 +23,7 @@ import (
 	"github.com/google/go-github/github"
 
 	"knative.dev/pkg/test/ghutil"
-	"knative.dev/pkg/test/mako/alerter"
+	"knative.dev/pkg/test/helpers"
 )
 
 const (

@@ -39,14 +39,14 @@ const (
 ### Auto-generated issue tracking performance regression
 * **Test name**: %s`
 
-	// reopenIssueCommentTemplate is a template for the comment of an issue that is reopened
-	reopenIssueCommentTemplate = `
-New regression has been detected, reopening this issue:
-%s`
+	// newIssueCommentTemplate is a template for the comment of an issue that has been quiet for a long time
+	newIssueCommentTemplate = `
+A new regression for this test has been detected:
+%s`
+
+	// reopenIssueCommentTemplate is a template for the comment of an issue that is reopened
+	reopenIssueCommentTemplate = `
+New regression has been detected, reopening this issue:
+%s`
 
 	// closeIssueComment is the comment of an issue when we close it

@@ -54,8 +54,8 @@
 A new regression for this test has been detected:
 The performance regression goes way for this test, closing this issue.`
 )
 
-// issueHandler handles methods for github issues
-type issueHandler struct {
+// IssueHandler handles methods for github issues
+type IssueHandler struct {
 	client ghutil.GithubOperations
 	config config
 }

@@ -68,17 +68,18 @@ type config struct {
 }
 
 // Setup creates the necessary setup to make calls to work with github issues
-func Setup(githubToken string, config config) (*issueHandler, error) {
-	ghc, err := ghutil.NewGithubClient(githubToken)
+func Setup(org, repo, githubTokenPath string, dryrun bool) (*IssueHandler, error) {
+	ghc, err := ghutil.NewGithubClient(githubTokenPath)
 	if err != nil {
 		return nil, fmt.Errorf("cannot authenticate to github: %v", err)
 	}
-	return &issueHandler{client: ghc, config: config}, nil
+	conf := config{org: org, repo: repo, dryrun: dryrun}
+	return &IssueHandler{client: ghc, config: conf}, nil
 }
 
 // CreateIssueForTest will try to add an issue with the given testName and description.
 // If there is already an issue related to the test, it will try to update that issue.
-func (gih *issueHandler) CreateIssueForTest(testName, desc string) error {
+func (gih *IssueHandler) CreateIssueForTest(testName, desc string) error {
 	org := gih.config.org
 	repo := gih.config.repo
 	dryrun := gih.config.dryrun

@@ -87,7 +88,8 @@ func (gih *issueHandler) CreateIssueForTest(testName, desc string) error {
 	// If the issue hasn't been created, create one
 	if issue == nil {
 		body := fmt.Sprintf(issueBodyTemplate, testName)
-		if err := gih.createNewIssue(org, repo, title, body, dryrun); err != nil {
+		issue, err := gih.createNewIssue(org, repo, title, body, dryrun)
+		if err != nil {
 			return err
 		}
 		comment := fmt.Sprintf(newIssueCommentTemplate, desc)

@@ -118,9 +120,9 @@ func (gih *issueHandler) CreateIssueForTest(testName, desc string) error {
 }
 
 // createNewIssue will create a new issue, and add perfLabel for it.
-func (gih *issueHandler) createNewIssue(org, repo, title, body string, dryrun bool) error {
+func (gih *IssueHandler) createNewIssue(org, repo, title, body string, dryrun bool) (*github.Issue, error) {
 	var newIssue *github.Issue
-	if err := alerter.Run(
+	if err := helpers.Run(
 		"creating issue",
 		func() error {
 			var err error

@@ -129,20 +131,23 @@ func (gih *issueHandler) createNewIssue(org, repo, title, body string, dryrun bo
 		},
 		dryrun,
 	); nil != err {
-		return err
+		return nil, err
 	}
-	return alerter.Run(
+	if err := helpers.Run(
 		"adding perf label",
 		func() error {
 			return gih.client.AddLabelsToIssue(org, repo, *newIssue.Number, []string{perfLabel})
 		},
 		dryrun,
-	)
+	); nil != err {
+		return nil, err
+	}
+	return newIssue, nil
 }
 
 // CloseIssueForTest will try to close the issue for the given testName.
 // If there is no issue related to the test or the issue is already closed, the function will do nothing.
-func (gih *issueHandler) CloseIssueForTest(testName string) error {
+func (gih *IssueHandler) CloseIssueForTest(testName string) error {
 	org := gih.config.org
 	repo := gih.config.repo
 	dryrun := gih.config.dryrun

@@ -153,7 +158,7 @@ func (gih *issueHandler) CloseIssueForTest(testName string) error {
 	}
 
 	issueNumber := *issue.Number
-	if err := alerter.Run(
+	if err := helpers.Run(
 		"add comment for the issue to close",
 		func() error {
 			_, cErr := gih.client.CreateComment(org, repo, issueNumber, closeIssueComment)

@@ -163,7 +168,7 @@ func (gih *issueHandler) CloseIssueForTest(testName string) error {
 	); err != nil {
 		return err
 	}
-	return alerter.Run(
+	return helpers.Run(
 		"closing issue",
 		func() error {
 			return gih.client.CloseIssue(org, repo, issueNumber)

@@ -173,8 +178,8 @@ func (gih *issueHandler) CloseIssueForTest(testName string) error {
 }
 
 // reopenIssue will reopen the given issue.
-func (gih *issueHandler) reopenIssue(org, repo string, issueNumber int, dryrun bool) error {
-	return alerter.Run(
+func (gih *IssueHandler) reopenIssue(org, repo string, issueNumber int, dryrun bool) error {
+	return helpers.Run(
 		"reopen the issue",
 		func() error {
 			return gih.client.ReopenIssue(org, repo, issueNumber)

@@ -184,9 +189,9 @@ func (gih *issueHandler) reopenIssue(org, repo string, issueNumber int, dryrun b
 }
 
 // findIssue will return the issue in the given repo if it exists.
-func (gih *issueHandler) findIssue(org, repo, title string, dryrun bool) *github.Issue {
+func (gih *IssueHandler) findIssue(org, repo, title string, dryrun bool) *github.Issue {
 	var issues []*github.Issue
-	alerter.Run(
+	helpers.Run(
 		"list issues in the repo",
 		func() error {
 			var err error

@@ -204,8 +209,8 @@ func (gih *issueHandler) findIssue(org, repo, title string, dryrun bool) *github
 }
 
 // addComment will add comment for the given issue.
-func (gih *issueHandler) addComment(org, repo string, issueNumber int, commentBody string, dryrun bool) error {
-	return alerter.Run(
+func (gih *IssueHandler) addComment(org, repo string, issueNumber int, commentBody string, dryrun bool) error {
+	return helpers.Run(
 		"add comment for issue",
 		func() error {
 			_, err := gih.client.CreateComment(org, repo, issueNumber, commentBody)
@@ -17,30 +17,40 @@ limitations under the License.
 package slack
 
 import (
+	"flag"
 	"fmt"
 	"sync"
+	"time"
 
-	"knative.dev/pkg/test/mako/alerter"
+	"knative.dev/pkg/test/helpers"
 	"knative.dev/pkg/test/slackutil"
 )
 
-const messageTemplate = `
+var minInterval = flag.Duration("min-alert-interval", 24*time.Hour, "The minimum interval of sending Slack alerts.")
+
+const (
+	messageTemplate = `
 As of %s, there is a new performance regression detected from automation test:
 %s`
+)
 
-// messageHandler handles methods for slack messages
-type messageHandler struct {
-	client slackutil.Operations
-	config repoConfig
-	dryrun bool
+// MessageHandler handles methods for slack messages
+type MessageHandler struct {
+	readClient  slackutil.ReadOperations
+	writeClient slackutil.WriteOperations
+	config      repoConfig
+	dryrun      bool
 }
 
 // Setup creates the necessary setup to make calls to work with slack
-func Setup(userName, tokenPath, repo string, dryrun bool) (*messageHandler, error) {
-	client, err := slackutil.NewClient(userName, tokenPath)
+func Setup(userName, readTokenPath, writeTokenPath, repo string, dryrun bool) (*MessageHandler, error) {
+	readClient, err := slackutil.NewReadClient(userName, readTokenPath)
 	if err != nil {
-		return nil, fmt.Errorf("cannot authenticate to slack: %v", err)
+		return nil, fmt.Errorf("cannot authenticate to slack read client: %v", err)
+	}
+	writeClient, err := slackutil.NewWriteClient(userName, writeTokenPath)
+	if err != nil {
+		return nil, fmt.Errorf("cannot authenticate to slack write client: %v", err)
 	}
 	var config *repoConfig
 	for _, repoConfig := range repoConfigs {

@@ -52,30 +62,66 @@ func Setup(userName, tokenPath, repo string, dryrun bool) (*messageHandler, erro
 	if config == nil {
 		return nil, fmt.Errorf("no channel configuration found for repo %v", repo)
 	}
-	return &messageHandler{client: client, config: *config, dryrun: dryrun}, nil
+	return &MessageHandler{
+		readClient:  readClient,
+		writeClient: writeClient,
+		config:      *config,
+		dryrun:      dryrun,
+	}, nil
 }
 
-// Post will post the given text to the slack channel(s)
-func (smh *messageHandler) Post(text string) error {
-	// TODO(Fredy-Z): add deduplication logic, maybe do not send more than one alert within 24 hours?
-	errs := make([]error, 0)
+// SendAlert will send the alert text to the slack channel(s)
+func (smh *MessageHandler) SendAlert(text string) error {
+	dryrun := smh.dryrun
 	channels := smh.config.channels
-	mux := &sync.Mutex{}
+	errCh := make(chan error)
 	var wg sync.WaitGroup
 	for i := range channels {
 		channel := channels[i]
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
+			// get the recent message history in the channel for this user
+			startTime := time.Now().Add(-1 * *minInterval)
+			var messageHistory []string
+			if err := helpers.Run(
+				fmt.Sprintf("retrieving message history in channel %q", channel.name),
+				func() error {
+					var err error
+					messageHistory, err = smh.readClient.MessageHistory(channel.identity, startTime)
+					return err
+				},
+				dryrun,
+			); err != nil {
+				errCh <- fmt.Errorf("failed to retrieve message history in channel %q", channel.name)
+			}
+			// do not send message again if messages were sent on the same channel a short while ago
+			if len(messageHistory) != 0 {
+				return
+			}
+			// send the alert message to the channel
 			message := fmt.Sprintf(messageTemplate, time.Now(), text)
-			if err := smh.client.Post(message, channel.identity); err != nil {
-				mux.Lock()
-				errs = append(errs, fmt.Errorf("failed to send message to channel %v", channel))
-				mux.Unlock()
+			if err := helpers.Run(
+				fmt.Sprintf("sending message %q to channel %q", message, channel.name),
+				func() error {
+					return smh.writeClient.Post(message, channel.identity)
+				},
+				dryrun,
+			); err != nil {
+				errCh <- fmt.Errorf("failed to send message to channel %q", channel.name)
 			}
 		}()
 	}
-	wg.Wait()
 
-	return alerter.CombineErrors(errs)
+	go func() {
+		wg.Wait()
+		close(errCh)
+	}()
+
+	errs := make([]error, 0)
+	for err := range errCh {
+		errs = append(errs, err)
+	}
+
+	return helpers.CombineErrors(errs)
 }
@@ -0,0 +1,71 @@
/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// fakeslackutil.go fakes SlackClient for testing purpose

package fakeslackutil

import (
	"sync"
	"time"
)

type messageEntry struct {
	text     string
	sentTime time.Time
}

// FakeSlackClient is a faked client, implements all functions of slackutil.ReadOperations and slackutil.WriteOperations
type FakeSlackClient struct {
	History map[string][]messageEntry
	mutex   sync.RWMutex
}

// NewFakeSlackClient creates a FakeSlackClient and initialize it's maps
func NewFakeSlackClient() *FakeSlackClient {
	return &FakeSlackClient{
		History: make(map[string][]messageEntry), // map of channel name: slice of messages sent to the channel
		mutex:   sync.RWMutex{},
	}
}

// MessageHistory returns the messages to the channel from the given startTime
func (c *FakeSlackClient) MessageHistory(channel string, startTime time.Time) ([]string, error) {
	c.mutex.Lock()
	messages := make([]string, 0)
	if history, ok := c.History[channel]; ok {
		for _, msg := range history {
			if time.Now().After(startTime) {
				messages = append(messages, msg.text)
			}
		}
	}
	c.mutex.Unlock()
	return messages, nil
}

// Post sends the text as a message to the given channel
func (c *FakeSlackClient) Post(text, channel string) error {
	c.mutex.Lock()
	messages := make([]messageEntry, 0)
	if history, ok := c.History[channel]; ok {
		messages = history
	}
	messages = append(messages, messageEntry{text: text, sentTime: time.Now()})
	c.History[channel] = messages
	c.mutex.Unlock()
	return nil
}
@@ -0,0 +1,53 @@
/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// http.go includes functions to send HTTP requests.

package slackutil

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

// post sends an HTTP post request
func post(url string, uv url.Values) ([]byte, error) {
	resp, err := http.PostForm(url, uv)
	if err != nil {
		return nil, err
	}
	return handleResponse(resp)
}

// get sends an HTTP get request
func get(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	return handleResponse(resp)
}

// handleResponse handles the HTTP response and returns the body content
func handleResponse(resp *http.Response) ([]byte, error) {
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("http response code is not StatusOK: '%v'", resp.StatusCode)
	}
	return ioutil.ReadAll(resp.Body)
}
@@ -0,0 +1,88 @@
/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// message_read.go includes functions to read messages from Slack.

package slackutil

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/url"
	"strconv"
	"strings"
	"time"
)

const conversationHistoryURL = "https://slack.com/api/conversations.history"

// ReadOperations defines the read operations that can be done to Slack
type ReadOperations interface {
	MessageHistory(channel string, startTime time.Time) ([]string, error)
}

// readClient contains Slack bot related information to perform read operations
type readClient struct {
	userName string
	tokenStr string
}

// NewReadClient reads token file and stores it for later authentication
func NewReadClient(userName, tokenPath string) (ReadOperations, error) {
	b, err := ioutil.ReadFile(tokenPath)
	if err != nil {
		return nil, err
	}
	return &readClient{
		userName: userName,
		tokenStr: strings.TrimSpace(string(b)),
	}, nil
}

func (c *readClient) MessageHistory(channel string, startTime time.Time) ([]string, error) {
	u, _ := url.Parse(conversationHistoryURL)
	q := u.Query()
	q.Add("username", c.userName)
	q.Add("token", c.tokenStr)
	q.Add("channel", channel)
	q.Add("oldest", strconv.FormatInt(startTime.Unix(), 10))
	u.RawQuery = q.Encode()

	content, err := get(u.String())
	if err != nil {
		return nil, err
	}

	// response code could also be 200 if channel doesn't exist, parse response body to find out
	type m struct {
		Text string `json:"text"`
	}
	var r struct {
		OK       bool `json:"ok"`
		Messages []m  `json:"messages"`
	}
	if err = json.Unmarshal(content, &r); nil != err || !r.OK {
		return nil, fmt.Errorf("response not ok '%s'", string(content))
	}

	res := make([]string, len(r.Messages))
	for i, message := range r.Messages {
		res[i] = message.Text
	}

	return res, nil
}
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// messaging.go includes functions to send message to Slack channel.
+// message_write.go includes functions to send messages to Slack.
 
 package slackutil
 

@@ -24,67 +24,54 @@ import (
 	"io/ioutil"
 	"strings"
 
-	"net/http"
 	"net/url"
 )
 
 const postMessageURL = "https://slack.com/api/chat.postMessage"
 
-// Operations defines the operations that can be done to Slack
-type Operations interface {
+// WriteOperations defines the write operations that can be done to Slack
+type WriteOperations interface {
 	Post(text, channel string) error
 }
 
-// client contains Slack bot related information
-type client struct {
-	userName  string
-	tokenStr  string
-	iconEmoji *string
+// writeClient contains Slack bot related information to perform write operations
+type writeClient struct {
+	userName string
+	tokenStr string
 }
 
-// NewClient reads token file and stores it for later authentication
-func NewClient(userName, tokenPath string) (Operations, error) {
+// NewWriteClient reads token file and stores it for later authentication
+func NewWriteClient(userName, tokenPath string) (WriteOperations, error) {
 	b, err := ioutil.ReadFile(tokenPath)
 	if err != nil {
 		return nil, err
 	}
-	return &client{
+	return &writeClient{
 		userName: userName,
 		tokenStr: strings.TrimSpace(string(b)),
 	}, nil
 }
 
 // Post posts the given text to channel
-func (c *client) Post(text, channel string) error {
+func (c *writeClient) Post(text, channel string) error {
 	uv := url.Values{}
 	uv.Add("username", c.userName)
 	uv.Add("token", c.tokenStr)
-	if nil != c.iconEmoji {
-		uv.Add("icon_emoji", *c.iconEmoji)
-	}
 	uv.Add("channel", channel)
 	uv.Add("text", text)
 
 	return c.postMessage(uv)
 }
 
 // postMessage does http post
 func (c *client) postMessage(uv url.Values) error {
-	resp, err := http.PostForm(postMessageURL, uv)
+	content, err := post(postMessageURL, uv)
 	if err != nil {
 		return err
 	}
-	defer resp.Body.Close()
-	t, _ := ioutil.ReadAll(resp.Body)
-	if resp.StatusCode != http.StatusOK {
-		return fmt.Errorf("http response code is not '%d': '%s'", http.StatusOK, string(t))
-	}
 
 	// response code could also be 200 if channel doesn't exist, parse response body to find out
 	var b struct {
 		OK bool `json:"ok"`
 	}
-	if err = json.Unmarshal(t, &b); nil != err || !b.OK {
-		return fmt.Errorf("response not ok '%s'", string(t))
+	if err = json.Unmarshal(content, &b); nil != err || !b.OK {
+		return fmt.Errorf("response not ok '%s'", string(content))
 	}
 
 	return nil
 }
@@ -16,6 +16,10 @@ limitations under the License.
 
 package coveragecalculator
 
+import (
+	"math"
+)
+
 // CoverageValues encapsulates all the coverage related values.
 type CoverageValues struct {
 	TotalFields   int

@@ -25,12 +29,37 @@ type CoverageValues struct {
 	PercentCoverage float64
 }
 
+// CoveragePercentages encapsulate percentage coverage for resources.
+type CoveragePercentages struct {
+
+	// ResourceCoverages maps percentage coverage per resource.
+	ResourceCoverages map[string]float64
+}
+
 // CalculatePercentageValue calculates percentage value based on other fields.
 func (c *CoverageValues) CalculatePercentageValue() {
 	if c.TotalFields > 0 {
 		c.PercentCoverage = (float64(c.CoveredFields) / float64(c.TotalFields-c.IgnoredFields)) * 100
 	}
 }
 
+// GetAndRemoveResourceValue utility method to implement "get and delete"
+// semantics. This makes templating operations easy.
+func (c *CoveragePercentages) GetAndRemoveResourceValue(resource string) float64 {
+	if resourcePercentage, ok := c.ResourceCoverages[resource]; ok {
+		delete(c.ResourceCoverages, resource)
+		return resourcePercentage
+	}
+
+	return 0.0
+}
+
+// IsFailedBuild utility method to indicate if CoveragePercentages indicate
+// values of a failed build.
+func (c *CoveragePercentages) IsFailedBuild() bool {
+	return math.Abs(c.ResourceCoverages["Overall"]-0) == 0
+}
+
 // CalculateTypeCoverage calculates aggregate coverage values based on provided []TypeCoverage
 func CalculateTypeCoverage(typeCoverage []TypeCoverage) *CoverageValues {
 	cv := CoverageValues{}
@@ -25,6 +25,7 @@ import (
 	"os/user"
 	"path"
 
+	"github.com/pkg/errors"
 	"knative.dev/pkg/test/webhook-apicoverage/coveragecalculator"
 	"knative.dev/pkg/test/webhook-apicoverage/view"
 	"knative.dev/pkg/test/webhook-apicoverage/webhook"

@@ -47,6 +48,10 @@ const (
 
 	// WebhookTotalCoverageEndPoint constant for total coverage API endpoint.
 	WebhookTotalCoverageEndPoint = "https://%s:443" + webhook.TotalCoverageEndPoint
+
+	// WebhookResourcePercentageCoverageEndPoint constant for
+	// ResourcePercentageCoverage API endpoint.
+	WebhookResourcePercentageCoverageEndPoint = "https://%s:443" + webhook.ResourcePercentageCoverageEndPoint
 )
 
 // GetDefaultKubePath helper method to fetch kubeconfig path.

@@ -107,14 +112,15 @@ func GetResourceCoverage(webhookIP string, resourceName string) (string, error)
 	}
 	resp, err := client.Get(fmt.Sprintf(WebhookResourceCoverageEndPoint, webhookIP, resourceName))
 	if err != nil {
-		return "", fmt.Errorf("encountered error making resource coverage request: %v", err)
-	} else if resp.StatusCode != http.StatusOK {
+		return "", errors.Wrap(err, "encountered error making resource coverage request")
+	}
+	if resp.StatusCode != http.StatusOK {
 		return "", fmt.Errorf("invalid HTTP Status received for resource coverage request. Status: %d", resp.StatusCode)
 	}
 
 	var body []byte
 	if body, err = ioutil.ReadAll(resp.Body); err != nil {
-		return "", fmt.Errorf("error reading resource coverage response: %v", err)
+		return "", errors.Wrap(err, "Failed reading resource coverage response")
 	}
 
 	return string(body), nil

@@ -131,11 +137,7 @@ func GetAndWriteResourceCoverage(webhookIP string, resourceName string, outputFi
 		return err
 	}
 
-	if err = ioutil.WriteFile(outputFile, []byte(resourceCoverage), 0400); err != nil {
-		return fmt.Errorf("error writing resource coverage to output file: %s, error: %v coverage: %s", outputFile, err, resourceCoverage)
-	}
-
-	return nil
+	return ioutil.WriteFile(outputFile, []byte(resourceCoverage), 0400)
 }
 
 // GetTotalCoverage calls the total coverage API to retrieve total coverage values.

@@ -147,8 +149,9 @@ func GetTotalCoverage(webhookIP string) (*coveragecalculator.CoverageValues, err
 
 	resp, err := client.Get(fmt.Sprintf(WebhookTotalCoverageEndPoint, webhookIP))
 	if err != nil {
-		return nil, fmt.Errorf("encountered error making total coverage request: %v", err)
-	} else if resp.StatusCode != http.StatusOK {
+		return nil, errors.Wrap(err, "encountered error making total coverage request")
+	}
+	if resp.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("invalid HTTP Status received for total coverage request. Status: %d", resp.StatusCode)
 	}
 

@@ -159,7 +162,7 @@ func GetTotalCoverage(webhookIP string) (*coveragecalculator.CoverageValues, err
 
 	var coverage coveragecalculator.CoverageValues
 	if err = json.Unmarshal(body, &coverage); err != nil {
-		return nil, fmt.Errorf("error unmarshalling response to CoverageValues instance: %v", err)
+		return nil, errors.Wrap(err, "Failed unmarshalling response to CoverageValues instance")
 	}
 
 	return &coverage, nil

@@ -176,13 +179,53 @@ func GetAndWriteTotalCoverage(webhookIP string, outputFile string) error {
 		return err
 	}
 
-	if htmlData, err := view.GetHTMLCoverageValuesDisplay(totalCoverage); err != nil {
-		return fmt.Errorf("error building html file from total coverage. error: %v", err)
-	} else {
-		if err = ioutil.WriteFile(outputFile, []byte(htmlData), 0400); err != nil {
-			return fmt.Errorf("error writing total coverage to output file: %s, error: %v", outputFile, err)
-		}
+	htmlData, err := view.GetHTMLCoverageValuesDisplay(totalCoverage)
+	if err != nil {
+		return errors.Wrap(err, "Failed building html file from total coverage. error")
 	}
 
-	return nil
+	return ioutil.WriteFile(outputFile, []byte(htmlData), 0400)
+}
+
+// GetResourcePercentages calls resource percentage coverage API to retrieve
+// percentage values.
+func GetResourcePercentages(webhookIP string) (
+	*coveragecalculator.CoveragePercentages, error) {
+	client := &http.Client{Transport: &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+	},
+	}
+
+	resp, err := client.Get(fmt.Sprintf(WebhookResourcePercentageCoverageEndPoint,
+		webhookIP))
+	if err != nil {
+		return nil, errors.Wrap(err, "encountered error making resource percentage coverage request")
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("Invalid HTTP Status received for resource"+
+			" percentage coverage request. Status: %d", resp.StatusCode)
+	}
+
+	var body []byte
+	if body, err = ioutil.ReadAll(resp.Body); err != nil {
+		return nil, errors.Wrap(err, "Failed reading resource percentage coverage response")
+	}
+
+	coveragePercentages := &coveragecalculator.CoveragePercentages{}
+	if err = json.Unmarshal(body, coveragePercentages); err != nil {
+		return nil, errors.Wrap(err, "Failed unmarshalling response to CoveragePercentages instance")
+	}
+
+	return coveragePercentages, nil
+}
+
+// WriteResourcePercentages writes CoveragePercentages to junit_xml output file.
+func WriteResourcePercentages(outputFile string,
+	coveragePercentages *coveragecalculator.CoveragePercentages) error {
+	htmlData, err := view.GetCoveragePercentageXMLDisplay(coveragePercentages)
+	if err != nil {
+		errors.Wrap(err, "Failed building coverage percentage xml file")
+	}
+
+	return ioutil.WriteFile(outputFile, []byte(htmlData), 0400)
 }
@@ -37,3 +37,8 @@ Covered Fields: <Number of fields covered>
 Ignored Fields: <Number of fields ignored>
 Coverage Percentage: <Percentage value of coverage>
 ```
+
+`GetCoveragePercentageXMLDisplay()` is a utility method that can be used by
+repos to produce coverage percentage for each resource in a Junit XML results
+file. The method takes [CoveragePercentages](../coveragecalculator/calculator.go)
+as input and produces a Junit result file format.
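For illustration only, a hedged sketch of the flow that added README text describes, written as if it sat alongside the GetResourcePercentages/WriteResourcePercentages helpers introduced earlier in this commit (the webhook IP and output path are placeholders):

	// exampleWriteCoverageJunit fetches per-resource coverage percentages from the
	// webhook and writes them out as a JUnit XML results file; WriteResourcePercentages
	// renders the values through GetCoveragePercentageXMLDisplay.
	func exampleWriteCoverageJunit() error {
		coveragePercentages, err := GetResourcePercentages("10.0.0.1")
		if err != nil {
			return err
		}
		return WriteResourcePercentages("junit_coverage.xml", coveragePercentages)
	}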
@@ -1,25 +0,0 @@
<!DOCTYPE html>
<html>
<style type="text/css">
<!--

.tab { margin-left: 50px; }

.styleheader {color: white; size: A4}

.values {color: yellow; size: A3}

table, th, td { border: 1px solid white; text-align: center}

.braces {color: white; size: A3}
-->
</style>
<body style="background-color:rgb(0,0,0); font-family: Arial">
<table style="width: 30%">
<tr class="styleheader"><td>Total Fields</td><td>{{ .TotalFields }}</td></tr>
<tr class="styleheader"><td>Covered Fields</td><td>{{ .CoveredFields }}</td></tr>
<tr class="styleheader"><td>Ignored Fields</td><td>{{ .IgnoredFields }}</td></tr>
<tr class="styleheader"><td>Coverage Percentage</td><td>{{ .PercentCoverage }}</td></tr>
</table>
</body>
</html>
@@ -28,15 +28,16 @@ type HtmlDisplayData struct {
 	CoverageNumbers *coveragecalculator.CoverageValues
 }
 
-// GetHTMLDisplay is a helper method to display API Coverage details in json-like format inside a HTML page.
-func GetHTMLDisplay(coverageData []coveragecalculator.TypeCoverage, coverageValues *coveragecalculator.CoverageValues) (string, error) {
-
+// GetHTMLDisplay is a helper method to display API Coverage details in
+// json-like format inside a HTML page.
+func GetHTMLDisplay(coverageData []coveragecalculator.TypeCoverage,
+	coverageValues *coveragecalculator.CoverageValues) (string, error) {
 	htmlData := HtmlDisplayData{
 		TypeCoverages:   coverageData,
 		CoverageNumbers: coverageValues,
 	}
 
-	tmpl, err := template.ParseFiles("type_coverage.html")
+	tmpl, err := template.New("TypeCoverage").Parse(TypeCoverageTempl)
 	if err != nil {
 		return "", err
 	}

@@ -52,8 +53,7 @@ func GetHTMLDisplay(coverageData []coveragecalculator.TypeCoverage, coverageValu
 
 // GetHTMLCoverageValuesDisplay is a helper method to display coverage values inside a HTML table.
 func GetHTMLCoverageValuesDisplay(coverageValues *coveragecalculator.CoverageValues) (string, error) {
-
-	tmpl, err := template.ParseFiles("aggregate_coverage.html")
+	tmpl, err := template.New("AggregateCoverage").Parse(AggregateCoverageTmpl)
 	if err != nil {
 		return "", err
 	}
@@ -1,4 +1,26 @@
-<!DOCTYPE html>
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package view
+
+import (
+	"fmt"
+)
+
+var TypeCoverageTempl = fmt.Sprint(`<!DOCTYPE html>
 <html>
 <style type="text/css">
 <!--

@@ -59,3 +81,31 @@
 </table>
 </body>
 </html>
+`)
+
+var AggregateCoverageTmpl = fmt.Sprint(`<!DOCTYPE html>
+<html>
+<style type="text/css">
+<!--
+
+.tab { margin-left: 50px; }
+
+.styleheader {color: white; size: A4}
+
+.values {color: yellow; size: A3}
+
+table, th, td { border: 1px solid white; text-align: center}
+
+.braces {color: white; size: A3}
+-->
+</style>
+<body style="background-color:rgb(0,0,0); font-family: Arial">
+<table style="width: 30%">
+<tr class="styleheader"><td>Total Fields</td><td>{{ .TotalFields }}</td></tr>
+<tr class="styleheader"><td>Covered Fields</td><td>{{ .CoveredFields }}</td></tr>
+<tr class="styleheader"><td>Ignored Fields</td><td>{{ .IgnoredFields }}</td></tr>
+<tr class="styleheader"><td>Coverage Percentage</td><td>{{ .PercentCoverage }}</td></tr>
+</table>
+</body>
+</html>
+`)
@@ -0,0 +1,42 @@
/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package view

import (
	"strings"
	"text/template"

	"knative.dev/pkg/test/webhook-apicoverage/coveragecalculator"
)

// GetCoveragePercentageXMLDisplay is a helper method to write resource coverage
// percentage values to junit xml file format.
func GetCoveragePercentageXMLDisplay(
	percentageCoverages *coveragecalculator.CoveragePercentages) (string, error) {
	tmpl, err := template.New("JunitResult").Parse(JunitResultTmpl)
	if err != nil {
		return "", err
	}

	var buffer strings.Builder
	err = tmpl.Execute(&buffer, percentageCoverages)
	if err != nil {
		return "", err
	}

	return buffer.String(), nil
}

@@ -0,0 +1,41 @@
/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package view

import (
	"fmt"
)

var JunitResultTmpl = fmt.Sprint(`<testsuites>
	<testsuite name="" time="0" {{ if .IsFailedBuild }} failures="1" {{ else }} failures = "0" {{ end }} tests="0">
		<testcase name="Overall" time="0" classname="go_coverage">
			{{ if .IsFailedBuild }}
			<failure>true</failure>
			{{ end }}
			<properties>
				<property name="coverage" value="{{ .GetAndRemoveResourceValue "Overall" }}"/>
			</properties>
		</testcase>
		{{ range $key, $value := .ResourceCoverages }}
		<testcase name="{{ $key }}" time="0" classname="go_coverage">
			<properties>
				<property name="coverage" value="{{ $value }}"/>
			</properties>
		</testcase>
		{{end}}
	</testsuite>
</testsuites>`)
@ -50,6 +50,10 @@ const (
	// TotalCoverageEndPoint is the endpoint for Total Coverage API
	TotalCoverageEndPoint = "/totalcoverage"

	// ResourcePercentageCoverageEndPoint is the end point for Resource Percentage
	// coverages API
	ResourcePercentageCoverageEndPoint = "/resourcepercentagecoverage"

	// resourceChannelQueueSize size of the queue maintained for resource channel.
	resourceChannelQueueSize = 10
)
@ -209,3 +213,51 @@ func (a *APICoverageRecorder) GetTotalCoverage(w http.ResponseWriter, r *http.Re
		fmt.Fprintf(w, "error writing total coverage response: %v", err)
	}
}

// GetResourceCoveragePercentages goes over all the resources set up for the
// apicoverage tool and returns percentage coverage for each resource.
func (a *APICoverageRecorder) GetResourceCoveragePercentages(
	w http.ResponseWriter, r *http.Request) {
	var (
		ignoredFields coveragecalculator.IgnoredFields
		err           error
	)

	ignoredFieldsFilePath :=
		os.Getenv("KO_DATA_PATH") + "/ignoredfields.yaml"
	if err = ignoredFields.ReadFromFile(ignoredFieldsFilePath); err != nil {
		a.Logger.Errorf("Error reading file %s: %v",
			ignoredFieldsFilePath, err)
	}

	totalCoverage := coveragecalculator.CoverageValues{}
	percentCoverages := make(map[string]float64)
	for resource := range a.ResourceMap {
		tree := a.ResourceForest.TopLevelTrees[resource.Kind]
		typeCoverage := tree.BuildCoverageData(a.NodeRules, a.FieldRules,
			ignoredFields)
		coverageValues := coveragecalculator.CalculateTypeCoverage(typeCoverage)
		coverageValues.CalculatePercentageValue()
		percentCoverages[resource.Kind] = coverageValues.PercentCoverage
		totalCoverage.TotalFields += coverageValues.TotalFields
		totalCoverage.CoveredFields += coverageValues.CoveredFields
		totalCoverage.IgnoredFields += coverageValues.IgnoredFields
	}
	totalCoverage.CalculatePercentageValue()
	percentCoverages["Overall"] = totalCoverage.PercentCoverage

	var body []byte
	if body, err = json.Marshal(
		coveragecalculator.CoveragePercentages{
			ResourceCoverages: percentCoverages,
		}); err != nil {
		fmt.Fprintf(w, "error marshalling percentage coverage response: %v",
			err)
		return
	}

	if _, err = w.Write(body); err != nil {
		fmt.Fprintf(w, "error writing percentage coverage response: %v",
			err)
	}
}
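For context, a rough sketch of how a caller might consume the new endpoint. Only the path "/resourcepercentagecoverage" and the JSON shape (a marshalled CoveragePercentages) come from the code above; the host, port, and handler registration are assumptions:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	"knative.dev/pkg/test/webhook-apicoverage/coveragecalculator"
)

func main() {
	// The webhook address is a placeholder for wherever the recorder is serving.
	resp, err := http.Get("http://localhost:8080/resourcepercentagecoverage")
	if err != nil {
		log.Fatalf("calling coverage endpoint: %v", err)
	}
	defer resp.Body.Close()

	// The handler marshals a CoveragePercentages value, so decode into the same type.
	var percentages coveragecalculator.CoveragePercentages
	if err := json.NewDecoder(resp.Body).Decode(&percentages); err != nil {
		log.Fatalf("decoding coverage response: %v", err)
	}

	for resource, pct := range percentages.ResourceCoverages {
		fmt.Printf("%s: %.2f%%\n", resource, pct)
	}
}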
@ -24,6 +24,6 @@ type Client interface {
// ClusterOperations contains all provider-specific logic
type ClusterOperations interface {
	Provider() string
	Initialize() error
	Acquire() error
	Delete() error
}
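A short, self-contained sketch of how a caller might drive any ClusterOperations implementation through its lifecycle; the fake implementation and the run helper below are illustrative only and not part of the change:

package main

import "log"

// ClusterOperations mirrors the interface above; redeclared here so the sketch compiles on its own.
type ClusterOperations interface {
	Provider() string
	Initialize() error
	Acquire() error
	Delete() error
}

// fakeCluster is a hypothetical no-op implementation, handy for exercising callers in tests.
type fakeCluster struct{}

func (f *fakeCluster) Provider() string  { return "fake" }
func (f *fakeCluster) Initialize() error { return nil }
func (f *fakeCluster) Acquire() error    { return nil }
func (f *fakeCluster) Delete() error     { return nil }

// run drives any implementation through the expected Initialize -> Acquire -> Delete order.
func run(ops ClusterOperations) error {
	log.Printf("provider: %s", ops.Provider())
	if err := ops.Initialize(); err != nil {
		return err
	}
	if err := ops.Acquire(); err != nil {
		return err
	}
	return ops.Delete()
}

func main() {
	if err := run(&fakeCluster{}); err != nil {
		log.Fatal(err)
	}
}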
@ -17,6 +17,7 @@ limitations under the License.
package clustermanager

import (
	"errors"
	"fmt"
	"log"
	"strings"
@ -41,7 +42,9 @@ const (

var (
	DefaultGKEBackupRegions = []string{"us-west1", "us-east1"}
	// This is an arbitrary number determined based on past experience
	protectedProjects = []string{"knative-tests"}
	protectedClusters = []string{"knative-prow"}
	// These are arbitrary numbers determined based on past experience
	creationTimeout = 20 * time.Minute
)

@ -74,6 +77,7 @@ type GKECluster struct {
type GKESDKOperations interface {
	create(string, string, *container.CreateClusterRequest) (*container.Operation, error)
	get(string, string, string) (*container.Cluster, error)
	getOperation(string, string, string) (*container.Operation, error)
}

// GKESDKClient implements GKESDKOperations
@ -91,6 +95,11 @@ func (gsc *GKESDKClient) get(project, location, cluster string) (*container.Clus
	return gsc.Projects.Locations.Clusters.Get(clusterFullPath).Context(context.Background()).Do()
}

func (gsc *GKESDKClient) getOperation(project, location, opName string) (*container.Operation, error) {
	name := fmt.Sprintf("projects/%s/locations/%s/operations/%s", project, location, opName)
	return gsc.Service.Projects.Locations.Operations.Get(name).Do()
}

// Setup sets up a GKECluster client.
// numNodes: default to 3 if not provided
// nodeType: default to n1-standard-4 if not provided
@ -166,9 +175,12 @@ func (gc *GKECluster) Initialize() error {
		}
	}
	if nil == gc.Project || "" == *gc.Project {
		return fmt.Errorf("gcp project must be set")
		return errors.New("gcp project must be set")
	}
	log.Printf("use project '%s' for running test", *gc.Project)
	if !common.IsProw() && nil == gc.Cluster {
		gc.NeedCleanup = true
	}
	log.Printf("Using project %q for running test", *gc.Project)
	return nil
}

@ -182,6 +194,7 @@ func (gc *GKECluster) Provider() string {
// in us-central1, and default BackupRegions are us-west1 and us-east1. If
// Region or Zone is provided then there are no retries
func (gc *GKECluster) Acquire() error {
	gc.ensureProtected()
	var err error
	// Check if using existing cluster
	if nil != gc.Cluster {
@ -217,41 +230,30 @@ func (gc *GKECluster) Acquire() error {
			},
			ProjectId: *gc.Project,
		}
		log.Printf("Creating cluster in %s", getClusterLocation(region, gc.Request.Zone))
		_, err = gc.operations.create(*gc.Project, getClusterLocation(region, gc.Request.Zone), rb)

		clusterLoc := getClusterLocation(region, gc.Request.Zone)
		// TODO(chaodaiG): add deleting logic once cluster deletion logic is done

		log.Printf("Creating cluster %q in %q", clusterName, clusterLoc)
		var createOp *container.Operation
		createOp, err = gc.operations.create(*gc.Project, clusterLoc, rb)
		if nil == err {
			// The process above doesn't seem to wait, wait for it
			log.Printf("Waiting for cluster creation")
			timeout := time.After(creationTimeout)
			tick := time.Tick(50 * time.Millisecond)
			for {
				select {
				// Got a timeout! fail with a timeout error
				case <-timeout:
					err = fmt.Errorf("timed out waiting for cluster creation")
					break
				case <-tick:
					cluster, err = gc.operations.get(*gc.Project, getClusterLocation(region, gc.Request.Zone), clusterName)
				}
				if err != nil || cluster.Status == "RUNNING" {
					break
				}
				if cluster.Status != "PROVISIONING" {
					err = fmt.Errorf("cluster in bad state: '%s'", cluster.Status)
					break
				}
			if err = gc.wait(clusterLoc, createOp.Name, creationTimeout); nil == err {
				cluster, err = gc.operations.get(*gc.Project, clusterLoc, rb.Cluster.Name)
			}
		}

		if nil != err {
			errMsg := fmt.Sprintf("error creating cluster: '%v'", err)
			if gc.NeedCleanup { // Delete half created cluster if it's user created
				// TODO(chaodaiG): add this part when deletion logic is done
			}
			// TODO(chaodaiG): catch specific errors as we know what the errors look like for stockout etc.
			if len(regions) != i+1 {
				errMsg = fmt.Sprintf("%s. Retry another region '%s' for cluster creation", errMsg, regions[i+1])
			}
			log.Printf(errMsg)
		} else {
			log.Printf("cluster creation succeeded")
			log.Print("Cluster creation completed")
			gc.Cluster = cluster
			break
		}
@ -260,13 +262,68 @@ func (gc *GKECluster) Acquire() error {
	return err
}

// Delete deletes a GKE cluster
func (gc *GKECluster) Delete() error {
	if !gc.NeedCleanup {
		return nil
// wait depends on a unique opName (operation ID created by the cloud), and waits until
// it's done
func (gc *GKECluster) wait(location, opName string, wait time.Duration) error {
	const (
		pendingStatus = "PENDING"
		runningStatus = "RUNNING"
		doneStatus    = "DONE"
	)
	var op *container.Operation
	var err error

	timeout := time.After(wait)
	tick := time.Tick(500 * time.Millisecond)
	for {
		select {
		// Got a timeout! fail with a timeout error
		case <-timeout:
			return errors.New("timed out waiting")
		case <-tick:
			// Retry 3 times in case of weird network error, or rate limiting
			for r, w := 0, 50*time.Microsecond; r < 3; r, w = r+1, w*2 {
				op, err = gc.operations.getOperation(*gc.Project, location, opName)
				if nil == err {
					if op.Status == doneStatus {
						return nil
					} else if op.Status == pendingStatus || op.Status == runningStatus {
						// Valid operation, no need to retry
						break
					} else {
						// Have seen intermittent error state and fixed itself,
						// let it retry to avoid too much flakiness
						err = fmt.Errorf("unexpected operation status: %q", op.Status)
					}
				}
				time.Sleep(w)
			}
			// If err still persists after retries, exit
			if nil != err {
				return err
			}
		}
	}

	return err
}

// ensureProtected ensures not operating on protected project/cluster
func (gc *GKECluster) ensureProtected() {
	if nil != gc.Project {
		for _, pp := range protectedProjects {
			if *gc.Project == pp {
				log.Fatalf("project %q is protected", *gc.Project)
			}
		}
	}
	if nil != gc.Cluster {
		for _, pc := range protectedClusters {
			if gc.Cluster.Name == pc {
				log.Fatalf("cluster %q is protected", gc.Cluster.Name)
			}
		}
	}
	// TODO: Perform GKE specific cluster deletion logics
	return nil
}

// checks for existing cluster by looking at kubeconfig,
@ -2,12 +2,14 @@ package tracing

import (
	"errors"
	"fmt"
	"io"
	"os"
	"sync"

	"contrib.go.opencensus.io/exporter/stackdriver"
	oczipkin "contrib.go.opencensus.io/exporter/zipkin"
	zipkin "github.com/openzipkin/zipkin-go"
	"github.com/openzipkin/zipkin-go"
	httpreporter "github.com/openzipkin/zipkin-go/reporter/http"
	"go.opencensus.io/trace"
	"go.uber.org/zap"
@ -16,7 +18,7 @@ import (
)

// ConfigOption is the interface for adding additional exporters and configuring opencensus tracing.
type ConfigOption func(*config.Config)
type ConfigOption func(*config.Config) error

// OpenCensusTracer is responsible for managing and updating configuration of OpenCensus tracing
type OpenCensusTracer struct {
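Since ConfigOption now returns an error, custom options can surface failures to ApplyConfig and Finish instead of only logging them. A hypothetical option written against the new signature, as if it lived alongside the tracing package code above, might look like this (the validation rule itself is only an illustration):

// withZipkinEndpointCheck is a hypothetical ConfigOption: it fails fast when the
// Zipkin endpoint is missing from the config, and tolerates the nil config that
// Finish() passes for teardown.
func withZipkinEndpointCheck() ConfigOption {
	return func(cfg *config.Config) error {
		if cfg == nil {
			return nil
		}
		if cfg.ZipkinEndpoint == "" {
			return errors.New("tracing config has no zipkin endpoint")
		}
		return nil
	}
}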
@ -46,14 +48,16 @@ func (oct *OpenCensusTracer) ApplyConfig(cfg *config.Config) error {
		return err
	}

	// Short circuit if our config hasnt changed
	// Short circuit if our config hasn't changed.
	if oct.curCfg != nil && oct.curCfg.Equals(cfg) {
		return nil
	}

	// Apply config options
	for _, configOpt := range oct.configOptions {
		configOpt(cfg)
		if err = configOpt(cfg); err != nil {
			return err
		}
	}

	// Set config
@ -70,7 +74,9 @@ func (oct *OpenCensusTracer) Finish() error {
	}

	for _, configOpt := range oct.configOptions {
		configOpt(nil)
		if err = configOpt(nil); err != nil {
			return err
		}
	}
	globalOct = nil

@ -108,7 +114,7 @@ func createOCTConfig(cfg *config.Config) *trace.Config {
// WithExporter returns a ConfigOption for use with NewOpenCensusTracer that configures
// it to export traces based on the configuration read from config-tracing.
func WithExporter(name string, logger *zap.SugaredLogger) ConfigOption {
	return func(cfg *config.Config) {
	return func(cfg *config.Config) error {
		var (
			exporter trace.Exporter
			closer   io.Closer
@ -120,15 +126,25 @@ func WithExporter(name string, logger *zap.SugaredLogger) ConfigOption {
			})
			if err != nil {
				logger.Errorw("error reading project-id from metadata", zap.Error(err))
				return
				return err
			}
			exporter = exp
		case config.Zipkin:
			// If name isn't specified, then zipkin.NewEndpoint will return an error saying that it
			// can't find the host named ''. So, if not specified, default it to this machine's
			// hostname.
			if name == "" {
				n, err := os.Hostname()
				if err != nil {
					return fmt.Errorf("unable to get hostname: %v", err)
				}
				name = n
			}
			hostPort := name + ":80"
			zipEP, err := zipkin.NewEndpoint(name, hostPort)
			if err != nil {
				logger.Errorw("error building zipkin endpoint", zap.Error(err))
				return
				return err
			}
			reporter := httpreporter.NewReporter(cfg.ZipkinEndpoint)
			exporter = oczipkin.NewExporter(reporter, zipEP)
@ -149,5 +165,7 @@ func WithExporter(name string, logger *zap.SugaredLogger) ConfigOption {

		globalOct.exporter = exporter
		globalOct.closer = closer

		return nil
	}
}

@ -32,7 +32,7 @@ type ZipkinReporterFactory func(*config.Config) (zipkinreporter.Reporter, error)
// - WithExporter() in production code
// - testing/FakeZipkinExporter() in test code.
func WithZipkinExporter(reporterFact ZipkinReporterFactory, endpoint *zipkinmodel.Endpoint) ConfigOption {
	return func(cfg *config.Config) {
	return func(cfg *config.Config) error {
		var (
			reporter zipkinreporter.Reporter
			exporter trace.Exporter
@ -43,8 +43,7 @@ func WithZipkinExporter(reporterFact ZipkinReporterFactory, endpoint *zipkinmode
		// do this before cleanup to minimize time where we have duplicate exporters
		reporter, err := reporterFact(cfg)
		if err != nil {
			// TODO(greghaynes) log this error
			return
			return err
		}
		exporter := zipkin.NewExporter(reporter, endpoint)
		trace.RegisterExporter(exporter)
@ -63,5 +62,7 @@ func WithZipkinExporter(reporterFact ZipkinReporterFactory, endpoint *zipkinmode

		oct.closer = reporter
		oct.exporter = exporter

		return nil
	}
}
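With both option constructors now returning error-aware closures, wiring an exporter might look roughly like the sketch below. It is not part of the change: it assumes NewOpenCensusTracer accepts ConfigOptions (as the WithExporter doc comment suggests), and the service name, port, and reporter factory are placeholders.

// setupTracing is a rough sketch of caller-side wiring, written as if it lived
// alongside the tracing package code above.
func setupTracing(cfg *config.Config) error {
	reporterFact := func(c *config.Config) (zipkinreporter.Reporter, error) {
		// Placeholder: build an HTTP reporter against the configured endpoint.
		return httpreporter.NewReporter(c.ZipkinEndpoint), nil
	}
	endpoint, err := zipkin.NewEndpoint("my-service", "my-service:80")
	if err != nil {
		return err
	}
	oct := NewOpenCensusTracer(WithZipkinExporter(reporterFact, endpoint))
	// Errors returned by the ConfigOption now surface here instead of being dropped.
	return oct.ApplyConfig(cfg)
}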