Update OpenCensus Stackdriver exporter version (#280)

* split and update

* fix a bug

* change the copyright year

* use fake metadata in tests

* exclude test coverage for gcp_metadata

* revert 2019 change on existing files
Yanwei Guo 2019-02-20 14:18:46 -08:00 committed by Knative Prow Robot
parent b273d660ad
commit 1494b24d7f
20 changed files with 3237 additions and 345 deletions

1
.gitattributes vendored

@@ -5,5 +5,6 @@
**/zz_generated.*.go linguist-generated=true
/client/** linguist-generated=true
/test/** coverage-excluded=true
/metrics/gcp_metadata.go coverage-excluded=true
*.sh text eol=lf

17
Gopkg.lock generated

@@ -14,15 +14,14 @@
version = "v0.34.0"
[[projects]]
digest = "1:b73244f6a5ab381f938bfd2d831c57eeea884b0fab333126cc175914ec82dec5"
digest = "1:b6eb7c2538ec2999a072c0e372a18d7b7e3aedac249f26e159586fa5f892909f"
name = "contrib.go.opencensus.io/exporter/stackdriver"
packages = [
".",
"monitoredresource",
]
pruneopts = "NUT"
revision = "2b7f4fc93daf5ec3048fa4fc1c15573466711a17"
version = "v0.8.0"
revision = "c06c82c832edca4eaf7b0241bd655560a1be0346"
[[projects]]
digest = "1:4a31397b1b81c6856aab6d2d963a727b4235af18adaaedc2cc51646ae812f683"
@@ -70,6 +69,18 @@
pruneopts = "NUT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:fa965c1fd0f17153f608037e109e62104058bc1d08d44849867795fd306fa8b8"
name = "github.com/census-instrumentation/opencensus-proto"
packages = [
"gen-go/agent/common/v1",
"gen-go/metrics/v1",
"gen-go/resource/v1",
]
pruneopts = "NUT"
revision = "7f2434bc10da710debe5c4315ed6d4df454b4024"
version = "v0.1.0"
[[projects]]
digest = "1:6b21090f60571b20b3ddc2c8e48547dffcf409498ed6002c2cada023725ed377"
name = "github.com/davecgh/go-spew"


@@ -46,6 +46,12 @@ required = [
# Needed because release 1.0.0 does not contain a LICENSE file
revision = "4bbc89b6501cca7dd6b5557d78d70c8d2c6e8b97"
[[override]]
name = "contrib.go.opencensus.io/exporter/stackdriver"
# HEAD as of 2019-02-11
# Needed because this includes a fix to support Stackdriver built-in metrics
revision = "c06c82c832edca4eaf7b0241bd655560a1be0346"
[[constraint]]
name = "github.com/knative/test-infra"
branch = "master"


@@ -15,23 +15,15 @@ package metrics
import (
"fmt"
"net/http"
"sync"
"contrib.go.opencensus.io/exporter/stackdriver"
"contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
"github.com/knative/pkg/metrics/metricskey"
"go.opencensus.io/exporter/prometheus"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.uber.org/zap"
)
var (
curMetricsExporter view.Exporter
curMetricsConfig *metricsConfig
curPromSrv *http.Server
getMonitoredResourceFunc func(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface)
metricsMux sync.Mutex
)
@@ -64,138 +56,6 @@ func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) error
return nil
}
func getKnativeRevisionMonitoredResource(gm *gcpMetadata) func(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
return func(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
tagsMap := getTagsMap(tags)
kr := &KnativeRevision{
// The first three resource labels are from metadata.
Project: gm.project,
Location: gm.location,
ClusterName: gm.cluster,
// The remaining resource labels come from the metric labels.
NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tagsMap),
ServiceName: valueOrUnknown(metricskey.LabelServiceName, tagsMap),
ConfigurationName: valueOrUnknown(metricskey.LabelConfigurationName, tagsMap),
RevisionName: valueOrUnknown(metricskey.LabelRevisionName, tagsMap),
}
var newTags []tag.Tag
for _, t := range tags {
// Keep the metrics labels that are not resource labels
if !metricskey.KnativeRevisionLabels.Has(t.Key.Name()) {
newTags = append(newTags, t)
}
}
return newTags, kr
}
}
func getTagsMap(tags []tag.Tag) map[string]string {
tagsMap := map[string]string{}
for _, t := range tags {
tagsMap[t.Key.Name()] = t.Value
}
return tagsMap
}
func valueOrUnknown(key string, tagsMap map[string]string) string {
if value, ok := tagsMap[key]; ok {
return value
}
return metricskey.ValueUnknown
}
func getGlobalMonitoredResource() func(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
return func(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
return tags, &Global{}
}
}
func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) {
setMonitoredResourceFunc(config, logger)
e, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: config.stackdriverProjectID,
MetricPrefix: config.domain + "/" + config.component,
GetMonitoredResource: getMonitoredResourceFunc,
DefaultMonitoringLabels: &stackdriver.Labels{},
})
if err != nil {
logger.Error("Failed to create the Stackdriver exporter: ", zap.Error(err))
return nil, err
}
logger.Infof("Created Opencensus Stackdriver exporter with config %v", config)
return e, nil
}
func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) {
e, err := prometheus.NewExporter(prometheus.Options{Namespace: config.component})
if err != nil {
logger.Error("Failed to create the Prometheus exporter.", zap.Error(err))
return nil, err
}
logger.Infof("Created Opencensus Prometheus exporter with config: %v. Start the server for Prometheus exporter.", config)
// Start the server for Prometheus scraping
go func() {
srv := startNewPromSrv(e)
srv.ListenAndServe()
}()
return e, nil
}
func getCurPromSrv() *http.Server {
metricsMux.Lock()
defer metricsMux.Unlock()
return curPromSrv
}
func resetCurPromSrv() {
metricsMux.Lock()
defer metricsMux.Unlock()
if curPromSrv != nil {
curPromSrv.Close()
curPromSrv = nil
}
}
func resetMonitoredResourceFunc() {
metricsMux.Lock()
defer metricsMux.Unlock()
if getMonitoredResourceFunc != nil {
getMonitoredResourceFunc = nil
}
}
func setMonitoredResourceFunc(config *metricsConfig, logger *zap.SugaredLogger) {
metricsMux.Lock()
defer metricsMux.Unlock()
if getMonitoredResourceFunc == nil {
gm := retrieveGCPMetadata()
metricsPrefix := config.domain + "/" + config.component
logger.Infof("metrics prefix: %s", metricsPrefix)
if metricskey.KnativeRevisionMetricsPrefixes.Has(metricsPrefix) {
getMonitoredResourceFunc = getKnativeRevisionMonitoredResource(gm)
} else {
getMonitoredResourceFunc = getGlobalMonitoredResource()
}
}
}
func startNewPromSrv(e *prometheus.Exporter) *http.Server {
sm := http.NewServeMux()
sm.Handle("/metrics", e)
metricsMux.Lock()
defer metricsMux.Unlock()
if curPromSrv != nil {
curPromSrv.Close()
}
curPromSrv = &http.Server{
Addr: ":9090",
Handler: sm,
}
return curPromSrv
}
func getCurMetricsExporter() view.Exporter {
metricsMux.Lock()
defer metricsMux.Unlock()


@@ -15,7 +15,6 @@ package metrics
import (
"os"
"testing"
"time"
. "github.com/knative/pkg/logging/testing"
"github.com/knative/pkg/metrics/metricskey"
@@ -67,93 +66,11 @@ func getResourceLabelValue(key string, tags []tag.Tag) string {
func TestMain(m *testing.M) {
resetCurPromSrv()
// Set gcpMetadataFunc for testing
gcpMetadataFunc = fakeGcpMetadataFun
os.Exit(m.Run())
}
func TestNewStackdriverExporterForGlobal(t *testing.T) {
resetMonitoredResourceFunc()
// The stackdriver project ID is required for stackdriver exporter.
e, err := newStackdriverExporter(&metricsConfig{
domain: servingDomain,
component: testComponent,
backendDestination: Stackdriver,
stackdriverProjectID: testProj}, TestLogger(t))
if err != nil {
t.Error(err)
}
if e == nil {
t.Error("expected a non-nil metrics exporter")
}
if getMonitoredResourceFunc == nil {
t.Error("expected a non-nil getMonitoredResourceFunc")
}
newTags, monitoredResource := getMonitoredResourceFunc(testView, testTags)
gotResType, labels := monitoredResource.MonitoredResource()
wantedResType := "global"
if gotResType != wantedResType {
t.Errorf("MonitoredResource=%v, got: %v", wantedResType, gotResType)
}
got := getResourceLabelValue(metricskey.LabelNamespaceName, newTags)
if got != testNS {
t.Errorf("expected new tag %v with value %v, got: %v", routeKey, testNS, newTags)
}
if len(labels) != 0 {
t.Errorf("expected no label, got: %v", labels)
}
}
func TestNewStackdriverExporterForKnativeRevision(t *testing.T) {
resetMonitoredResourceFunc()
e, err := newStackdriverExporter(&metricsConfig{
domain: servingDomain,
component: "autoscaler",
backendDestination: Stackdriver,
stackdriverProjectID: testProj}, TestLogger(t))
if err != nil {
t.Error(err)
}
if e == nil {
t.Error("expected a non-nil metrics exporter")
}
if getMonitoredResourceFunc == nil {
t.Error("expected a non-nil getMonitoredResourceFunc")
}
newTags, monitoredResource := getMonitoredResourceFunc(testView, testTags)
gotResType, labels := monitoredResource.MonitoredResource()
wantedResType := "knative_revision"
if gotResType != wantedResType {
t.Errorf("MonitoredResource=%v, got %v", wantedResType, gotResType)
}
got := getResourceLabelValue(metricskey.LabelRouteName, newTags)
if got != testRoute {
t.Errorf("expected new tag: %v, got: %v", routeKey, newTags)
}
got, ok := labels[metricskey.LabelNamespaceName]
if !ok || got != testNS {
t.Errorf("expected label %v with value %v, got: %v", metricskey.LabelNamespaceName, testNS, got)
}
got, ok = labels[metricskey.LabelConfigurationName]
if !ok || got != metricskey.ValueUnknown {
t.Errorf("expected label %v with value %v, got: %v", metricskey.LabelConfigurationName, metricskey.ValueUnknown, got)
}
}
func TestNewPrometheusExporter(t *testing.T) {
// The stackdriver project ID is not required for prometheus exporter.
e, err := newPrometheusExporter(&metricsConfig{
domain: servingDomain,
component: testComponent,
backendDestination: Prometheus,
stackdriverProjectID: ""}, TestLogger(t))
if err != nil {
t.Error(err)
}
if e == nil {
t.Error("expected a non-nil metrics exporter")
}
expectPromSrv(t)
}
func TestMetricsExporter(t *testing.T) {
err := newMetricsExporter(&metricsConfig{
domain: servingDomain,
@@ -205,19 +122,3 @@ func TestInterlevedExporters(t *testing.T) {
t.Error(err)
}
}
func expectPromSrv(t *testing.T) {
time.Sleep(200 * time.Millisecond)
srv := getCurPromSrv()
if srv == nil {
t.Error("expected a server for prometheus exporter")
}
}
func expectNoPromSrv(t *testing.T) {
time.Sleep(200 * time.Millisecond)
srv := getCurPromSrv()
if srv != nil {
t.Error("expected no server for stackdriver exporter")
}
}

40
metrics/gcp_metadata.go Normal file

@@ -0,0 +1,40 @@
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"cloud.google.com/go/compute/metadata"
"github.com/knative/pkg/metrics/metricskey"
)
func retrieveGCPMetadata() *gcpMetadata {
gm := gcpMetadata{
project: metricskey.ValueUnknown,
location: metricskey.ValueUnknown,
cluster: metricskey.ValueUnknown,
}
project, err := metadata.NumericProjectID()
if err == nil && project != "" {
gm.project = project
}
location, err := metadata.Zone()
if err == nil && location != "" {
gm.location = location
}
cluster, err := metadata.InstanceAttributeValue("cluster-name")
if err == nil && cluster != "" {
gm.cluster = cluster
}
return &gm
}
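
For reference, the same lookups can be exercised directly with the cloud.google.com/go/compute/metadata package; on a GCE/GKE node each call succeeds, while off GCP every call errors and retrieveGCPMetadata keeps metricskey.ValueUnknown. A minimal, illustrative sketch (not part of this change):

package main

import (
	"fmt"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Each lookup mirrors one field filled in by retrieveGCPMetadata above.
	if project, err := metadata.NumericProjectID(); err == nil && project != "" {
		fmt.Println("project:", project)
	}
	if zone, err := metadata.Zone(); err == nil && zone != "" {
		fmt.Println("location:", zone)
	}
	if cluster, err := metadata.InstanceAttributeValue("cluster-name"); err == nil && cluster != "" {
		fmt.Println("cluster:", cluster)
	}
}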


@@ -61,9 +61,16 @@ var (
LabelRevisionName,
)
// KnativeRevisionMetricsPrefixes stores a set of metrics prefixes that belong to resource type knative_revision
KnativeRevisionMetricsPrefixes = sets.NewString(
"knative.dev/serving/autoscaler",
"knative.dev/serving/activator",
// KnativeRevisionMetrics stores a set of metric types which are supported
// by resource type knative_revision.
KnativeRevisionMetrics = sets.NewString(
"knative.dev/serving/activator/request_count",
"knative.dev/serving/activator/request_latencies",
"knative.dev/serving/autoscaler/desired_pods",
"knative.dev/serving/autoscaler/requested_pods",
"knative.dev/serving/autoscaler/actual_pods",
"knative.dev/serving/autoscaler/stable_request_concurrency",
"knative.dev/serving/autoscaler/panic_request_concurrency",
"knative.dev/serving/autoscaler/target_concurrency_per_pod",
)
)
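
The component-level prefix set is replaced with a set of full metric types, so classification is now an exact match per metric rather than a blanket rule per component. An illustrative comparison (not part of this change), assuming the same sets package (k8s.io/apimachinery/pkg/util/sets) used by metricskey:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Old rule: every metric from a listed component mapped to knative_revision.
	oldPrefixes := sets.NewString("knative.dev/serving/autoscaler")
	// New rule: only explicitly listed metric types do.
	newMetrics := sets.NewString("knative.dev/serving/autoscaler/desired_pods")

	fmt.Println(oldPrefixes.Has("knative.dev/serving/autoscaler"))             // true: any autoscaler metric matched
	fmt.Println(newMetrics.Has("knative.dev/serving/autoscaler/desired_pods")) // true: listed metric type
	fmt.Println(newMetrics.Has("knative.dev/serving/autoscaler/other_metric")) // false: unlisted metrics fall back to "global"
}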


@@ -14,7 +14,6 @@ limitations under the License.
package metrics
import (
"cloud.google.com/go/compute/metadata"
"github.com/knative/pkg/metrics/metricskey"
)
@@ -24,15 +23,6 @@ type gcpMetadata struct {
cluster string
}
func newGcpMetadata() gcpMetadata {
gm := gcpMetadata{
project: metricskey.ValueUnknown,
location: metricskey.ValueUnknown,
cluster: metricskey.ValueUnknown,
}
return gm
}
type KnativeRevision struct {
Project string
Location string
@@ -56,26 +46,8 @@ func (kr *KnativeRevision) MonitoredResource() (resType string, labels map[strin
return "knative_revision", labels
}
type Global struct {
}
type Global struct{}
func (g *Global) MonitoredResource() (resType string, labels map[string]string) {
return "global", nil
}
func retrieveGCPMetadata() *gcpMetadata {
gm := newGcpMetadata()
project, err := metadata.NumericProjectID()
if err == nil && project != "" {
gm.project = project
}
location, err := metadata.Zone()
if err == nil && location != "" {
gm.location = location
}
cluster, err := metadata.InstanceAttributeValue("cluster-name")
if err == nil && cluster != "" {
gm.cluster = cluster
}
return &gm
}


@@ -0,0 +1,73 @@
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"net/http"
"sync"
"go.opencensus.io/exporter/prometheus"
"go.opencensus.io/stats/view"
"go.uber.org/zap"
)
var (
curPromSrv *http.Server
curPromSrvMux sync.Mutex
)
func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) {
e, err := prometheus.NewExporter(prometheus.Options{Namespace: config.component})
if err != nil {
logger.Error("Failed to create the Prometheus exporter.", zap.Error(err))
return nil, err
}
logger.Infof("Created Opencensus Prometheus exporter with config: %v. Start the server for Prometheus exporter.", config)
// Start the server for Prometheus scraping
go func() {
srv := startNewPromSrv(e)
srv.ListenAndServe()
}()
return e, nil
}
func getCurPromSrv() *http.Server {
curPromSrvMux.Lock()
defer curPromSrvMux.Unlock()
return curPromSrv
}
func resetCurPromSrv() {
curPromSrvMux.Lock()
defer curPromSrvMux.Unlock()
if curPromSrv != nil {
curPromSrv.Close()
curPromSrv = nil
}
}
func startNewPromSrv(e *prometheus.Exporter) *http.Server {
sm := http.NewServeMux()
sm.Handle("/metrics", e)
curPromSrvMux.Lock()
defer curPromSrvMux.Unlock()
if curPromSrv != nil {
curPromSrv.Close()
}
curPromSrv = &http.Server{
Addr: ":9090",
Handler: sm,
}
return curPromSrv
}
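
Once newPrometheusExporter has run, the exporter serves the standard Prometheus exposition format on the hard-coded :9090 address. A minimal scrape check, illustrative only and assuming a process in which the server above is already running:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Fetch the /metrics endpoint registered by startNewPromSrv.
	resp, err := http.Get("http://localhost:9090/metrics")
	if err != nil {
		fmt.Println("scrape failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("scraped %d bytes of Prometheus exposition data\n", len(body))
}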


@@ -0,0 +1,52 @@
/*
Copyright 2019 The Knative Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"testing"
"time"
. "github.com/knative/pkg/logging/testing"
)
func TestNewPrometheusExporter(t *testing.T) {
// The stackdriver project ID is not required for prometheus exporter.
e, err := newPrometheusExporter(&metricsConfig{
domain: servingDomain,
component: testComponent,
backendDestination: Prometheus,
stackdriverProjectID: ""}, TestLogger(t))
if err != nil {
t.Error(err)
}
if e == nil {
t.Error("expected a non-nil metrics exporter")
}
expectPromSrv(t)
}
func expectPromSrv(t *testing.T) {
time.Sleep(200 * time.Millisecond)
srv := getCurPromSrv()
if srv == nil {
t.Error("expected a server for prometheus exporter")
}
}
func expectNoPromSrv(t *testing.T) {
time.Sleep(200 * time.Millisecond)
srv := getCurPromSrv()
if srv != nil {
t.Error("expected no server for stackdriver exporter")
}
}


@@ -0,0 +1,128 @@
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"path"
"contrib.go.opencensus.io/exporter/stackdriver"
"contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
"github.com/knative/pkg/metrics/metricskey"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.uber.org/zap"
)
// customMetricTypeDomain is the metric type prefix used for metrics that are
// not supported by resource type knative_revision.
// See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
const customMetricTypeDomain = "custom.googleapis.com/knative.dev"
var (
// gcpMetadataFunc is the function used to fetch GCP metadata.
// In production usage, this is always set to the retrieveGCPMetadata function.
// In unit tests this is set to a fake one to avoid calling GCP metadata
// service.
gcpMetadataFunc func() *gcpMetadata
)
func init() {
// Set gcpMetadataFunc to call GCP metadata service.
gcpMetadataFunc = retrieveGCPMetadata
}
func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) {
gm := gcpMetadataFunc()
mtf := getMetricTypeFunc(config.domain, config.component)
e, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: config.stackdriverProjectID,
GetMetricDisplayName: mtf, // Use metric type for display name for custom metrics. No impact on built-in metrics.
GetMetricType: mtf,
GetMonitoredResource: getMonitoredResourceFunc(config.domain, config.component, gm),
DefaultMonitoringLabels: &stackdriver.Labels{},
})
if err != nil {
logger.Error("Failed to create the Stackdriver exporter: ", zap.Error(err))
return nil, err
}
logger.Infof("Created Opencensus Stackdriver exporter with config %v", config)
return e, nil
}
func getMonitoredResourceFunc(domain, component string, gm *gcpMetadata) func(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
return func(view *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
metricType := path.Join(domain, component, view.Measure.Name())
if metricskey.KnativeRevisionMetrics.Has(metricType) {
return getKnativeRevisionMonitoredResource(view, tags, gm)
}
// Metric not supported by knative_revision; use the "global" resource type.
return getGlobalMonitoredResource(view, tags)
}
}
func getKnativeRevisionMonitoredResource(
v *view.View, tags []tag.Tag, gm *gcpMetadata) ([]tag.Tag, monitoredresource.Interface) {
tagsMap := getTagsMap(tags)
kr := &KnativeRevision{
// The first three resource labels are from metadata.
Project: gm.project,
Location: gm.location,
ClusterName: gm.cluster,
// The remaining resource labels come from the metric labels.
NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tagsMap),
ServiceName: valueOrUnknown(metricskey.LabelServiceName, tagsMap),
ConfigurationName: valueOrUnknown(metricskey.LabelConfigurationName, tagsMap),
RevisionName: valueOrUnknown(metricskey.LabelRevisionName, tagsMap),
}
var newTags []tag.Tag
for _, t := range tags {
// Keep the metrics labels that are not resource labels
if !metricskey.KnativeRevisionLabels.Has(t.Key.Name()) {
newTags = append(newTags, t)
}
}
return newTags, kr
}
func getTagsMap(tags []tag.Tag) map[string]string {
tagsMap := map[string]string{}
for _, t := range tags {
tagsMap[t.Key.Name()] = t.Value
}
return tagsMap
}
func valueOrUnknown(key string, tagsMap map[string]string) string {
if value, ok := tagsMap[key]; ok {
return value
}
return metricskey.ValueUnknown
}
func getGlobalMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
return tags, &Global{}
}
func getMetricTypeFunc(domain, component string) func(view *view.View) string {
return func(view *view.View) string {
metricType := path.Join(domain, component, view.Measure.Name())
if metricskey.KnativeRevisionMetrics.Has(metricType) {
return metricType
}
// Metric not supported by knative_revision; use the custom metric type domain.
return path.Join(customMetricTypeDomain, component, view.Measure.Name())
}
}
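
In effect, getMetricTypeFunc keeps the full knative.dev type for metrics listed in metricskey.KnativeRevisionMetrics (so they are exported against the built-in knative_revision descriptors) and rewrites everything else under customMetricTypeDomain. A standalone sketch of that mapping, illustrative only; the helper metricType below is a hypothetical stand-in for the closure returned by getMetricTypeFunc, reusing the constant and naming from this file:

package main

import (
	"fmt"
	"path"

	"k8s.io/apimachinery/pkg/util/sets"
)

const customMetricTypeDomain = "custom.googleapis.com/knative.dev"

// metricType mirrors the decision made by the closure returned from getMetricTypeFunc.
func metricType(supported sets.String, domain, component, measure string) string {
	mt := path.Join(domain, component, measure)
	if supported.Has(mt) {
		return mt // built-in knative_revision metric type
	}
	return path.Join(customMetricTypeDomain, component, measure) // custom metric type
}

func main() {
	supported := sets.NewString("knative.dev/serving/autoscaler/desired_pods")
	fmt.Println(metricType(supported, "knative.dev/serving", "autoscaler", "desired_pods"))
	// knative.dev/serving/autoscaler/desired_pods
	fmt.Println(metricType(supported, "knative.dev/serving", "autoscaler", "unsupported"))
	// custom.googleapis.com/knative.dev/autoscaler/unsupported
}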


@@ -0,0 +1,182 @@
/*
Copyright 2019 The Knative Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"path"
"testing"
. "github.com/knative/pkg/logging/testing"
"github.com/knative/pkg/metrics/metricskey"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
var (
testGcpMetadata = gcpMetadata{
project: "test-project",
location: "test-location",
cluster: "test-cluster",
}
supportedMetricsTestCases = []struct {
name string
domain string
component string
metricName string
}{{
name: "activator metric",
domain: servingDomain,
component: "activator",
metricName: "request_count",
}, {
name: "autoscaler metric",
domain: servingDomain,
component: "autoscaler",
metricName: "desired_pods",
}}
unsupportedMetricsTestCases = []struct {
name string
domain string
component string
metricName string
}{{
name: "unsupported domain",
domain: "unsupported",
component: "activator",
metricName: "request_count",
}, {
name: "unsupported component",
domain: servingDomain,
component: "unsupported",
metricName: "request_count",
}, {
name: "unsupported metric",
domain: servingDomain,
component: "activator",
metricName: "unsupported",
}}
)
func fakeGcpMetadataFun() *gcpMetadata {
return &testGcpMetadata
}
func TestGetMonitoredResourceFunc_UseKnativeRevision(t *testing.T) {
for _, testCase := range supportedMetricsTestCases {
testView = &view.View{
Description: "Test View",
Measure: stats.Int64(testCase.metricName, "Test Measure", stats.UnitNone),
Aggregation: view.LastValue(),
TagKeys: []tag.Key{},
}
mrf := getMonitoredResourceFunc(testCase.domain, testCase.component, &testGcpMetadata)
newTags, monitoredResource := mrf(testView, testTags)
gotResType, labels := monitoredResource.MonitoredResource()
wantedResType := "knative_revision"
if gotResType != wantedResType {
t.Fatalf("MonitoredResource=%v, want %v", gotResType, wantedResType)
}
got := getResourceLabelValue(metricskey.LabelRouteName, newTags)
if got != testRoute {
t.Errorf("expected new tag: %v, got: %v", routeKey, newTags)
}
got, ok := labels[metricskey.LabelNamespaceName]
if !ok || got != testNS {
t.Errorf("expected label %v with value %v, got: %v", metricskey.LabelNamespaceName, testNS, got)
}
got, ok = labels[metricskey.LabelConfigurationName]
if !ok || got != metricskey.ValueUnknown {
t.Errorf("expected label %v with value %v, got: %v", metricskey.LabelConfigurationName, metricskey.ValueUnknown, got)
}
}
}
func TestGetMonitoredResourceFunc_UseGlobal(t *testing.T) {
for _, testCase := range unsupportedMetricsTestCases {
testView = &view.View{
Description: "Test View",
Measure: stats.Int64(testCase.metricName, "Test Measure", stats.UnitNone),
Aggregation: view.LastValue(),
TagKeys: []tag.Key{},
}
mrf := getMonitoredResourceFunc(testCase.domain, testCase.component, &testGcpMetadata)
newTags, monitoredResource := mrf(testView, testTags)
gotResType, labels := monitoredResource.MonitoredResource()
wantedResType := "global"
if gotResType != wantedResType {
t.Fatalf("MonitoredResource=%v, want: %v", gotResType, wantedResType)
}
got := getResourceLabelValue(metricskey.LabelNamespaceName, newTags)
if got != testNS {
t.Errorf("expected new tag %v with value %v, got: %v", routeKey, testNS, newTags)
}
if len(labels) != 0 {
t.Errorf("expected no label, got: %v", labels)
}
}
}
func TestGetgetMetricTypeFunc_UseKnativeDomain(t *testing.T) {
for _, testCase := range supportedMetricsTestCases {
testView = &view.View{
Description: "Test View",
Measure: stats.Int64(testCase.metricName, "Test Measure", stats.UnitNone),
Aggregation: view.LastValue(),
TagKeys: []tag.Key{},
}
mtf := getMetricTypeFunc(testCase.domain, testCase.component)
gotMetricType := mtf(testView)
wantedMetricType := path.Join(testCase.domain, testCase.component, testView.Measure.Name())
if gotMetricType != wantedMetricType {
t.Fatalf("getMetricType=%v, want %v", gotMetricType, wantedMetricType)
}
}
}
func TestGetgetMetricTypeFunc_UseCustomDomain(t *testing.T) {
for _, testCase := range unsupportedMetricsTestCases {
testView = &view.View{
Description: "Test View",
Measure: stats.Int64(testCase.metricName, "Test Measure", stats.UnitNone),
Aggregation: view.LastValue(),
TagKeys: []tag.Key{},
}
mtf := getMetricTypeFunc(testCase.domain, testCase.component)
gotMetricType := mtf(testView)
wantedMetricType := path.Join(customMetricTypeDomain, testCase.component, testView.Measure.Name())
if gotMetricType != wantedMetricType {
t.Fatalf("getMetricType=%v, want %v", gotMetricType, wantedMetricType)
}
}
}
func TestNewStackdriverExporterWithMetadata(t *testing.T) {
e, err := newStackdriverExporter(&metricsConfig{
domain: servingDomain,
component: "autoscaler",
backendDestination: Stackdriver,
stackdriverProjectID: testProj}, TestLogger(t))
if err != nil {
t.Error(err)
}
if e == nil {
t.Error("expected a non-nil metrics exporter")
}
}


@@ -0,0 +1,547 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
/*
The code in this file is responsible for converting OpenCensus Proto metrics
directly to Stackdriver Metrics.
*/
import (
"context"
"errors"
"fmt"
"path"
"github.com/golang/protobuf/ptypes/timestamp"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"cloud.google.com/go/monitoring/apiv3"
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
labelpb "google.golang.org/genproto/googleapis/api/label"
googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
)
var errNilMetric = errors.New("expecting a non-nil metric")
type metricPayload struct {
node *commonpb.Node
resource *resourcepb.Resource
metric *metricspb.Metric
}
// ExportMetric exports OpenCensus Metrics to Stackdriver Monitoring.
func (se *statsExporter) ExportMetric(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) error {
if metric == nil {
return errNilMetric
}
payload := &metricPayload{
metric: metric,
resource: rsc,
node: node,
}
se.protoMetricsBundler.Add(payload, 1)
return nil
}
func (se *statsExporter) handleMetricsUpload(payloads []*metricPayload) error {
ctx, cancel := se.o.newContextWithTimeout()
defer cancel()
ctx, span := trace.StartSpan(
ctx,
"contrib.go.opencensus.io/exporter/stackdriver.uploadMetrics",
trace.WithSampler(trace.NeverSample()),
)
defer span.End()
for _, payload := range payloads {
// Now create the metric descriptor remotely.
if err := se.createMetricDescriptor(ctx, payload.metric); err != nil {
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
return err
}
}
var allTimeSeries []*monitoringpb.TimeSeries
for _, payload := range payloads {
tsl, err := se.protoMetricToTimeSeries(ctx, payload.node, payload.resource, payload.metric)
if err != nil {
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
return err
}
allTimeSeries = append(allTimeSeries, tsl...)
}
// Now batch timeseries up and then export.
for start, end := 0, 0; start < len(allTimeSeries); start = end {
end = start + maxTimeSeriesPerUpload
if end > len(allTimeSeries) {
end = len(allTimeSeries)
}
batch := allTimeSeries[start:end]
ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(batch)
for _, ctsreq := range ctsreql {
if err := createTimeSeries(ctx, se.c, ctsreq); err != nil {
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
// TODO(@odeke-em): Don't fail fast here, perhaps batch errors?
// return err
}
}
}
return nil
}
func (se *statsExporter) combineTimeSeriesToCreateTimeSeriesRequest(ts []*monitoringpb.TimeSeries) (ctsreql []*monitoringpb.CreateTimeSeriesRequest) {
if len(ts) == 0 {
return nil
}
// Since there are scenarios in which Metrics with the same Type
// can be bunched in the same TimeSeries, we have to ensure that
// we create a unique CreateTimeSeriesRequest with entirely unique Metrics
// per TimeSeries, lest we encounter:
//
// err: rpc error: code = InvalidArgument desc = One or more TimeSeries could not be written:
// Field timeSeries[2] had an invalid value: Duplicate TimeSeries encountered.
// Only one point can be written per TimeSeries per request.: timeSeries[2]
//
// This scenario happens when we are using the OpenCensus Agent in which multiple metrics
// are streamed by various client applications.
// See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/73
uniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
nonUniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
seenMetrics := make(map[string]struct{})
for _, tti := range ts {
signature := tti.Metric.GetType()
if _, alreadySeen := seenMetrics[signature]; !alreadySeen {
uniqueTimeSeries = append(uniqueTimeSeries, tti)
seenMetrics[signature] = struct{}{}
} else {
nonUniqueTimeSeries = append(nonUniqueTimeSeries, tti)
}
}
// UniqueTimeSeries can be bunched up together
// While for each nonUniqueTimeSeries, we have
// to make a unique CreateTimeSeriesRequest.
ctsreql = append(ctsreql, &monitoringpb.CreateTimeSeriesRequest{
Name: monitoring.MetricProjectPath(se.o.ProjectID),
TimeSeries: uniqueTimeSeries,
})
// Now recursively also combine the non-unique TimeSeries
// that were singly added to nonUniqueTimeSeries.
// The reason is that we need optimal combinations, because input such as:
// * "a/b/c"
// * "a/b/c"
// * "x/y/z"
// * "a/b/c"
// * "x/y/z"
// * "p/y/z"
// * "d/y/z"
//
// should produce:
// CreateTimeSeries(uniqueTimeSeries) :: ["a/b/c", "x/y/z", "p/y/z", "d/y/z"]
// CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c"]
// CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c", "x/y/z"]
nonUniqueRequests := se.combineTimeSeriesToCreateTimeSeriesRequest(nonUniqueTimeSeries)
ctsreql = append(ctsreql, nonUniqueRequests...)
return ctsreql
}
// protoMetricToTimeSeries converts a metric into a Stackdriver Monitoring v3 API CreateTimeSeriesRequest
// but it doesn't invoke any remote API.
func (se *statsExporter) protoMetricToTimeSeries(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) ([]*monitoringpb.TimeSeries, error) {
if metric == nil {
return nil, errNilMetric
}
var resource = rsc
if metric.Resource != nil {
resource = metric.Resource
}
metricName, _, _, _ := metricProseFromProto(metric)
metricType, _ := se.metricTypeFromProto(metricName)
metricLabelKeys := metric.GetMetricDescriptor().GetLabelKeys()
metricKind, _ := protoMetricDescriptorTypeToMetricKind(metric)
timeSeries := make([]*monitoringpb.TimeSeries, 0, len(metric.Timeseries))
for _, protoTimeSeries := range metric.Timeseries {
sdPoints, err := se.protoTimeSeriesToMonitoringPoints(protoTimeSeries, metricKind)
if err != nil {
return nil, err
}
// Each TimeSeries has labelValues which MUST be correlated
// with that from the MetricDescriptor
labels, err := labelsPerTimeSeries(se.defaultLabels, metricLabelKeys, protoTimeSeries.GetLabelValues())
if err != nil {
// TODO: (@odeke-em) perhaps log this error from labels extraction, if non-nil.
continue
}
timeSeries = append(timeSeries, &monitoringpb.TimeSeries{
Metric: &googlemetricpb.Metric{
Type: metricType,
Labels: labels,
},
Resource: protoResourceToMonitoredResource(resource),
Points: sdPoints,
})
}
return timeSeries, nil
}
func labelsPerTimeSeries(defaults map[string]labelValue, labelKeys []*metricspb.LabelKey, labelValues []*metricspb.LabelValue) (map[string]string, error) {
labels := make(map[string]string)
// Fill in the defaults first, regardless of whether labelKeys and labelValues are mismatched.
for key, label := range defaults {
labels[sanitize(key)] = label.val
}
// Perform this sanity check now.
if len(labelKeys) != len(labelValues) {
return labels, fmt.Errorf("Length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues))
}
for i, labelKey := range labelKeys {
labelValue := labelValues[i]
labels[sanitize(labelKey.GetKey())] = labelValue.GetValue()
}
return labels, nil
}
func (se *statsExporter) protoMetricDescriptorToCreateMetricDescriptorRequest(ctx context.Context, metric *metricspb.Metric) (*monitoringpb.CreateMetricDescriptorRequest, error) {
// Otherwise, we encountered a cache-miss and
// should create the metric descriptor remotely.
inMD, err := se.protoToMonitoringMetricDescriptor(metric)
if err != nil {
return nil, err
}
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
Name: fmt.Sprintf("projects/%s", se.o.ProjectID),
MetricDescriptor: inMD,
}
return cmrdesc, nil
}
// createMetricDescriptor creates a metric descriptor from the OpenCensus proto metric
// and then creates it remotely using Stackdriver's API.
func (se *statsExporter) createMetricDescriptor(ctx context.Context, metric *metricspb.Metric) error {
se.protoMu.Lock()
defer se.protoMu.Unlock()
name := metric.GetMetricDescriptor().GetName()
if _, created := se.protoMetricDescriptors[name]; created {
return nil
}
// Otherwise, we encountered a cache-miss and
// should create the metric descriptor remotely.
inMD, err := se.protoToMonitoringMetricDescriptor(metric)
if err != nil {
return err
}
var md *googlemetricpb.MetricDescriptor
if builtinMetric(inMD.Type) {
gmrdesc := &monitoringpb.GetMetricDescriptorRequest{
Name: inMD.Name,
}
md, err = getMetricDescriptor(ctx, se.c, gmrdesc)
} else {
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
Name: fmt.Sprintf("projects/%s", se.o.ProjectID),
MetricDescriptor: inMD,
}
md, err = createMetricDescriptor(ctx, se.c, cmrdesc)
}
if err == nil {
// Now record the metric as having been created.
se.protoMetricDescriptors[name] = md
}
return err
}
func (se *statsExporter) protoTimeSeriesToMonitoringPoints(ts *metricspb.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) (sptl []*monitoringpb.Point, err error) {
for _, pt := range ts.Points {
// If we have a last value aggregation point i.e. MetricDescriptor_GAUGE
// StartTime should be nil.
startTime := ts.StartTimestamp
if metricKind == googlemetricpb.MetricDescriptor_GAUGE {
startTime = nil
}
spt, err := fromProtoPoint(startTime, pt)
if err != nil {
return nil, err
}
sptl = append(sptl, spt)
}
return sptl, nil
}
func (se *statsExporter) protoToMonitoringMetricDescriptor(metric *metricspb.Metric) (*googlemetricpb.MetricDescriptor, error) {
if metric == nil {
return nil, errNilMetric
}
metricName, description, unit, _ := metricProseFromProto(metric)
metricType, _ := se.metricTypeFromProto(metricName)
displayName := se.displayName(metricName)
metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric)
sdm := &googlemetricpb.MetricDescriptor{
Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", se.o.ProjectID, metricType),
DisplayName: displayName,
Description: description,
Unit: unit,
Type: metricType,
MetricKind: metricKind,
ValueType: valueType,
Labels: labelDescriptorsFromProto(se.defaultLabels, metric.GetMetricDescriptor().GetLabelKeys()),
}
return sdm, nil
}
func labelDescriptorsFromProto(defaults map[string]labelValue, protoLabelKeys []*metricspb.LabelKey) []*labelpb.LabelDescriptor {
labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(defaults)+len(protoLabelKeys))
// Fill in the defaults first.
for key, lbl := range defaults {
labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
Key: sanitize(key),
Description: lbl.desc,
ValueType: labelpb.LabelDescriptor_STRING,
})
}
// Now fill in those from the metric.
for _, protoKey := range protoLabelKeys {
labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
Key: sanitize(protoKey.GetKey()),
Description: protoKey.GetDescription(),
ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags
})
}
return labelDescriptors
}
func metricProseFromProto(metric *metricspb.Metric) (name, description, unit string, ok bool) {
mname := metric.GetName()
if mname != "" {
name = mname
return
}
md := metric.GetMetricDescriptor()
name = md.GetName()
unit = md.GetUnit()
description = md.GetDescription()
if md != nil && md.Type == metricspb.MetricDescriptor_CUMULATIVE_INT64 {
// If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1",
// because this view does not apply to the recorded values.
unit = stats.UnitDimensionless
}
return
}
func (se *statsExporter) metricTypeFromProto(name string) (string, bool) {
// TODO: (@odeke-em) support non-"custom.googleapis.com" metrics names.
name = path.Join("custom.googleapis.com", "opencensus", name)
return name, true
}
func fromProtoPoint(startTime *timestamp.Timestamp, pt *metricspb.Point) (*monitoringpb.Point, error) {
if pt == nil {
return nil, nil
}
mptv, err := protoToMetricPoint(pt.Value)
if err != nil {
return nil, err
}
mpt := &monitoringpb.Point{
Value: mptv,
Interval: &monitoringpb.TimeInterval{
StartTime: startTime,
EndTime: pt.Timestamp,
},
}
return mpt, nil
}
func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) {
if value == nil {
return nil, nil
}
var err error
var tval *monitoringpb.TypedValue
switch v := value.(type) {
default:
// All the other types are not yet handled.
// TODO: (@odeke-em, @songy23) talk to the Stackdriver team to determine
// the use cases for:
//
// *TypedValue_BoolValue
// *TypedValue_StringValue
//
// and then file feature requests on OpenCensus-Specs and then OpenCensus-Proto,
// lest we error here.
//
// TODO: Add conversion from SummaryValue when
// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/66
// has been figured out.
err = fmt.Errorf("protoToMetricPoint: unknown Data type: %T", value)
case *metricspb.Point_Int64Value:
tval = &monitoringpb.TypedValue{
Value: &monitoringpb.TypedValue_Int64Value{
Int64Value: v.Int64Value,
},
}
case *metricspb.Point_DoubleValue:
tval = &monitoringpb.TypedValue{
Value: &monitoringpb.TypedValue_DoubleValue{
DoubleValue: v.DoubleValue,
},
}
case *metricspb.Point_DistributionValue:
dv := v.DistributionValue
var mv *monitoringpb.TypedValue_DistributionValue
if dv != nil {
var mean float64
if dv.Count > 0 {
mean = float64(dv.Sum) / float64(dv.Count)
}
mv = &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
Count: dv.Count,
Mean: mean,
SumOfSquaredDeviation: dv.SumOfSquaredDeviation,
BucketCounts: bucketCounts(dv.Buckets),
},
}
if bopts := dv.BucketOptions; bopts != nil && bopts.Type != nil {
bexp, ok := bopts.Type.(*metricspb.DistributionValue_BucketOptions_Explicit_)
if ok && bexp != nil && bexp.Explicit != nil {
mv.DistributionValue.BucketOptions = &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
Bounds: bexp.Explicit.Bounds[:],
},
},
}
}
}
}
tval = &monitoringpb.TypedValue{Value: mv}
}
return tval, err
}
func bucketCounts(buckets []*metricspb.DistributionValue_Bucket) []int64 {
bucketCounts := make([]int64, len(buckets))
for i, bucket := range buckets {
if bucket != nil {
bucketCounts[i] = bucket.Count
}
}
return bucketCounts
}
func protoMetricDescriptorTypeToMetricKind(m *metricspb.Metric) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) {
dt := m.GetMetricDescriptor()
if dt == nil {
return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
}
switch dt.Type {
case metricspb.MetricDescriptor_CUMULATIVE_INT64:
return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64
case metricspb.MetricDescriptor_CUMULATIVE_DOUBLE:
return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE
case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION:
return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION
case metricspb.MetricDescriptor_GAUGE_DOUBLE:
return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
case metricspb.MetricDescriptor_GAUGE_INT64:
return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
case metricspb.MetricDescriptor_GAUGE_DISTRIBUTION:
return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DISTRIBUTION
default:
return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
}
}
func protoResourceToMonitoredResource(rsp *resourcepb.Resource) *monitoredrespb.MonitoredResource {
if rsp == nil {
return &monitoredrespb.MonitoredResource{
Type: "global",
}
}
typ := rsp.Type
if typ == "" {
typ = "global"
}
mrsp := &monitoredrespb.MonitoredResource{
Type: typ,
}
if rsp.Labels != nil {
mrsp.Labels = make(map[string]string, len(rsp.Labels))
for k, v := range rsp.Labels {
mrsp.Labels[k] = v
}
}
return mrsp
}


@@ -62,6 +62,10 @@ import (
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
)
// Options contains options for configuring the exporter.
@@ -274,6 +278,11 @@ func (e *Exporter) ExportView(vd *view.Data) {
e.statsExporter.ExportView(vd)
}
// ExportMetric exports OpenCensus Metrics to Stackdriver Monitoring.
func (e *Exporter) ExportMetric(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) error {
return e.statsExporter.ExportMetric(ctx, node, rsc, metric)
}
// ExportSpan exports a SpanData to Stackdriver Trace.
func (e *Exporter) ExportSpan(sd *trace.SpanData) {
if len(e.traceExporter.o.DefaultTraceAttributes) > 0 {


@@ -48,19 +48,24 @@ const (
opencensusTaskKey = "opencensus_task"
opencensusTaskDescription = "Opencensus task identifier"
defaultDisplayNamePrefix = "OpenCensus"
version = "0.8.0"
version = "0.10.0"
)
var userAgent = fmt.Sprintf("opencensus-go %s; stackdriver-exporter %s", opencensus.Version(), version)
// statsExporter exports stats to the Stackdriver Monitoring.
type statsExporter struct {
bundler *bundler.Bundler
o Options
viewDataBundler *bundler.Bundler
protoMetricsBundler *bundler.Bundler
createdViewsMu sync.Mutex
createdViews map[string]*metricpb.MetricDescriptor // Views already created remotely
protoMu sync.Mutex
protoMetricDescriptors map[string]*metricpb.MetricDescriptor // Saves the metric descriptors that were already created remotely
c *monitoring.MetricClient
defaultLabels map[string]labelValue
}
@@ -88,6 +93,7 @@ func newStatsExporter(o Options) (*statsExporter, error) {
c: client,
o: o,
createdViews: make(map[string]*metricpb.MetricDescriptor),
protoMetricDescriptors: make(map[string]*metricpb.MetricDescriptor),
}
if o.DefaultMonitoringLabels != nil {
@@ -97,15 +103,22 @@ func newStatsExporter(o Options) (*statsExporter, error) {
opencensusTaskKey: {val: getTaskValue(), desc: opencensusTaskDescription},
}
}
e.bundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
e.viewDataBundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
vds := bundle.([]*view.Data)
e.handleUpload(vds...)
})
if e.o.BundleDelayThreshold > 0 {
e.bundler.DelayThreshold = e.o.BundleDelayThreshold
e.protoMetricsBundler = bundler.NewBundler((*metricPayload)(nil), func(bundle interface{}) {
payloads := bundle.([]*metricPayload)
e.handleMetricsUpload(payloads)
})
if delayThreshold := e.o.BundleDelayThreshold; delayThreshold > 0 {
e.viewDataBundler.DelayThreshold = delayThreshold
e.protoMetricsBundler.DelayThreshold = delayThreshold
}
if e.o.BundleCountThreshold > 0 {
e.bundler.BundleCountThreshold = e.o.BundleCountThreshold
if countThreshold := e.o.BundleCountThreshold; countThreshold > 0 {
e.viewDataBundler.BundleCountThreshold = countThreshold
e.protoMetricsBundler.BundleCountThreshold = countThreshold
}
return e, nil
}
@@ -131,7 +144,7 @@ func (e *statsExporter) ExportView(vd *view.Data) {
if len(vd.Rows) == 0 {
return
}
err := e.bundler.Add(vd, 1)
err := e.viewDataBundler.Add(vd, 1)
switch err {
case nil:
return
@@ -160,12 +173,13 @@ func (e *statsExporter) handleUpload(vds ...*view.Data) {
}
}
// Flush waits for exported view data to be uploaded.
// Flush waits for exported view data and metrics to be uploaded.
//
// This is useful if your program is ending and you do not
// want to lose recent spans.
// want to lose data that hasn't yet been exported.
func (e *statsExporter) Flush() {
e.bundler.Flush()
e.viewDataBundler.Flush()
e.protoMetricsBundler.Flush()
}
func (e *statsExporter) uploadStats(vds []*view.Data) error {
@@ -194,55 +208,47 @@ func (e *statsExporter) uploadStats(vds []*view.Data) error {
return nil
}
func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest {
func (se *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest {
var reqs []*monitoringpb.CreateTimeSeriesRequest
var timeSeries []*monitoringpb.TimeSeries
var allTimeSeries []*monitoringpb.TimeSeries
for _, vd := range vds {
for _, row := range vd.Rows {
tags, resource := e.getMonitoredResource(vd.View, append([]tag.Tag(nil), row.Tags...))
tags, resource := se.getMonitoredResource(vd.View, append([]tag.Tag(nil), row.Tags...))
ts := &monitoringpb.TimeSeries{
Metric: &metricpb.Metric{
Type: e.metricType(vd.View),
Labels: newLabels(e.defaultLabels, tags),
Type: se.metricType(vd.View),
Labels: newLabels(se.defaultLabels, tags),
},
Resource: resource,
Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)},
}
allTimeSeries = append(allTimeSeries, ts)
}
}
var timeSeries []*monitoringpb.TimeSeries
for _, ts := range allTimeSeries {
timeSeries = append(timeSeries, ts)
if len(timeSeries) == limit {
reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{
Name: monitoring.MetricProjectPath(e.o.ProjectID),
TimeSeries: timeSeries,
})
timeSeries = []*monitoringpb.TimeSeries{}
}
ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries)
reqs = append(reqs, ctsreql...)
timeSeries = timeSeries[:0]
}
}
if len(timeSeries) > 0 {
reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{
Name: monitoring.MetricProjectPath(e.o.ProjectID),
TimeSeries: timeSeries,
})
ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries)
reqs = append(reqs, ctsreql...)
}
return reqs
}
// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring.
// An error will be returned if there is already a metric descriptor created with the same name
// but it has a different aggregation or keys.
func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error {
e.createdViewsMu.Lock()
defer e.createdViewsMu.Unlock()
func (e *statsExporter) viewToMetricDescriptor(ctx context.Context, v *view.View) (*metricpb.MetricDescriptor, error) {
m := v.Measure
agg := v.Aggregation
tagKeys := v.TagKeys
viewName := v.Name
if md, ok := e.createdViews[viewName]; ok {
return e.equalMeasureAggTagKeys(md, m, agg, tagKeys)
}
metricType := e.metricType(v)
var valueType metricpb.MetricDescriptor_ValueType
unit := m.Unit()
@@ -273,23 +279,17 @@ func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error {
valueType = metricpb.MetricDescriptor_DOUBLE
}
default:
return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String())
return nil, fmt.Errorf("unsupported aggregation type: %s", agg.Type.String())
}
var displayName string
if e.o.GetMetricDisplayName == nil {
displayNamePrefix := defaultDisplayNamePrefix
if e.o.MetricPrefix != "" {
displayNamePrefix = e.o.MetricPrefix
}
displayName = path.Join(displayNamePrefix, viewName)
displayName = e.displayName(viewName)
} else {
displayName = e.o.GetMetricDisplayName(v)
}
md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
MetricDescriptor: &metricpb.MetricDescriptor{
res := &metricpb.MetricDescriptor{
Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType),
DisplayName: displayName,
Description: v.Description,
@@ -298,14 +298,73 @@ func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error {
MetricKind: metricKind,
ValueType: valueType,
Labels: newLabelDescriptors(e.defaultLabels, v.TagKeys),
},
})
}
return res, nil
}
func (e *statsExporter) viewToCreateMetricDescriptorRequest(ctx context.Context, v *view.View) (*monitoringpb.CreateMetricDescriptorRequest, error) {
inMD, err := e.viewToMetricDescriptor(ctx, v)
if err != nil {
return nil, err
}
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
MetricDescriptor: inMD,
}
return cmrdesc, nil
}
// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring.
// An error will be returned if there is already a metric descriptor created with the same name
// but it has a different aggregation or keys.
func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error {
e.createdViewsMu.Lock()
defer e.createdViewsMu.Unlock()
viewName := v.Name
if md, ok := e.createdViews[viewName]; ok {
// [TODO:rghetia] Temporary fix for https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/76#issuecomment-459459091
if builtinMetric(md.Type) {
return nil
}
return e.equalMeasureAggTagKeys(md, v.Measure, v.Aggregation, v.TagKeys)
}
inMD, err := e.viewToMetricDescriptor(ctx, v)
if err != nil {
return err
}
e.createdViews[viewName] = md
return nil
var dmd *metric.MetricDescriptor
if builtinMetric(inMD.Type) {
gmrdesc := &monitoringpb.GetMetricDescriptorRequest{
Name: inMD.Name,
}
dmd, err = getMetricDescriptor(ctx, e.c, gmrdesc)
} else {
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
MetricDescriptor: inMD,
}
dmd, err = createMetricDescriptor(ctx, e.c, cmrdesc)
}
if err != nil {
return err
}
// Now cache the metric descriptor
e.createdViews[viewName] = dmd
return err
}
func (e *statsExporter) displayName(suffix string) string {
displayNamePrefix := defaultDisplayNamePrefix
if e.o.MetricPrefix != "" {
displayNamePrefix = e.o.MetricPrefix
}
return path.Join(displayNamePrefix, suffix)
}
func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
@@ -494,3 +553,19 @@ var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient,
var createTimeSeries = func(ctx context.Context, c *monitoring.MetricClient, ts *monitoringpb.CreateTimeSeriesRequest) error {
return c.CreateTimeSeries(ctx, ts)
}
var knownExternalMetricPrefixes = []string{
"custom.googleapis.com/",
"external.googleapis.com/",
}
// builtinMetric returns true if a MetricType is a heuristically known
// built-in Stackdriver metric
func builtinMetric(metricType string) bool {
for _, knownExternalMetric := range knownExternalMetricPrefixes {
if strings.HasPrefix(metricType, knownExternalMetric) {
return false
}
}
return true
}
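
builtinMetric is the counterpart of the metric type mapping earlier in this change: anything without a custom/external prefix is assumed to already exist as a Stackdriver built-in descriptor, so createMeasure fetches it with GetMetricDescriptor instead of creating it. A small self-contained sketch of the classification, illustrative only, with isBuiltin as a stand-in for the helper above:

package main

import (
	"fmt"
	"strings"
)

// isBuiltin mirrors builtinMetric above: a metric type without a custom/external
// prefix is treated as a Stackdriver built-in metric.
func isBuiltin(metricType string) bool {
	for _, prefix := range []string{"custom.googleapis.com/", "external.googleapis.com/"} {
		if strings.HasPrefix(metricType, prefix) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isBuiltin("knative.dev/serving/autoscaler/desired_pods"))      // true: looked up with GetMetricDescriptor
	fmt.Println(isBuiltin("custom.googleapis.com/knative.dev/autoscaler/foo")) // false: created with CreateMetricDescriptor
}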


@@ -0,0 +1 @@
Google Inc.


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,356 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/agent/common/v1/common.proto
package v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type LibraryInfo_Language int32
const (
LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
LibraryInfo_CPP LibraryInfo_Language = 1
LibraryInfo_C_SHARP LibraryInfo_Language = 2
LibraryInfo_ERLANG LibraryInfo_Language = 3
LibraryInfo_GO_LANG LibraryInfo_Language = 4
LibraryInfo_JAVA LibraryInfo_Language = 5
LibraryInfo_NODE_JS LibraryInfo_Language = 6
LibraryInfo_PHP LibraryInfo_Language = 7
LibraryInfo_PYTHON LibraryInfo_Language = 8
LibraryInfo_RUBY LibraryInfo_Language = 9
)
var LibraryInfo_Language_name = map[int32]string{
0: "LANGUAGE_UNSPECIFIED",
1: "CPP",
2: "C_SHARP",
3: "ERLANG",
4: "GO_LANG",
5: "JAVA",
6: "NODE_JS",
7: "PHP",
8: "PYTHON",
9: "RUBY",
}
var LibraryInfo_Language_value = map[string]int32{
"LANGUAGE_UNSPECIFIED": 0,
"CPP": 1,
"C_SHARP": 2,
"ERLANG": 3,
"GO_LANG": 4,
"JAVA": 5,
"NODE_JS": 6,
"PHP": 7,
"PYTHON": 8,
"RUBY": 9,
}
func (x LibraryInfo_Language) String() string {
return proto.EnumName(LibraryInfo_Language_name, int32(x))
}
func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{2, 0}
}
// Identifier metadata of the Node (Application instrumented with OpenCensus)
// that connects to OpenCensus Agent.
// In the future we plan to extend the identifier proto definition to support
// additional information (e.g cloud id, etc.)
type Node struct {
// Identifier that uniquely identifies a process within a VM/container.
Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
// Information on the OpenCensus Library that initiates the stream.
LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
// Additional information on service.
ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
// Additional attributes.
Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Node) Reset() { *m = Node{} }
func (m *Node) String() string { return proto.CompactTextString(m) }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{0}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Node.Unmarshal(m, b)
}
func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Node.Marshal(b, m, deterministic)
}
func (m *Node) XXX_Merge(src proto.Message) {
xxx_messageInfo_Node.Merge(m, src)
}
func (m *Node) XXX_Size() int {
return xxx_messageInfo_Node.Size(m)
}
func (m *Node) XXX_DiscardUnknown() {
xxx_messageInfo_Node.DiscardUnknown(m)
}
var xxx_messageInfo_Node proto.InternalMessageInfo
func (m *Node) GetIdentifier() *ProcessIdentifier {
if m != nil {
return m.Identifier
}
return nil
}
func (m *Node) GetLibraryInfo() *LibraryInfo {
if m != nil {
return m.LibraryInfo
}
return nil
}
func (m *Node) GetServiceInfo() *ServiceInfo {
if m != nil {
return m.ServiceInfo
}
return nil
}
func (m *Node) GetAttributes() map[string]string {
if m != nil {
return m.Attributes
}
return nil
}
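Illustrative note, not part of the generated file: the getters above check the receiver for nil before dereferencing, so callers can read fields without a nil check of their own. A minimal sketch under that assumption:
// Hypothetical example: calling the generated getters on a nil *Node does not panic.
func exampleNilSafeGetters() {
	var n *Node           // nil receiver
	_ = n.GetIdentifier() // returns nil instead of panicking
	_ = n.GetAttributes() // returns a nil map
}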
// Identifier that uniquely identifies a process within a VM/container.
type ProcessIdentifier struct {
// The host name. Usually refers to the machine/container name.
// For example: os.Hostname() in Go, socket.gethostname() in Python.
HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
// Process id.
Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
// Start time of this ProcessIdentifier. Represented in epoch time.
StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} }
func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) }
func (*ProcessIdentifier) ProtoMessage() {}
func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{1}
}
func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b)
}
func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic)
}
func (m *ProcessIdentifier) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProcessIdentifier.Merge(m, src)
}
func (m *ProcessIdentifier) XXX_Size() int {
return xxx_messageInfo_ProcessIdentifier.Size(m)
}
func (m *ProcessIdentifier) XXX_DiscardUnknown() {
xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m)
}
var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo
func (m *ProcessIdentifier) GetHostName() string {
if m != nil {
return m.HostName
}
return ""
}
func (m *ProcessIdentifier) GetPid() uint32 {
if m != nil {
return m.Pid
}
return 0
}
func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
if m != nil {
return m.StartTimestamp
}
return nil
}
// Information on OpenCensus Library.
type LibraryInfo struct {
// Language of OpenCensus Library.
Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
// Version of Agent exporter of Library.
ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
// Version of OpenCensus Library.
CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LibraryInfo) Reset() { *m = LibraryInfo{} }
func (m *LibraryInfo) String() string { return proto.CompactTextString(m) }
func (*LibraryInfo) ProtoMessage() {}
func (*LibraryInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{2}
}
func (m *LibraryInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LibraryInfo.Unmarshal(m, b)
}
func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic)
}
func (m *LibraryInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_LibraryInfo.Merge(m, src)
}
func (m *LibraryInfo) XXX_Size() int {
return xxx_messageInfo_LibraryInfo.Size(m)
}
func (m *LibraryInfo) XXX_DiscardUnknown() {
xxx_messageInfo_LibraryInfo.DiscardUnknown(m)
}
var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo
func (m *LibraryInfo) GetLanguage() LibraryInfo_Language {
if m != nil {
return m.Language
}
return LibraryInfo_LANGUAGE_UNSPECIFIED
}
func (m *LibraryInfo) GetExporterVersion() string {
if m != nil {
return m.ExporterVersion
}
return ""
}
func (m *LibraryInfo) GetCoreLibraryVersion() string {
if m != nil {
return m.CoreLibraryVersion
}
return ""
}
// Additional service information.
type ServiceInfo struct {
// Name of the service.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ServiceInfo) Reset() { *m = ServiceInfo{} }
func (m *ServiceInfo) String() string { return proto.CompactTextString(m) }
func (*ServiceInfo) ProtoMessage() {}
func (*ServiceInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{3}
}
func (m *ServiceInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServiceInfo.Unmarshal(m, b)
}
func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic)
}
func (m *ServiceInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServiceInfo.Merge(m, src)
}
func (m *ServiceInfo) XXX_Size() int {
return xxx_messageInfo_ServiceInfo.Size(m)
}
func (m *ServiceInfo) XXX_DiscardUnknown() {
xxx_messageInfo_ServiceInfo.DiscardUnknown(m)
}
var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo
func (m *ServiceInfo) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func init() {
proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value)
proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node")
proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry")
proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier")
proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo")
proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo")
}
func init() {
proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84)
}
var fileDescriptor_126c72ed8a252c84 = []byte{
// 590 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e,
0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee,
0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01,
0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde,
0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9,
0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2,
0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89,
0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9,
0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7,
0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39,
0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b,
0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b,
0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13,
0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06,
0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d,
0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67,
0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2,
0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a,
0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a,
0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76,
0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23,
0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c,
0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c,
0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92,
0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e,
0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51,
0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14,
0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83,
0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0,
0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86,
0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4,
0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd,
0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9,
0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c,
0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70,
0x85, 0xec, 0xd8, 0x9f, 0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1,
0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00,
}
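Illustration only (not generated code): a minimal sketch of populating the Node message from the types defined in this file, assuming it compiles in the same package; every literal value is a placeholder.
// Hypothetical construction of a Node; all field values are placeholders.
func exampleNode() *Node {
	return &Node{
		Identifier: &ProcessIdentifier{
			HostName: "example-host",
			Pid:      1234,
		},
		LibraryInfo: &LibraryInfo{
			Language:        LibraryInfo_GO_LANG,
			ExporterVersion: "0.0.1",
		},
		ServiceInfo: &ServiceInfo{Name: "example-service"},
		Attributes:  map[string]string{"example-key": "example-value"},
	}
}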

File diff suppressed because it is too large

View File

@ -0,0 +1,99 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/resource/v1/resource.proto
package v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Resource information.
type Resource struct {
// Type identifier for the resource.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Set of labels that describe the resource.
Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Resource) Reset() { *m = Resource{} }
func (m *Resource) String() string { return proto.CompactTextString(m) }
func (*Resource) ProtoMessage() {}
func (*Resource) Descriptor() ([]byte, []int) {
return fileDescriptor_584700775a2fc762, []int{0}
}
func (m *Resource) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Resource.Unmarshal(m, b)
}
func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
}
func (m *Resource) XXX_Merge(src proto.Message) {
xxx_messageInfo_Resource.Merge(m, src)
}
func (m *Resource) XXX_Size() int {
return xxx_messageInfo_Resource.Size(m)
}
func (m *Resource) XXX_DiscardUnknown() {
xxx_messageInfo_Resource.DiscardUnknown(m)
}
var xxx_messageInfo_Resource proto.InternalMessageInfo
func (m *Resource) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *Resource) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func init() {
proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource")
proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry")
}
func init() {
proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762)
}
var fileDescriptor_584700775a2fc762 = []byte{
// 234 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd,
0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d,
0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08,
0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe,
0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98,
0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1,
0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25,
0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3,
0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12,
0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf,
0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19,
0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5,
0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99,
0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00,
0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00,
}
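Illustration only (not generated code): a minimal sketch of a Resource value built from the message above, assuming it compiles in the same package; the resource type and label values are placeholders, not values mandated by this commit.
// Hypothetical Resource describing where telemetry originates; values are placeholders.
func exampleResource() *Resource {
	return &Resource{
		Type: "knative_revision",
		Labels: map[string]string{
			"project_id":   "example-project",
			"location":     "us-central1",
			"cluster_name": "example-cluster",
		},
	}
}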