Update OTLP protos to v0.9.0 (#2088)

* Update to v0.9.0 OTLP protos

* Fix conflict with Equals property from generated code and standard Equals method
Alan West 2021-06-16 11:12:26 -07:00 committed by GitHub
parent 6894a034b8
commit 9f37c7d59a
6 changed files with 374 additions and 171 deletions

View File

@ -24,7 +24,7 @@ option java_outer_classname = "TraceServiceProto";
option go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go/collector/trace/v1";
// Service that can be used to push spans between one Application instrumented with
// OpenTelemetry and an collector, or between an collector and a central collector (in this
// OpenTelemetry and a collector, or between a collector and a central collector (in this
// case spans are sent/received to/from multiple Applications).
service TraceService {
// For performance reasons, it is recommended to keep this RPC

View File

@ -34,6 +34,7 @@ message AnyValue {
double double_value = 4;
ArrayValue array_value = 5;
KeyValueList kvlist_value = 6;
bytes bytes_value = 7;
}
}
@ -65,6 +66,8 @@ message KeyValue {
// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version
// of KeyValue that only supports string values.
message StringKeyValue {
option deprecated = true;
string key = 1;
string value = 2;
}
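Hedged illustration of the two changes above (not part of the commit): assuming protoc-generated Python modules for common.proto (the import path below is illustrative), the new bytes_value variant and the KeyValue shape that replaces the deprecated StringKeyValue look like this in use:

# Sketch only: assumes protoc-generated Python bindings for common.proto.
from opentelemetry.proto.common.v1 import common_pb2

# New in v0.9.0: AnyValue can carry raw bytes.
payload = common_pb2.AnyValue(bytes_value=b"\x00\x01\x02")

# Deprecated: a plain pair of strings.
old_attr = common_pb2.StringKeyValue(key="http.method", value="GET")

# Preferred: a KeyValue whose value is a typed AnyValue.
new_attr = common_pb2.KeyValue(
    key="http.method",
    value=common_pb2.AnyValue(string_value="GET"),
)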

View File

@ -32,6 +32,11 @@ message ResourceLogs {
// A list of InstrumentationLibraryLogs that originate from a resource.
repeated InstrumentationLibraryLogs instrumentation_library_logs = 2;
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "instrumentation_library_logs" field, whose entries have
// their own schema_url field.
string schema_url = 3;
}
// A collection of Logs produced by an InstrumentationLibrary.
@ -43,6 +48,9 @@ message InstrumentationLibraryLogs {
// A list of log records.
repeated LogRecord logs = 2;
// This schema_url applies to all logs in the "logs" field.
string schema_url = 3;
}
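A minimal sketch of where the two new schema_url fields sit, assuming generated Python bindings (logs_pb2, resource_pb2; import paths and the schema URL below are illustrative, not part of the commit):

from opentelemetry.proto.logs.v1 import logs_pb2
from opentelemetry.proto.resource.v1 import resource_pb2

resource_logs = logs_pb2.ResourceLogs(
    resource=resource_pb2.Resource(),
    # Applies only to the data in `resource` above.
    schema_url="https://example.com/schemas/1.0.0",
    instrumentation_library_logs=[
        logs_pb2.InstrumentationLibraryLogs(
            # Applies to every record in `logs` below.
            schema_url="https://example.com/schemas/1.0.0",
            logs=[logs_pb2.LogRecord(time_unix_nano=1_624_000_000_000_000_000)],
        )
    ],
)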
// Possible values for LogRecord.SeverityNumber.
@ -82,7 +90,7 @@ enum LogRecordFlags {
}
// A log record according to OpenTelemetry Log Data Model:
// https://github.com/open-telemetry/oteps/blob/master/text/logs/0097-log-data-model.md
// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md
message LogRecord {
// time_unix_nano is the time when the event occurred.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.

View File

@ -0,0 +1,102 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package opentelemetry.proto.metrics.experimental;
import "opentelemetry/proto/resource/v1/resource.proto";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.metrics.experimental";
option java_outer_classname = "MetricConfigServiceProto";
option go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go/metrics/experimental";
// MetricConfig is a service that enables updating metric schedules, trace
// parameters, and other configurations on the SDK without having to restart the
// instrumented application. The collector can also serve as the configuration
// service, acting as a bridge between third-party configuration services and
// the SDK, piping updated configs from a third-party source to an instrumented
// application.
service MetricConfig {
rpc GetMetricConfig (MetricConfigRequest) returns (MetricConfigResponse);
}
message MetricConfigRequest{
// Required. The resource for which configuration should be returned.
opentelemetry.proto.resource.v1.Resource resource = 1;
// Optional. The value of MetricConfigResponse.fingerprint for the last
// configuration that the caller received and successfully applied.
bytes last_known_fingerprint = 2;
}
message MetricConfigResponse {
// Optional. The fingerprint associated with this MetricConfigResponse. Each
// change in configs yields a different fingerprint. The resource SHOULD copy
// this value to MetricConfigRequest.last_known_fingerprint for the next
// configuration request. If there are no changes between fingerprint and
// MetricConfigRequest.last_known_fingerprint, then all other fields besides
// fingerprint in the response are optional, or the same as the last update if
// present.
//
// The exact mechanics of generating the fingerprint are up to the
// implementation. However, a fingerprint must be determined deterministically
// by the configuration -- the same configuration will generate the same
// fingerprint on any instance of an implementation. Hence using a timestamp is
// unacceptable, but a deterministic hash is fine.
bytes fingerprint = 1;
// A Schedule is used to apply a particular scheduling configuration to
// a metric. If a metric name matches a schedule's patterns, then the metric
// adopts the configuration specified by the schedule.
message Schedule {
// A light-weight pattern that can match one or more
// metrics, to which this schedule applies. The string is used to
// match against metric names. It should not exceed 100k characters.
message Pattern {
oneof match {
string equal_to = 1; // matches the metric name exactly
string starts_with = 2; // prefix-matches the metric name
}
}
// Metrics with names that match a rule in the inclusion_patterns are
// targeted by this schedule. Metrics that match the exclusion_patterns
// are not targeted for this schedule, even if they match an inclusion
// pattern.
repeated Pattern exclusion_patterns = 1;
repeated Pattern inclusion_patterns = 2;
// Describes the collection period for each metric in seconds.
// A period of 0 means to not export.
int32 period_sec = 3;
}
// A single metric may match multiple schedules. In such cases, the schedule
// that specifies the smallest period is applied.
//
// Note: for optimization purposes, it is recommended to use as few schedules
// as possible to capture all required metric updates. Where you can be
// conservative, take full advantage of the inclusion/exclusion patterns to
// capture as many of your targeted metrics as possible.
repeated Schedule schedules = 2;
// Optional. The suggested duration (in seconds) the client should wait before
// polling the configuration service again.
int32 suggested_wait_time_sec = 3;
}
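To make the fingerprint and schedule mechanics above concrete, here is a hedged client-side sketch. It assumes protoc-generated Python bindings; the module name metrics_config_service_pb2 is a guess based on the java_outer_classname, and the helper function is illustrative only. A metric's effective period is the smallest period among matching schedules, and the fingerprint of the last applied response is echoed on the next request.

# Sketch only: the module name below is an assumption, not guaranteed by this commit.
from opentelemetry.proto.metrics.experimental import metrics_config_service_pb2 as cfg
from opentelemetry.proto.resource.v1 import resource_pb2

request = cfg.MetricConfigRequest(
    resource=resource_pb2.Resource(),
    last_known_fingerprint=b"",  # empty on the very first request
)
# After applying a response: request.last_known_fingerprint = response.fingerprint

def effective_period_sec(response, metric_name):
    """Smallest period among matching schedules, or None if nothing matches."""
    def matches(pattern):
        kind = pattern.WhichOneof("match")
        if kind == "equal_to":
            return metric_name == pattern.equal_to
        if kind == "starts_with":
            return metric_name.startswith(pattern.starts_with)
        return False

    periods = []
    for schedule in response.schedules:
        if any(matches(p) for p in schedule.exclusion_patterns):
            continue  # exclusions win even when an inclusion also matches
        if any(matches(p) for p in schedule.inclusion_patterns):
            periods.append(schedule.period_sec)
    return min(periods) if periods else None  # a period of 0 means "do not export"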

View File

@ -32,6 +32,11 @@ message ResourceMetrics {
// A list of metrics that originate from a resource.
repeated InstrumentationLibraryMetrics instrumentation_library_metrics = 2;
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "instrumentation_library_metrics" field, whose entries have
// their own schema_url field.
string schema_url = 3;
}
// A collection of Metrics produced by an InstrumentationLibrary.
@ -43,9 +48,16 @@ message InstrumentationLibraryMetrics {
// A list of metrics that originate from an instrumentation library.
repeated Metric metrics = 2;
// This schema_url applies to all metrics in the "metrics" field.
string schema_url = 3;
}
// Defines a Metric which has one or more timeseries.
// Defines a Metric which has one or more timeseries. The following is a
// brief summary of the Metric data model. For more details, see:
//
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md
//
//
// The data model and the relation between entities are shown in the
// diagram below. Here, "DataPoint" is the term used to refer to any
@ -54,8 +66,8 @@ message InstrumentationLibraryMetrics {
//
// - Metric is composed of a metadata and data.
// - Metadata part contains a name, description, unit.
// - Data is one of the possible types (Gauge, Sum, Histogram, etc.).
// - DataPoint contains timestamps, labels, and one of the possible value type
// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
// - DataPoint contains timestamps, attributes, and one of the possible value type
// fields.
//
// Metric
@ -95,14 +107,37 @@ message InstrumentationLibraryMetrics {
// |+-----+ |
// +---------------------------+
//
// Each distinct type of DataPoint represents the output of a specific
// aggregation function, the result of applying the DataPoint's
// associated function to one or more measurements.
//
// All DataPoint types have three common fields:
// - Labels zero or more key-value pairs associated with the data point.
// - StartTimeUnixNano MUST be set to the start of the interval when the data's
// type includes an AggregationTemporality. This field is not set otherwise.
// - TimeUnixNano MUST be set to:
// - the moment when an aggregation is reported (independent of the
// aggregation temporality).
// - the instantaneous time of the event.
// - Attributes includes key-value pairs associated with the data point
// - TimeUnixNano is required, set to the end time of the aggregation
// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
// having an AggregationTemporality field, as discussed below.
//
// Both TimeUnixNano and StartTimeUnixNano values are expressed as
// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// # TimeUnixNano
//
// This field is required, having consistent interpretation across
// DataPoint types. TimeUnixNano is the moment corresponding to when
// the data point's aggregate value was captured.
//
// Data points with the 0 value for TimeUnixNano SHOULD be rejected
// by consumers.
//
// # StartTimeUnixNano
//
// StartTimeUnixNano in general allows detecting when a sequence of
// observations is unbroken. This field indicates to consumers the
// start time for points with cumulative and delta
// AggregationTemporality, and it should be included whenever possible
// to support correct rate calculation. Although it may be omitted
// when the start time is truly unknown, setting StartTimeUnixNano is
// strongly encouraged.
message Metric {
// name of the metric, including its DNS name prefix. It must be unique.
string name = 1;
@ -114,68 +149,74 @@ message Metric {
// described by http://unitsofmeasure.org/ucum.html.
string unit = 3;
// TODO: Decide if support for RawMeasurements (measurements recorded using
// the synchronous instruments) is necessary. It can be used to delegate the
// aggregation from the application to the agent/collector. See
// https://github.com/open-telemetry/opentelemetry-specification/issues/617
// Data determines the aggregation type (if any) of the metric, what is the
// reported value type for the data points, as well as the relationship to
// the time interval over which they are reported.
//
// TODO: Update table after the decision on:
// https://github.com/open-telemetry/opentelemetry-specification/issues/731.
// By default, metrics recorded using the OpenTelemetry API are exported as
// (the table does not include MeasurementValueType to avoid extra rows):
//
// Instrument Type
// ----------------------------------------------
// Counter Sum(aggregation_temporality=delta;is_monotonic=true)
// UpDownCounter Sum(aggregation_temporality=delta;is_monotonic=false)
// ValueRecorder TBD
// SumObserver Sum(aggregation_temporality=cumulative;is_monotonic=true)
// UpDownSumObserver Sum(aggregation_temporality=cumulative;is_monotonic=false)
// ValueObserver Gauge()
oneof data {
IntGauge int_gauge = 4;
DoubleGauge double_gauge = 5;
IntSum int_sum = 6;
DoubleSum double_sum = 7;
IntHistogram int_histogram = 8;
DoubleHistogram double_histogram = 9;
DoubleSummary double_summary = 11;
// IntGauge and IntSum are deprecated and will be removed soon.
// 1. Old senders and receivers that are not aware of this change will
// continue using the `int_gauge` and `int_sum` fields.
// 2. New senders, which are aware of this change MUST send only `gauge`
// and `sum` fields.
// 3. New receivers, which are aware of this change MUST convert these into
// `gauge` and `sum` by using the provided as_int field in the oneof values.
// This field will be removed in ~3 months, on July 1, 2021.
IntGauge int_gauge = 4 [deprecated = true];
Gauge gauge = 5;
// This field will be removed in ~3 months, on July 1, 2021.
IntSum int_sum = 6 [deprecated = true];
Sum sum = 7;
// IntHistogram is deprecated and will be removed soon.
// 1. Old senders and receivers that are not aware of this change will
// continue using the `int_histogram` field.
// 2. New senders, which are aware of this change MUST send only `histogram`.
// 3. New receivers, which are aware of this change MUST convert this into
// `histogram` by simply converting all int64 values into float.
// This field will be removed in ~3 months, on July 1, 2021.
IntHistogram int_histogram = 8 [deprecated = true];
Histogram histogram = 9;
Summary summary = 11;
}
}
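A hedged sketch of the migration spelled out in the oneof comments above, assuming generated Python bindings (metrics_pb2; import path illustrative): a value that would previously have been an IntGauge/IntDataPoint is now a Gauge whose NumberDataPoint carries the integer in as_int, with TimeUnixNano always populated.

import time
from opentelemetry.proto.metrics.v1 import metrics_pb2

metric = metrics_pb2.Metric(
    name="process.open_file_descriptors",
    unit="1",
    # New senders use `gauge`; the deprecated `int_gauge` field is left unset.
    gauge=metrics_pb2.Gauge(
        data_points=[
            metrics_pb2.NumberDataPoint(
                as_int=42,                      # integer variant of the value oneof
                time_unix_nano=time.time_ns(),  # required for every DataPoint
            )
        ]
    ),
)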
// Gauge represents the type of an int scalar metric that always exports the
// IntGauge is deprecated. Use Gauge with an integer value in NumberDataPoint.
//
// IntGauge represents the type of an int scalar metric that always exports the
// "current value" for every data point. It should be used for an "unknown"
// aggregation.
//
//
// A Gauge does not support different aggregation temporalities. Given the
// aggregation is unknown, points cannot be combined using the same
// aggregation, regardless of aggregation temporalities. Therefore,
// AggregationTemporality is not included. Consequently, this also means
// "StartTimeUnixNano" is ignored for all data points.
message IntGauge {
option deprecated = true;
repeated IntDataPoint data_points = 1;
}
// Gauge represents the type of a double scalar metric that always exports the
// "current value" for every data point. It should be used for an "unknown"
// aggregation.
//
//
// A Gauge does not support different aggregation temporalities. Given the
// aggregation is unknown, points cannot be combined using the same
// aggregation, regardless of aggregation temporalities. Therefore,
// AggregationTemporality is not included. Consequently, this also means
// "StartTimeUnixNano" is ignored for all data points.
message DoubleGauge {
repeated DoubleDataPoint data_points = 1;
message Gauge {
repeated NumberDataPoint data_points = 1;
}
// Sum represents the type of a numeric int scalar metric that is calculated as
// IntSum is deprecated. Use Sum with an integer value in NumberDataPoint.
//
// IntSum represents the type of a numeric int scalar metric that is calculated as
// a sum of all reported measurements over a time interval.
message IntSum {
option deprecated = true;
repeated IntDataPoint data_points = 1;
// aggregation_temporality describes if the aggregator reports delta changes
@ -188,9 +229,9 @@ message IntSum {
// Sum represents the type of a numeric double scalar metric that is calculated
// as a sum of all reported measurements over a time interval.
message DoubleSum {
repeated DoubleDataPoint data_points = 1;
message Sum {
repeated NumberDataPoint data_points = 1;
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality aggregation_temporality = 2;
@ -199,9 +240,14 @@ message DoubleSum {
bool is_monotonic = 3;
}
// Represents the type of a metric that is calculated by aggregating as a
// IntHistogram is deprecated, replaced by Histogram points using double-
// valued exemplars.
//
// This represents the type of a metric that is calculated by aggregating as a
// Histogram of all reported int measurements over a time interval.
message IntHistogram {
option deprecated = true;
repeated IntHistogramDataPoint data_points = 1;
// aggregation_temporality describes if the aggregator reports delta changes
@ -209,24 +255,24 @@ message IntHistogram {
AggregationTemporality aggregation_temporality = 2;
}
// Represents the type of a metric that is calculated by aggregating as a
// Histogram of all reported double measurements over a time interval.
message DoubleHistogram {
repeated DoubleHistogramDataPoint data_points = 1;
// Histogram represents the type of a metric that is calculated by aggregating
// as a Histogram of all reported double measurements over a time interval.
message Histogram {
repeated HistogramDataPoint data_points = 1;
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality aggregation_temporality = 2;
}
// DoubleSummary metric data are used to convey quantile summaries,
// Summary metric data are used to convey quantile summaries,
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
// data type. These data points cannot always be merged in a meaningful way.
// While they can be useful in some applications, histogram data points are
// recommended for new applications.
message DoubleSummary {
repeated DoubleSummaryDataPoint data_points = 1;
message Summary {
repeated SummaryDataPoint data_points = 1;
}
// AggregationTemporality defines how a metric aggregator reports aggregated
@ -293,7 +339,7 @@ enum AggregationTemporality {
// number of requests received over the interval of time t_1 to
// t_0+1 with a value of 1.
//
// Note: Even though, when reporting changes since last report time, using
// CUMULATIVE is valid, it is not recommended. This may cause problems for
// systems that do not use start_time to determine when the aggregation
// value was reset (e.g. Prometheus).
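To illustrate the delta/cumulative distinction discussed above, a hedged sketch assuming generated Python bindings (metrics_pb2) and the standard OTLP enum value names: a delta Sum reports only what happened in the interval and moves its start time forward on each export, while a cumulative Sum keeps a fixed start time and reports the running total.

from opentelemetry.proto.metrics.v1 import metrics_pb2

delta_sum = metrics_pb2.Sum(
    is_monotonic=True,
    aggregation_temporality=metrics_pb2.AGGREGATION_TEMPORALITY_DELTA,
    data_points=[metrics_pb2.NumberDataPoint(
        as_int=7,                                        # requests in this interval only
        start_time_unix_nano=1_624_000_000_000_000_000,  # interval start
        time_unix_nano=1_624_000_060_000_000_000,        # interval end
    )],
)

cumulative_sum = metrics_pb2.Sum(
    is_monotonic=True,
    aggregation_temporality=metrics_pb2.AGGREGATION_TEMPORALITY_CUMULATIVE,
    data_points=[metrics_pb2.NumberDataPoint(
        as_int=1342,                                     # running total since the start time
        start_time_unix_nano=1_624_000_000_000_000_000,  # stays fixed across exports
        time_unix_nano=1_624_000_060_000_000_000,
    )],
)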
@ -303,25 +349,20 @@ enum AggregationTemporality {
// IntDataPoint is a single data point in a timeseries that describes the
// time-varying values of an int64 metric.
message IntDataPoint {
option deprecated = true;
// The set of labels that uniquely identify this timeseries.
repeated opentelemetry.proto.common.v1.StringKeyValue labels = 1;
// start_time_unix_nano is the last time when the aggregation value was reset
// to "zero". For some metric types this is ignored, see data types for more
// details.
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// The aggregation value is over the time interval (start_time_unix_nano,
// time_unix_nano].
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
//
// Value of 0 indicates that the timestamp is unspecified. In that case the
// timestamp may be decided by the backend.
fixed64 start_time_unix_nano = 2;
// time_unix_nano is the moment when this aggregation value was reported.
//
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 3;
@ -334,64 +375,75 @@ message IntDataPoint {
repeated IntExemplar exemplars = 5;
}
// DoubleDataPoint is a single data point in a timeseries that describes the
// NumberDataPoint is a single data point in a timeseries that describes the
// time-varying scalar value of a metric.
message DoubleDataPoint {
// The set of labels that uniquely identify this timeseries.
repeated opentelemetry.proto.common.v1.StringKeyValue labels = 1;
message NumberDataPoint {
// The set of key/value pairs that uniquely identify the timeseries to which
// this point belongs. The list may be empty (may contain 0 elements).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 7;
// start_time_unix_nano is the last time when the aggregation value was reset
// to "zero". For some metric types this is ignored, see data types for more
// details.
// Labels is deprecated and will be removed soon.
// 1. Old senders and receivers that are not aware of this change will
// continue using the `labels` field.
// 2. New senders, which are aware of this change MUST send only `attributes`.
// 3. New receivers, which are aware of this change MUST also accept the old
// `labels` field and convert it into `attributes`, treating each label as a
// string-valued attribute.
//
// This field will be removed in ~3 months, on July 1, 2021.
repeated opentelemetry.proto.common.v1.StringKeyValue labels = 1 [deprecated = true];
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// The aggregation value is over the time interval (start_time_unix_nano,
// time_unix_nano].
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
//
// Value of 0 indicates that the timestamp is unspecified. In that case the
// timestamp may be decided by the backend.
fixed64 start_time_unix_nano = 2;
// time_unix_nano is the moment when this aggregation value was reported.
//
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 3;
// value itself.
double value = 4;
// The value itself. A point is considered invalid when one of the recognized
// value fields is not present inside this oneof.
oneof value {
double as_double = 4;
sfixed64 as_int = 6;
}
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
repeated DoubleExemplar exemplars = 5;
repeated Exemplar exemplars = 5;
}
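A hedged sketch of the new NumberDataPoint shape described above (generated Python bindings assumed; names illustrative): typed attributes instead of the deprecated string labels, and exactly one member of the value oneof set.

from opentelemetry.proto.common.v1 import common_pb2
from opentelemetry.proto.metrics.v1 import metrics_pb2

point = metrics_pb2.NumberDataPoint(
    # New senders populate `attributes`; the deprecated `labels` field stays empty.
    attributes=[
        common_pb2.KeyValue(key="host.name",
                            value=common_pb2.AnyValue(string_value="web-01")),
    ],
    start_time_unix_nano=1_624_000_000_000_000_000,
    time_unix_nano=1_624_000_060_000_000_000,
    as_double=0.73,  # or as_int; a point with neither set is invalid
)

assert point.WhichOneof("value") == "as_double"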
// IntHistogramDataPoint is a single data point in a timeseries that describes
// IntHistogramDataPoint is deprecated; use HistogramDataPoint.
//
// This is a single data point in a timeseries that describes
// the time-varying values of a Histogram of int values. A Histogram contains
// summary statistics for a population of values; it may optionally contain
// the distribution of those values across a set of buckets.
//
// If the histogram contains the distribution of values, then both
// "explicit_bounds" and "bucket counts" fields must be defined.
// If the histogram does not contain the distribution of values, then both
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
// "sum" are known.
message IntHistogramDataPoint {
option deprecated = true;
// The set of labels that uniquely identify this timeseries.
repeated opentelemetry.proto.common.v1.StringKeyValue labels = 1;
// start_time_unix_nano is the last time when the aggregation value was reset
// to "zero". For some metric types this is ignored, see data types for more
// details.
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// The aggregation value is over the time interval (start_time_unix_nano,
// time_unix_nano].
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
//
// Value of 0 indicates that the timestamp is unspecified. In that case the
// timestamp may be decided by the backend.
fixed64 start_time_unix_nano = 2;
// time_unix_nano is the moment when this aggregation value was reported.
//
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 3;
@ -415,28 +467,20 @@ message IntHistogramDataPoint {
// the number of elements in explicit_bounds array.
repeated fixed64 bucket_counts = 6;
// A histogram may optionally contain the distribution of the values in the population.
// In that case one of the option fields below and "buckets" field both must be defined.
// Otherwise all option fields and "buckets" field must be omitted in which case the
// distribution of values in the histogram is unknown and only the total count and sum are known.
// explicit_bounds is the only supported bucket option currently.
// TODO: Add more bucket options.
// explicit_bounds specifies buckets with explicitly defined bounds for values.
// The bucket boundaries are described by "bounds" field.
//
// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket
// at index i are:
// This defines size(explicit_bounds) + 1 (= N) buckets. The boundaries for
// bucket at index i are:
//
// (-infinity, bounds[i]) for i == 0
// [bounds[i-1], bounds[i]) for 0 < i < N-1
// [bounds[i], +infinity) for i == N-1
// The values in bounds array must be strictly increasing.
// (-infinity, explicit_bounds[i]] for i == 0
// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < N-1
// (explicit_bounds[i], +infinity) for i == N-1
//
// Note: only [a, b) intervals are currently supported for each bucket except the first one.
// If we decide to also support (a, b] intervals we should add support for these by defining
// a boolean value which decides what type of intervals to use.
// The values in the explicit_bounds array must be strictly increasing.
//
// Histogram buckets are inclusive of their upper boundary, except the last
// bucket where the boundary is at infinity. This format is intentionally
// compatible with the OpenMetrics histogram definition.
repeated double explicit_bounds = 7;
// (Optional) List of exemplars collected from
@ -448,26 +492,36 @@ message IntHistogramDataPoint {
// time-varying values of a Histogram of double values. A Histogram contains
// summary statistics for a population of values; it may optionally contain the
// distribution of those values across a set of buckets.
message DoubleHistogramDataPoint {
// The set of labels that uniquely identify this timeseries.
repeated opentelemetry.proto.common.v1.StringKeyValue labels = 1;
//
// If the histogram contains the distribution of values, then both
// "explicit_bounds" and "bucket counts" fields must be defined.
// If the histogram does not contain the distribution of values, then both
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
// "sum" are known.
message HistogramDataPoint {
// The set of key/value pairs that uniquely identify the timeseries to which
// this point belongs. The list may be empty (may contain 0 elements).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 9;
// start_time_unix_nano is the last time when the aggregation value was reset
// to "zero". For some metric types this is ignored, see data types for more
// details.
// Labels is deprecated and will be removed soon.
// 1. Old senders and receivers that are not aware of this change will
// continue using the `labels` field.
// 2. New senders, which are aware of this change MUST send only `attributes`.
// 3. New receivers, which are aware of this change MUST also accept the old
// `labels` field and convert it into `attributes`, treating each label as a
// string-valued attribute.
//
// This field will be removed in ~3 months, on July 1, 2021.
repeated opentelemetry.proto.common.v1.StringKeyValue labels = 1 [deprecated = true];
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// The aggregation value is over the time interval (start_time_unix_nano,
// time_unix_nano].
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
//
// Value of 0 indicates that the timestamp is unspecified. In that case the
// timestamp may be decided by the backend.
fixed64 start_time_unix_nano = 2;
// time_unix_nano is the moment when this aggregation value was reported.
//
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
fixed64 time_unix_nano = 3;
@ -480,6 +534,12 @@ message DoubleHistogramDataPoint {
// sum of the values in the population. If count is zero then this field
// must be zero. This value must be equal to the sum of the "sum" fields in
// buckets if a histogram is provided.
//
// Note: Sum should only be filled out when measuring non-negative discrete
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
double sum = 5;
// bucket_counts is an optional field that contains the count values of histogram
@ -491,56 +551,52 @@ message DoubleHistogramDataPoint {
// the number of elements in explicit_bounds array.
repeated fixed64 bucket_counts = 6;
// A histogram may optionally contain the distribution of the values in the population.
// In that case one of the option fields below and "buckets" field both must be defined.
// Otherwise all option fields and "buckets" field must be omitted in which case the
// distribution of values in the histogram is unknown and only the total count and sum are known.
// explicit_bounds is the only supported bucket option currently.
// TODO: Add more bucket options.
// explicit_bounds specifies buckets with explicitly defined bounds for values.
// The bucket boundaries are described by "bounds" field.
//
// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket
// at index i are:
// This defines size(explicit_bounds) + 1 (= N) buckets. The boundaries for
// bucket at index i are:
//
// (-infinity, bounds[i]) for i == 0
// [bounds[i-1], bounds[i]) for 0 < i < N-1
// [bounds[i], +infinity) for i == N-1
// The values in bounds array must be strictly increasing.
// (-infinity, explicit_bounds[i]] for i == 0
// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < N-1
// (explicit_bounds[i], +infinity) for i == N-1
//
// Note: only [a, b) intervals are currently supported for each bucket except the first one.
// If we decide to also support (a, b] intervals we should add support for these by defining
// a boolean value which decides what type of intervals to use.
// The values in the explicit_bounds array must be strictly increasing.
//
// Histogram buckets are inclusive of their upper boundary, except the last
// bucket where the boundary is at infinity. This format is intentionally
// compatible with the OpenMetrics histogram definition.
repeated double explicit_bounds = 7;
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
repeated DoubleExemplar exemplars = 8;
repeated Exemplar exemplars = 8;
}
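A worked example of the bucket rule above, as a hedged sketch with generated Python bindings (metrics_pb2): three explicit_bounds define four (lower, upper]-style buckets, bucket_counts has one more element than explicit_bounds, and the bucket counts add up to count.

from opentelemetry.proto.metrics.v1 import metrics_pb2

# explicit_bounds = [0, 5, 10] defines N = 4 buckets:
#   (-inf, 0], (0, 5], (5, 10], (10, +inf)
point = metrics_pb2.HistogramDataPoint(
    start_time_unix_nano=1_624_000_000_000_000_000,
    time_unix_nano=1_624_000_060_000_000_000,
    count=20,
    sum=87.5,
    explicit_bounds=[0.0, 5.0, 10.0],
    bucket_counts=[1, 12, 5, 2],  # len == len(explicit_bounds) + 1
)

assert sum(point.bucket_counts) == point.count  # 1 + 12 + 5 + 2 == 20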
// DoubleSummaryDataPoint is a single data point in a timeseries that describes the
// SummaryDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Summary metric.
message DoubleSummaryDataPoint {
// The set of labels that uniquely identify this timeseries.
repeated opentelemetry.proto.common.v1.StringKeyValue labels = 1;
message SummaryDataPoint {
// The set of key/value pairs that uniquely identify the timeseries to which
// this point belongs. The list may be empty (may contain 0 elements).
repeated opentelemetry.proto.common.v1.KeyValue attributes = 7;
// start_time_unix_nano is the last time when the aggregation value was reset
// to "zero". For some metric types this is ignored, see data types for more
// details.
// Labels is deprecated and will be removed soon.
// 1. Old senders and receivers that are not aware of this change will
// continue using the `labels` field.
// 2. New senders, which are aware of this change MUST send only `attributes`.
// 3. New receivers, which are aware of this change MUST also accept the old
// `labels` field and convert it into `attributes`, treating each label as a
// string-valued attribute.
//
// The aggregation value is over the time interval (start_time_unix_nano,
// time_unix_nano].
// This field will be removed in ~3 months, on July 1, 2021.
repeated opentelemetry.proto.common.v1.StringKeyValue labels = 1 [deprecated = true];
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
//
// Value of 0 indicates that the timestamp is unspecified. In that case the
// timestamp may be decided by the backend.
fixed64 start_time_unix_nano = 2;
// time_unix_nano is the moment when this aggregation value was reported.
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
@ -551,6 +607,12 @@ message DoubleSummaryDataPoint {
// sum of the values in the population. If count is zero then this field
// must be zero.
//
// Note: Sum should only be filled out when measuring non-negative discrete
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
double sum = 5;
// Represents the value at a given quantile of a distribution.
@ -567,6 +629,8 @@ message DoubleSummaryDataPoint {
double quantile = 1;
// The value at the given quantile of a distribution.
//
// Quantile values must NOT be negative.
double value = 2;
}
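A hedged sketch of a SummaryDataPoint with its nested ValueAtQuantile entries (generated Python bindings assumed). The repeated field name quantile_values does not appear in this diff and is taken from the upstream proto, so treat it as an assumption.

from opentelemetry.proto.metrics.v1 import metrics_pb2

summary_point = metrics_pb2.SummaryDataPoint(
    time_unix_nano=1_624_000_060_000_000_000,
    count=1000,
    sum=812.4,
    # Field name assumed from the upstream proto; quantile values must not be negative.
    quantile_values=[
        metrics_pb2.SummaryDataPoint.ValueAtQuantile(quantile=0.5, value=0.6),
        metrics_pb2.SummaryDataPoint.ValueAtQuantile(quantile=0.99, value=2.3),
    ],
)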
@ -580,6 +644,8 @@ message DoubleSummaryDataPoint {
// was recorded, for example the span and trace ID of the active span when the
// exemplar was recorded.
message IntExemplar {
option deprecated = true;
// The set of labels that were filtered out by the aggregator, but recorded
// alongside the original measurement. Only labels that were filtered out
// by the aggregator should be included
@ -605,15 +671,26 @@ message IntExemplar {
bytes trace_id = 5;
}
// A representation of an exemplar, which is a sample input double measurement.
// A representation of an exemplar, which is a sample input measurement.
// Exemplars also hold information about the environment when the measurement
// was recorded, for example the span and trace ID of the active span when the
// exemplar was recorded.
message DoubleExemplar {
// The set of labels that were filtered out by the aggregator, but recorded
// alongside the original measurement. Only labels that were filtered out
// by the aggregator should be included
repeated opentelemetry.proto.common.v1.StringKeyValue filtered_labels = 1;
message Exemplar {
// The set of key/value pairs that were filtered out by the aggregator, but
// recorded alongside the original measurement. Only key/value pairs that were
// filtered out by the aggregator should be included
repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7;
// Labels is deprecated and will be removed soon.
// 1. Old senders and receivers that are not aware of this change will
// continue using the `filtered_labels` field.
// 2. New senders, which are aware of this change MUST send only
// `filtered_attributes`.
// 3. New receivers, which are aware of this change MUST also accept the old
// `filtered_labels` field and convert it into `filtered_attributes`, treating
// each label as a string-valued attribute.
//
// This field will be removed in ~3 months, on July 1, 2021.
repeated opentelemetry.proto.common.v1.StringKeyValue filtered_labels = 1 [deprecated = true];
// time_unix_nano is the exact time when this exemplar was recorded
//
@ -621,8 +698,13 @@ message DoubleExemplar {
// 1970.
fixed64 time_unix_nano = 2;
// Numerical double value of the measurement that was recorded.
double value = 3;
// Numerical value of the measurement that was recorded. An exemplar is
// considered invalid when one of the recognized value fields is not present
// inside this oneof.
oneof value {
double as_double = 3;
sfixed64 as_int = 6;
}
// (Optional) Span ID of the exemplar trace.
// span_id may be missing if the measurement is not recorded inside a trace

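Finally, a hedged sketch of the new Exemplar shape (generated Python bindings assumed; identifiers illustrative): filtered_attributes instead of the deprecated filtered_labels, one member of the as_double/as_int oneof, and the optional 8-byte span and 16-byte trace IDs.

import os
from opentelemetry.proto.common.v1 import common_pb2
from opentelemetry.proto.metrics.v1 import metrics_pb2

exemplar = metrics_pb2.Exemplar(
    filtered_attributes=[
        common_pb2.KeyValue(key="customer.tier",
                            value=common_pb2.AnyValue(string_value="gold")),
    ],
    time_unix_nano=1_624_000_060_000_000_000,
    as_double=0.251,          # or as_int; exactly one value variant must be set
    span_id=os.urandom(8),    # optional: 8-byte span id of the active span
    trace_id=os.urandom(16),  # optional: 16-byte trace id
)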
View File

@ -32,6 +32,11 @@ message ResourceSpans {
// A list of InstrumentationLibrarySpans that originate from a resource.
repeated InstrumentationLibrarySpans instrumentation_library_spans = 2;
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "instrumentation_library_spans" field, whose entries have
// their own schema_url field.
string schema_url = 3;
}
// A collection of Spans produced by an InstrumentationLibrary.
@ -43,6 +48,9 @@ message InstrumentationLibrarySpans {
// A list of Spans that originate from an instrumentation library.
repeated Span spans = 2;
// This schema_url applies to all spans and span events in the "spans" field.
string schema_url = 3;
}
// Span represents a single operation within a trace. Spans can be
@ -107,7 +115,7 @@ message Span {
SPAN_KIND_UNSPECIFIED = 0;
// Indicates that the span represents an internal operation within an application,
// as opposed to an operations happening at the boundaries. Default value.
// as opposed to an operation happening at the boundaries. Default value.
SPAN_KIND_INTERNAL = 1;
// Indicates that the span covers server-side handling of an RPC or other
@ -300,7 +308,7 @@ message Status {
string message = 2;
// For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
enum StatusCode {
// The default status.
STATUS_CODE_UNSET = 0;